xref: /dpdk/app/test-pmd/testpmd.c (revision 933617d87eed576b3626b92882bb178a0d0494f9)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Find next enabled port
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

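/*
 * Set the default Ethernet addresses of the peer ports: locally
 * administered addresses whose last byte encodes the port id.
 */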
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

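/* By default, forward on all probed ports, in probing order. */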
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

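/* Build the default forwarding configuration: lcores, peer addresses, ports. */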
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Create a mbuf pool for the given socket. Done once per socket at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* If the former XEN allocation failed, fall back to normal allocation. */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);

			/*
			 * Only initialize the pool when its anonymous
			 * population succeeded; otherwise free it and let
			 * the rte_mp == NULL check below report the error.
			 */
			if (rte_mp != NULL &&
			    rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
			}
			if (rte_mp != NULL) {
				rte_pktmbuf_pool_init(rte_mp, NULL);
				rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init,
						NULL);
			}
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, as nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Record which mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}


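/*
 * Re-read the device info of a (possibly newly attached) port and flag it
 * for reconfiguration of the port and its queues.
 */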
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


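/*
 * Allocate one forwarding stream per (port, queue) pair.
 * Return 0 on success, -1 if the configured number of RX/TX queues
 * exceeds what a device supports.
 */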
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to NUMA mode or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* Also track the second highest burst count. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

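/* Display the forwarding statistics accumulated by a port during the run. */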
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

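/* Display the statistics of a single forwarding stream (RX/TX port-queue pair). */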
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

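/*
 * Drain and free any packets still pending in the RX queues of all
 * forwarding ports, so that a new run starts from a clean state.
 */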
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

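/* Forwarding loop of an lcore: iterate over its assigned streams until stopped. */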
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Set up the per-port forwarding context.
 *     - Launch the logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

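/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait for
 * them to finish, then collect and display per-stream, per-port and
 * accumulated statistics.
 */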
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nFailed to set link up.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nFailed to set link down.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

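/* Return 1 if all (non-slave) ports are stopped, 0 otherwise. */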
int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
			(port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

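/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL:
 * (re)configure the device and its queues if needed, start it, then
 * optionally check the link status.
 */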
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Failed to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocation"
							" on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocation"
							" on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Failed to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need to check link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

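/* Stop the given port, or all ports when pid is RTE_PORT_ALL. */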
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

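/* Close the given port, or all ports when pid is RTE_PORT_ALL. */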
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

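/* Hotplug a new port, identified by a PCI address or a virtual device name. */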
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Total number of ports is now %d\n",
			pi, nb_ports);
	printf("Done\n");
}

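/* Detach (hot-unplug) a port; the port must be closed first. */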
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (rte_eth_dev_detach(port_id, name))
		return;

	ports[port_id].enabled = 0;
	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Total number of ports is now %d\n",
			name, nb_ports);
	printf("Done\n");
}

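/* Stop forwarding and shut down all ports before exiting. */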
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		FOREACH_PORT(pt_id, ports) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports for up to 9 seconds, then print it. */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		FOREACH_PORT(portid, ports) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

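/*
 * Apply the configured TX queue to statistics-counter mappings to the
 * device registers of the given port.
 */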
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}

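/*
 * Initialize the RX/TX queue configuration of a port from the device
 * defaults, overridden by any threshold parameters given on the command
 * line.
 */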
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}

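/* Apply the global configuration (rxmode, fdir, RSS, mq mode) to every port. */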
1790 void
1791 init_port_config(void)
1792 {
1793 	portid_t pid;
1794 	struct rte_port *port;
1795 
1796 	FOREACH_PORT(pid, ports) {
1797 		port = &ports[pid];
1798 		port->dev_conf.rxmode = rx_mode;
1799 		port->dev_conf.fdir_conf = fdir_conf;
1800 		if (nb_rxq > 1) {
1801 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1802 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1803 		} else {
1804 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1805 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1806 		}
1807 
1808 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1809 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1810 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1811 			else
1812 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1813 		}
1814 
1815 		if (port->dev_info.max_vfs != 0) {
1816 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1817 				port->dev_conf.rxmode.mq_mode =
1818 					ETH_MQ_RX_VMDQ_RSS;
1819 			else
1820 				port->dev_conf.rxmode.mq_mode =
1821 					ETH_MQ_RX_NONE;
1822 
1823 			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1824 		}
1825 
1826 		rxtx_port_config(port);
1827 
1828 		rte_eth_macaddr_get(pid, &port->eth_addr);
1829 
1830 		map_port_queue_stats_mapping_registers(pid, port);
1831 #ifdef RTE_NIC_BYPASS
1832 		rte_eth_dev_bypass_init(pid);
1833 #endif
1834 	}
1835 }
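
/*
 * Summary of the RX mq_mode selected above when DCB is not enabled:
 *
 *   nb_rxq > 1, no VFs  -> ETH_MQ_RX_RSS
 *   nb_rxq > 1, VFs     -> ETH_MQ_RX_VMDQ_RSS
 *   single RX queue     -> ETH_MQ_RX_NONE
 */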
1836 
1837 void set_port_slave_flag(portid_t slave_pid)
1838 {
1839 	struct rte_port *port;
1840 
1841 	port = &ports[slave_pid];
1842 	port->slave_flag = 1;
1843 }
1844 
1845 void clear_port_slave_flag(portid_t slave_pid)
1846 {
1847 	struct rte_port *port;
1848 
1849 	port = &ports[slave_pid];
1850 	port->slave_flag = 0;
1851 }
1852 
1853 uint8_t port_is_bonding_slave(portid_t slave_pid)
1854 {
1855 	struct rte_port *port;
1856 
1857 	port = &ports[slave_pid];
1858 	return port->slave_flag;
1859 }
1860 
1861 const uint16_t vlan_tags[] = {
1862 		0,  1,  2,  3,  4,  5,  6,  7,
1863 		8,  9, 10, 11,  12, 13, 14, 15,
1864 		16, 17, 18, 19, 20, 21, 22, 23,
1865 		24, 25, 26, 27, 28, 29, 30, 31
1866 };
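
/*
 * These 32 tags feed both the VMDQ+DCB pool mapping built in
 * get_eth_dcb_conf() below and the VLAN filter installed by
 * init_port_dcb_config().
 */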
1867 
1868 static int
1869 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1870 		 enum dcb_mode_enable dcb_mode,
1871 		 enum rte_eth_nb_tcs num_tcs,
1872 		 uint8_t pfc_en)
1873 {
1874 	uint8_t i;
1875 
1876 	/*
1877 	 * Builds up the correct configuration for DCB+VT based on the vlan_tags
1878 	 * array given above, and the number of traffic classes available for use.
1879 	 */
1880 	if (dcb_mode == DCB_VT_ENABLED) {
1881 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1882 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
1883 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1884 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1885 
1886 		/* VMDQ+DCB RX and TX configurations */
1887 		vmdq_rx_conf->enable_default_pool = 0;
1888 		vmdq_rx_conf->default_pool = 0;
1889 		vmdq_rx_conf->nb_queue_pools =
1890 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1891 		vmdq_tx_conf->nb_queue_pools =
1892 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1893 
1894 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1895 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1896 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1897 			vmdq_rx_conf->pool_map[i].pools =
1898 				1 << (i % vmdq_rx_conf->nb_queue_pools);
1899 		}
1900 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1901 			/* keep the mapped TC within the configured number of TCs */
1902 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
1903 		}
1904 
1905 		/* set DCB multi-queue mode for both RX and TX */
1906 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1907 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1908 	} else {
1909 		struct rte_eth_dcb_rx_conf *rx_conf =
1910 				&eth_conf->rx_adv_conf.dcb_rx_conf;
1911 		struct rte_eth_dcb_tx_conf *tx_conf =
1912 				&eth_conf->tx_adv_conf.dcb_tx_conf;
1913 
1914 		rx_conf->nb_tcs = num_tcs;
1915 		tx_conf->nb_tcs = num_tcs;
1916 
1917 		for (i = 0; i < num_tcs; i++) {
1918 			rx_conf->dcb_tc[i] = i;
1919 			tx_conf->dcb_tc[i] = i;
1920 		}
1921 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1922 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1923 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1924 	}
1925 
1926 	if (pfc_en)
1927 		eth_conf->dcb_capability_en =
1928 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1929 	else
1930 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1931 
1932 	return 0;
1933 }
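
/*
 * Worked example for the VT+DCB branch above: with num_tcs == ETH_4_TCS
 * there are 32 pools and 32 pool maps, so vlan_tags[i] steers traffic to
 * pool i for i = 0..31. With ETH_8_TCS there are only 16 pools, so just
 * the first 16 vlan_tags[] entries are mapped; the "i % nb_queue_pools"
 * term never wraps because nb_pool_maps == nb_queue_pools.
 */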
1934 
1935 int
1936 init_port_dcb_config(portid_t pid,
1937 		     enum dcb_mode_enable dcb_mode,
1938 		     enum rte_eth_nb_tcs num_tcs,
1939 		     uint8_t pfc_en)
1940 {
1941 	struct rte_eth_conf port_conf;
1942 	struct rte_eth_dev_info dev_info;
1943 	struct rte_port *rte_port;
1944 	int retval;
1945 	uint16_t i;
1946 
1947 	rte_eth_dev_info_get(pid, &dev_info);
1948 
1949 	/* If dev_info.vmdq_pool_base is greater than 0,
1950 	 * the queue IDs of the VMDQ pools start after the PF queues.
1951 	 */
1952 	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1953 		printf("VMDQ_DCB multi-queue mode is nonsensical"
1954 			" for port %d.\n", pid);
1955 		return -1;
1956 	}
1957 
1958 	/* Assume all ports in testpmd have the same DCB capability
1959 	 * and the same number of RX and TX queues in DCB mode.
1960 	 */
1961 	if (dcb_mode == DCB_VT_ENABLED) {
1962 		nb_rxq = dev_info.max_rx_queues;
1963 		nb_txq = dev_info.max_tx_queues;
1964 	} else {
1965 		/* if VT is disabled, use all PF queues */
1966 		if (dev_info.vmdq_pool_base == 0) {
1967 			nb_rxq = dev_info.max_rx_queues;
1968 			nb_txq = dev_info.max_tx_queues;
1969 		} else {
1970 			nb_rxq = (queueid_t)num_tcs;
1971 			nb_txq = (queueid_t)num_tcs;
1973 		}
1974 	}
1975 	rx_free_thresh = 64;
1976 
1977 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1978 	/* Enter DCB configuration status */
1979 	dcb_config = 1;
1980 
1981 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
1982 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1983 	if (retval < 0)
1984 		return retval;
1985 
1986 	rte_port = &ports[pid];
1987 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1988 
1989 	rxtx_port_config(rte_port);
1990 	/* VLAN filter */
1991 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1992 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
1993 		rx_vft_set(pid, vlan_tags[i], 1);
1994 
1995 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1996 	map_port_queue_stats_mapping_registers(pid, rte_port);
1997 
1998 	rte_port->dcb_flag = 1;
1999 
2000 	return 0;
2001 }
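
/*
 * init_port_dcb_config() is typically reached from the interactive command
 * line, e.g. (assuming the usual testpmd syntax, with the port stopped):
 *
 *   testpmd> port stop 0
 *   testpmd> port config 0 dcb vt off 4 pfc on
 *   testpmd> port start 0
 *
 * which configures four traffic classes with priority flow control enabled.
 */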
2002 
2003 static void
2004 init_port(void)
2005 {
2006 	portid_t pid;
2007 
2008 	/* Configuration of Ethernet ports. */
2009 	ports = rte_zmalloc("testpmd: ports",
2010 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2011 			    RTE_CACHE_LINE_SIZE);
2012 	if (ports == NULL) {
2013 		rte_exit(EXIT_FAILURE,
2014 				"rte_zmalloc(%d struct rte_port) failed\n",
2015 				RTE_MAX_ETHPORTS);
2016 	}
2017 
2018 	/* enable allocated ports */
2019 	for (pid = 0; pid < nb_ports; pid++)
2020 		ports[pid].enabled = 1;
2021 }
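
/*
 * rte_zmalloc() returns zeroed memory, so every field of the port array
 * starts out 0 (disabled); only the nb_ports probed devices are switched
 * on above. The FOREACH_PORT() iterator used elsewhere relies on this
 * enabled flag to skip unused slots.
 */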
2022 
2023 static void
2024 force_quit(void)
2025 {
2026 	pmd_test_exit();
2027 	prompt_exit();
2028 }
2029 
2030 static void
2031 signal_handler(int signum)
2032 {
2033 	if (signum == SIGINT || signum == SIGTERM) {
2034 		printf("\nSignal %d received, preparing to exit...\n",
2035 				signum);
2036 		force_quit();
2037 		/* exit with the expected status */
2038 		signal(signum, SIG_DFL);
2039 		kill(getpid(), signum);
2040 	}
2041 }
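
/*
 * Restoring SIG_DFL and re-raising the signal (above) makes the process
 * terminate with the conventional "killed by signal" status instead of a
 * plain exit code, so a parent shell or script can tell SIGINT/SIGTERM
 * termination apart from a normal exit.
 */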
2042 
2043 int
2044 main(int argc, char** argv)
2045 {
2046 	int  diag;
2047 	uint8_t port_id;
2048 
2049 	signal(SIGINT, signal_handler);
2050 	signal(SIGTERM, signal_handler);
2051 
2052 	diag = rte_eal_init(argc, argv);
2053 	if (diag < 0)
2054 		rte_panic("Cannot init EAL\n");
2055 
2056 	nb_ports = (portid_t) rte_eth_dev_count();
2057 	if (nb_ports == 0)
2058 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2059 
2060 	/* allocate port structures, and init them */
2061 	init_port();
2062 
2063 	set_def_fwd_config();
2064 	if (nb_lcores == 0)
2065 		rte_panic("Empty set of forwarding logical cores - check the "
2066 			  "core mask supplied in the command parameters\n");
2067 
2068 	argc -= diag;
2069 	argv += diag;
2070 	if (argc > 1)
2071 		launch_args_parse(argc, argv);
2072 
2073 	if (!nb_rxq && !nb_txq)
2074 		printf("Warning: either rx or tx queue count should be non-zero\n");
2075 
2076 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2077 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2078 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2079 		       nb_rxq, nb_txq);
2080 
2081 	init_config();
2082 	if (start_port(RTE_PORT_ALL) != 0)
2083 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2084 
2085 	/* set all ports to promiscuous mode by default */
2086 	FOREACH_PORT(port_id, ports)
2087 		rte_eth_promiscuous_enable(port_id);
2088 
2089 #ifdef RTE_LIBRTE_CMDLINE
2090 	if (interactive == 1) {
2091 		if (auto_start) {
2092 			printf("Start automatic packet forwarding\n");
2093 			start_packet_forwarding(0);
2094 		}
2095 		prompt();
2096 	} else
2097 #endif
2098 	{
2099 		char c;
2100 		int rc;
2101 
2102 		printf("No interactive command line, starting packet forwarding\n");
2103 		start_packet_forwarding(0);
2104 		printf("Press enter to exit\n");
2105 		rc = read(0, &c, 1);
2106 		pmd_test_exit();
2107 		if (rc < 0)
2108 			return 1;
2109 	}
2110 
2111 	return 0;
2112 }
2113
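
/*
 * Example invocation (a sketch; EAL arguments come before "--", testpmd
 * arguments after it):
 *
 *   ./testpmd -c 0x3 -n 4 -- -i --nb-cores=1 --rxq=1 --txq=1
 *
 * runs on two lcores and drops into the interactive prompt.
 */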