xref: /dpdk/app/test-pmd/testpmd.c (revision 47b3ac6b45a1c2bef5b620d3bf67a7ffeb545319)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81 
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87 
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96 
97 /*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102 
103 /*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107 
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116 
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;	       /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124 
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134 
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137 
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140 
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145 	&io_fwd_engine,
146 	&mac_fwd_engine,
147 	&mac_retry_fwd_engine,
148 	&mac_swap_engine,
149 	&flow_gen_engine,
150 	&rx_only_engine,
151 	&tx_only_engine,
152 	&csum_fwd_engine,
153 	&icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 	&ieee1588_fwd_engine,
156 #endif
157 	NULL,
158 };
159 
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162 
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166 
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 	TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175 
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178 
179 /* current configuration is in DCB or not,0 means it is not in DCB mode */
180 uint8_t dcb_config = 0;
181 
182 /* Whether the dcb is in testing status */
183 uint8_t dcb_test = 0;
184 
185 /* DCB on and VT on mapping is default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187 
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193 
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201 
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206 
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210 
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214 
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219 
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224 
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229 
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234 
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239 
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244 
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249 
250 /*
251  * Avoids to flush all the RX streams before starts forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254 
255 /*
256  * Avoids to check link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259 
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264 
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267 
268 #endif
269 
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275 	.split_hdr_size = 0,
276 	.header_split   = 0, /**< Header Split disabled. */
277 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284 
285 struct rte_fdir_conf fdir_conf = {
286 	.mode = RTE_FDIR_MODE_NONE,
287 	.pballoc = RTE_FDIR_PBALLOC_64K,
288 	.status = RTE_FDIR_REPORT_STATUS,
289 	.mask = {
290 		.vlan_tci_mask = 0x0,
291 		.ipv4_mask     = {
292 			.src_ip = 0xFFFFFFFF,
293 			.dst_ip = 0xFFFFFFFF,
294 		},
295 		.ipv6_mask     = {
296 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298 		},
299 		.src_port_mask = 0xFFFF,
300 		.dst_port_mask = 0xFFFF,
301 		.mac_addr_byte_mask = 0xFF,
302 		.tunnel_type_mask = 1,
303 		.tunnel_id_mask = 0xFFFFFFFF,
304 	},
305 	.drop_queue = 127,
306 };
307 
308 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
309 
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
312 
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
315 
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
318 
319 unsigned max_socket = 0;
320 
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
324 
325 /*
326  * Check if all the ports are started.
327  * If yes, return positive value. If not, return zero.
328  */
329 static int all_ports_started(void);
330 
331 /*
332  * Find next enabled port
333  */
334 portid_t
335 find_next_port(portid_t p, struct rte_port *ports, int size)
336 {
337 	if (ports == NULL)
338 		rte_exit(-EINVAL, "failed to find a next port id\n");
339 
340 	while ((p < size) && (ports[p].enabled == 0))
341 		p++;
342 	return p;
343 }
344 
345 /*
346  * Setup default configuration.
347  */
348 static void
349 set_default_fwd_lcores_config(void)
350 {
351 	unsigned int i;
352 	unsigned int nb_lc;
353 	unsigned int sock_num;
354 
355 	nb_lc = 0;
356 	for (i = 0; i < RTE_MAX_LCORE; i++) {
357 		if (! rte_lcore_is_enabled(i))
358 			continue;
359 		if (i == rte_get_master_lcore())
360 			continue;
361 		fwd_lcores_cpuids[nb_lc++] = i;
362 		sock_num = rte_lcore_to_socket_id(i) + 1;
363 		if (sock_num > max_socket) {
364 			if (sock_num > RTE_MAX_NUMA_NODES)
365 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
366 			max_socket = sock_num;
367 		}
368 	}
369 	nb_lcores = (lcoreid_t) nb_lc;
370 	nb_cfg_lcores = nb_lcores;
371 	nb_fwd_lcores = 1;
372 }
373 
374 static void
375 set_def_peer_eth_addrs(void)
376 {
377 	portid_t i;
378 
379 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381 		peer_eth_addrs[i].addr_bytes[5] = i;
382 	}
383 }
384 
385 static void
386 set_default_fwd_ports_config(void)
387 {
388 	portid_t pt_id;
389 
390 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
391 		fwd_ports_ids[pt_id] = pt_id;
392 
393 	nb_cfg_ports = nb_ports;
394 	nb_fwd_ports = nb_ports;
395 }
396 
/*
 * Reset lcores, peer Ethernet addresses and port list to their default
 * forwarding configuration.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
404 
405 /*
406  * Configuration initialisation done once at init time.
407  */
/*
 * Create a pool of 'nb_mbuf' mbufs with 'mbuf_seg_size' bytes of data
 * space each, on the memory of 'socket_id'. The pool name is derived
 * from the socket id (mbuf_poolname_build). Aborts on failure; dumps
 * the pool to stdout when verbose.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	/* Xen grant-table backed pool. */
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);



#else
	if (mp_anon != 0)
		/* Anonymous-mapped memory, selected via --mp-anon. */
		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    rte_pktmbuf_pool_init, NULL,
				    rte_pktmbuf_init, NULL,
				    socket_id, 0);
	else
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);

#endif

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
451 
452 /*
453  * Check given socket id is valid or not with NUMA mode,
454  * if valid, return 0, else return -1
455  */
456 static int
457 check_socket_id(const unsigned int socket_id)
458 {
459 	static int warning_once = 0;
460 
461 	if (socket_id >= max_socket) {
462 		if (!warning_once && numa_support)
463 			printf("Warning: NUMA should be configured manually by"
464 			       " using --port-numa-config and"
465 			       " --ring-numa-config parameters along with"
466 			       " --numa.\n");
467 		warning_once = 1;
468 		return -1;
469 	}
470 	return 0;
471 }
472 
/*
 * One-time initialisation of the forwarding configuration: per-lcore
 * contexts, mbuf pools (one per socket under NUMA, one global pool
 * otherwise), port reconfiguration flags, per-lcore pool selection and
 * the forwarding streams. Aborts on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	/* uint8_t elements, so element count equals byte count here. */
	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case per-port demand: full RX+TX rings, one burst,
		 * plus one cache's worth of mbufs per lcore. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		/* Single pool; placed on socket 0 unless --socket-num set. */
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		/*
		 * NOTE(review): port_per_socket[] is tallied above but the
		 * per-socket pool is sized with RTE_MAX_ETHPORTS regardless
		 * of how many ports actually sit on the socket — confirm
		 * whether per-socket sizing was intended.
		 */
		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf,i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the lcore's socket
		 * has no pool of its own. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
585 
586 
587 void
588 reconfig(portid_t new_port_id, unsigned socket_id)
589 {
590 	struct rte_port *port;
591 
592 	/* Reconfiguration of Ethernet ports. */
593 	port = &ports[new_port_id];
594 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
595 
596 	/* set flag to initialize port/queue */
597 	port->need_reconfig = 1;
598 	port->need_reconfig_queues = 1;
599 	port->socket_id = socket_id;
600 
601 	init_port_config();
602 }
603 
604 
/*
 * (Re)build the forwarding stream array: one stream per (port, RX queue)
 * pair. Validates the configured queue counts against each port's device
 * limits and assigns each port its socket id (NUMA-aware or UMA).
 * Returns 0 on success, -1 if a queue count exceeds a device limit.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit --port-numa-config wins over the
			 * device-reported socket. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	/* Stream count unchanged: keep the existing array as-is. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
679 
680 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
681 static void
682 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
683 {
684 	unsigned int total_burst;
685 	unsigned int nb_burst;
686 	unsigned int burst_stats[3];
687 	uint16_t pktnb_stats[3];
688 	uint16_t nb_pkt;
689 	int burst_percent[3];
690 
691 	/*
692 	 * First compute the total number of packet bursts and the
693 	 * two highest numbers of bursts of the same number of packets.
694 	 */
695 	total_burst = 0;
696 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
697 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
698 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
699 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
700 		if (nb_burst == 0)
701 			continue;
702 		total_burst += nb_burst;
703 		if (nb_burst > burst_stats[0]) {
704 			burst_stats[1] = burst_stats[0];
705 			pktnb_stats[1] = pktnb_stats[0];
706 			burst_stats[0] = nb_burst;
707 			pktnb_stats[0] = nb_pkt;
708 		}
709 	}
710 	if (total_burst == 0)
711 		return;
712 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
713 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
714 	       burst_percent[0], (int) pktnb_stats[0]);
715 	if (burst_stats[0] == total_burst) {
716 		printf("]\n");
717 		return;
718 	}
719 	if (burst_stats[0] + burst_stats[1] == total_burst) {
720 		printf(" + %d%% of %d pkts]\n",
721 		       100 - burst_percent[0], pktnb_stats[1]);
722 		return;
723 	}
724 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
725 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
726 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
727 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
728 		return;
729 	}
730 	printf(" + %d%% of %d pkts + %d%% of others]\n",
731 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
732 }
733 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
734 
/*
 * Display the accumulated forwarding statistics of one port: RX/TX
 * packet and drop counters, checksum errors (csum engine only), pause
 * frame counters, optional per-burst and flow-director stats, and the
 * per-queue counters when queue-stats mapping is enabled. Two layouts
 * are used, depending on whether queue-stats mapping is active.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Compact layout: no queue-stats mapping on either direction. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		/* NOTE(review): ierrors - imissed wraps around if imissed
		 * exceeds ierrors (both uint64_t) — confirm the counters'
		 * intended relationship. */
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
			       "RX-error: %-"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	/* Wide layout: queue-stats mapping active on RX and/or TX. */
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
			       "    RX-error:%"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst spread recorded by the port's RX/TX streams, if set. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* stats fdir */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	/* Per-queue RX counters (only when RX queue-stats mapping is on). */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	/* Per-queue TX counters (only when TX queue-stats mapping is on). */
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
835 
836 static void
837 fwd_stream_stats_display(streamid_t stream_id)
838 {
839 	struct fwd_stream *fs;
840 	static const char *fwd_top_stats_border = "-------";
841 
842 	fs = fwd_streams[stream_id];
843 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
844 	    (fs->fwd_dropped == 0))
845 		return;
846 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
847 	       "TX Port=%2d/Queue=%2d %s\n",
848 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
849 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
850 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
851 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
852 
853 	/* if checksum mode */
854 	if (cur_fwd_eng == &csum_fwd_engine) {
855 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
856 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
857 	}
858 
859 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
860 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
861 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
862 #endif
863 }
864 
865 static void
866 flush_fwd_rx_queues(void)
867 {
868 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
869 	portid_t  rxp;
870 	portid_t port_id;
871 	queueid_t rxq;
872 	uint16_t  nb_rx;
873 	uint16_t  i;
874 	uint8_t   j;
875 
876 	for (j = 0; j < 2; j++) {
877 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
878 			for (rxq = 0; rxq < nb_rxq; rxq++) {
879 				port_id = fwd_ports_ids[rxp];
880 				do {
881 					nb_rx = rte_eth_rx_burst(port_id, rxq,
882 						pkts_burst, MAX_PKT_BURST);
883 					for (i = 0; i < nb_rx; i++)
884 						rte_pktmbuf_free(pkts_burst[i]);
885 				} while (nb_rx > 0);
886 			}
887 		}
888 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
889 	}
890 }
891 
892 static void
893 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
894 {
895 	struct fwd_stream **fsm;
896 	streamid_t nb_fs;
897 	streamid_t sm_id;
898 
899 	fsm = &fwd_streams[fc->stream_idx];
900 	nb_fs = fc->stream_nb;
901 	do {
902 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
903 			(*pkt_fwd)(fsm[sm_id]);
904 	} while (! fc->stopped);
905 }
906 
907 static int
908 start_pkt_forward_on_core(void *fwd_arg)
909 {
910 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
911 			     cur_fwd_config.fwd_eng->packet_fwd);
912 	return 0;
913 }
914 
915 /*
916  * Run the TXONLY packet forwarding engine to send a single burst of packets.
917  * Used to start communication flows in network loopback test configurations.
918  */
919 static int
920 run_one_txonly_burst_on_core(void *fwd_arg)
921 {
922 	struct fwd_lcore *fwd_lc;
923 	struct fwd_lcore tmp_lcore;
924 
925 	fwd_lc = (struct fwd_lcore *) fwd_arg;
926 	tmp_lcore = *fwd_lc;
927 	tmp_lcore.stopped = 1;
928 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
929 	return 0;
930 }
931 
932 /*
933  * Launch packet forwarding:
934  *     - Setup per-port forwarding context.
935  *     - launch logical cores with their forwarding configuration.
936  */
937 static void
938 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
939 {
940 	port_fwd_begin_t port_fwd_begin;
941 	unsigned int i;
942 	unsigned int lc_id;
943 	int diag;
944 
945 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
946 	if (port_fwd_begin != NULL) {
947 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
948 			(*port_fwd_begin)(fwd_ports_ids[i]);
949 	}
950 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
951 		lc_id = fwd_lcores_cpuids[i];
952 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
953 			fwd_lcores[i]->stopped = 0;
954 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
955 						     fwd_lcores[i], lc_id);
956 			if (diag != 0)
957 				printf("launch lcore %u failed - diag=%d\n",
958 				       lc_id, diag);
959 		}
960 	}
961 }
962 
963 /*
964  * Launch packet forwarding configuration.
965  */
/*
 * Start packet forwarding: validate preconditions (all ports started,
 * not already forwarding, DCB constraints), optionally flush stale RX
 * packets, snapshot port stats, zero all stream counters, optionally
 * run one TX-only warm-up burst, then launch the forwarding lcores.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	/* DCB mode: every forwarding port must be DCB-configured, and
	 * more than one forwarding core is required. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	/* Drop any packets received while forwarding was stopped. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	/* Snapshot current HW stats so the final report shows deltas. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset all per-stream software counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optional warm-up: send one TX-only burst and wait for it to
	 * finish before starting the real forwarding engine. */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1049 
1050 void
1051 stop_packet_forwarding(void)
1052 {
1053 	struct rte_eth_stats stats;
1054 	struct rte_port *port;
1055 	port_fwd_end_t  port_fwd_end;
1056 	int i;
1057 	portid_t   pt_id;
1058 	streamid_t sm_id;
1059 	lcoreid_t  lc_id;
1060 	uint64_t total_recv;
1061 	uint64_t total_xmit;
1062 	uint64_t total_rx_dropped;
1063 	uint64_t total_tx_dropped;
1064 	uint64_t total_rx_nombuf;
1065 	uint64_t tx_dropped;
1066 	uint64_t rx_bad_ip_csum;
1067 	uint64_t rx_bad_l4_csum;
1068 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1069 	uint64_t fwd_cycles;
1070 #endif
1071 	static const char *acc_stats_border = "+++++++++++++++";
1072 
1073 	if (all_ports_started() == 0) {
1074 		printf("Not all ports were started\n");
1075 		return;
1076 	}
1077 	if (test_done) {
1078 		printf("Packet forwarding not started\n");
1079 		return;
1080 	}
1081 	printf("Telling cores to stop...");
1082 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1083 		fwd_lcores[lc_id]->stopped = 1;
1084 	printf("\nWaiting for lcores to finish...\n");
1085 	rte_eal_mp_wait_lcore();
1086 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1087 	if (port_fwd_end != NULL) {
1088 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1089 			pt_id = fwd_ports_ids[i];
1090 			(*port_fwd_end)(pt_id);
1091 		}
1092 	}
1093 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1094 	fwd_cycles = 0;
1095 #endif
1096 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1097 		if (cur_fwd_config.nb_fwd_streams >
1098 		    cur_fwd_config.nb_fwd_ports) {
1099 			fwd_stream_stats_display(sm_id);
1100 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1101 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1102 		} else {
1103 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1104 				fwd_streams[sm_id];
1105 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1106 				fwd_streams[sm_id];
1107 		}
1108 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1109 		tx_dropped = (uint64_t) (tx_dropped +
1110 					 fwd_streams[sm_id]->fwd_dropped);
1111 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1112 
1113 		rx_bad_ip_csum =
1114 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1115 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1116 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1117 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1118 							rx_bad_ip_csum;
1119 
1120 		rx_bad_l4_csum =
1121 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1122 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1123 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1124 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1125 							rx_bad_l4_csum;
1126 
1127 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1128 		fwd_cycles = (uint64_t) (fwd_cycles +
1129 					 fwd_streams[sm_id]->core_cycles);
1130 #endif
1131 	}
1132 	total_recv = 0;
1133 	total_xmit = 0;
1134 	total_rx_dropped = 0;
1135 	total_tx_dropped = 0;
1136 	total_rx_nombuf  = 0;
1137 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1138 		pt_id = fwd_ports_ids[i];
1139 
1140 		port = &ports[pt_id];
1141 		rte_eth_stats_get(pt_id, &stats);
1142 		stats.ipackets -= port->stats.ipackets;
1143 		port->stats.ipackets = 0;
1144 		stats.opackets -= port->stats.opackets;
1145 		port->stats.opackets = 0;
1146 		stats.ibytes   -= port->stats.ibytes;
1147 		port->stats.ibytes = 0;
1148 		stats.obytes   -= port->stats.obytes;
1149 		port->stats.obytes = 0;
1150 		stats.imissed  -= port->stats.imissed;
1151 		port->stats.imissed = 0;
1152 		stats.oerrors  -= port->stats.oerrors;
1153 		port->stats.oerrors = 0;
1154 		stats.rx_nombuf -= port->stats.rx_nombuf;
1155 		port->stats.rx_nombuf = 0;
1156 		stats.fdirmatch -= port->stats.fdirmatch;
1157 		port->stats.rx_nombuf = 0;
1158 		stats.fdirmiss -= port->stats.fdirmiss;
1159 		port->stats.rx_nombuf = 0;
1160 
1161 		total_recv += stats.ipackets;
1162 		total_xmit += stats.opackets;
1163 		total_rx_dropped += stats.imissed;
1164 		total_tx_dropped += port->tx_dropped;
1165 		total_rx_nombuf  += stats.rx_nombuf;
1166 
1167 		fwd_port_stats_display(pt_id, &stats);
1168 	}
1169 	printf("\n  %s Accumulated forward statistics for all ports"
1170 	       "%s\n",
1171 	       acc_stats_border, acc_stats_border);
1172 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1173 	       "%-"PRIu64"\n"
1174 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1175 	       "%-"PRIu64"\n",
1176 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1177 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1178 	if (total_rx_nombuf > 0)
1179 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1180 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1181 	       "%s\n",
1182 	       acc_stats_border, acc_stats_border);
1183 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1184 	if (total_recv > 0)
1185 		printf("\n  CPU cycles/packet=%u (total cycles="
1186 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1187 		       (unsigned int)(fwd_cycles / total_recv),
1188 		       fwd_cycles, total_recv);
1189 #endif
1190 	printf("\nDone.\n");
1191 	test_done = 1;
1192 }
1193 
1194 void
1195 dev_set_link_up(portid_t pid)
1196 {
1197 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1198 		printf("\nSet link up fail.\n");
1199 }
1200 
1201 void
1202 dev_set_link_down(portid_t pid)
1203 {
1204 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1205 		printf("\nSet link down fail.\n");
1206 }
1207 
1208 static int
1209 all_ports_started(void)
1210 {
1211 	portid_t pi;
1212 	struct rte_port *port;
1213 
1214 	FOREACH_PORT(pi, ports) {
1215 		port = &ports[pi];
1216 		/* Check if there is a port which is not started */
1217 		if ((port->port_status != RTE_PORT_STARTED) &&
1218 			(port->slave_flag == 0))
1219 			return 0;
1220 	}
1221 
1222 	/* No port is not started */
1223 	return 1;
1224 }
1225 
1226 int
1227 all_ports_stopped(void)
1228 {
1229 	portid_t pi;
1230 	struct rte_port *port;
1231 
1232 	FOREACH_PORT(pi, ports) {
1233 		port = &ports[pi];
1234 		if ((port->port_status != RTE_PORT_STOPPED) &&
1235 			(port->slave_flag == 0))
1236 			return 0;
1237 	}
1238 
1239 	return 1;
1240 }
1241 
1242 int
1243 port_is_started(portid_t port_id)
1244 {
1245 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1246 		return 0;
1247 
1248 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1249 		return 0;
1250 
1251 	return 1;
1252 }
1253 
1254 static int
1255 port_is_closed(portid_t port_id)
1256 {
1257 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1258 		return 0;
1259 
1260 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1261 		return 0;
1262 
1263 	return 1;
1264 }
1265 
/*
 * Configure (if needed) and start port pid, or all ports when
 * pid == RTE_PORT_ALL.  Port state transitions use atomic compare-and-set
 * (STOPPED -> HANDLING -> STARTED) so concurrent attempts are rejected.
 * Returns 0 on success, -1 on configuration failure.
 */
int
start_port(portid_t pid)
{
	/* -1: no matching port found; 0: matched but none started;
	 * 1: at least one port started. */
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if(dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Claim the port: STOPPED -> HANDLING, or skip it. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				/* Release the port back to STOPPED. */
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Honor a per-port NUMA override for the TX
				 * ring when one was configured. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				/* Honor a per-port NUMA override for the RX
				 * ring; requires a mempool on that socket. */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							"on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				}
				else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf),
				             mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;


				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1423 
/*
 * Stop port pid, or all ports when pid == RTE_PORT_ALL.  Port state
 * transitions use atomic compare-and-set (STARTED -> HANDLING -> STOPPED);
 * ports not in STARTED state are silently skipped.  Leaving DCB test
 * mode also clears the DCB configuration flag.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		port = &ports[pi];
		/* Claim the port: STARTED -> HANDLING, or skip it. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1466 
1467 void
1468 close_port(portid_t pid)
1469 {
1470 	portid_t pi;
1471 	struct rte_port *port;
1472 
1473 	if (test_done == 0) {
1474 		printf("Please stop forwarding first\n");
1475 		return;
1476 	}
1477 
1478 	if (port_id_is_invalid(pid, ENABLED_WARN))
1479 		return;
1480 
1481 	printf("Closing ports...\n");
1482 
1483 	FOREACH_PORT(pi, ports) {
1484 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1485 			continue;
1486 
1487 		port = &ports[pi];
1488 		if (rte_atomic16_cmpset(&(port->port_status),
1489 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1490 			printf("Port %d is already closed\n", pi);
1491 			continue;
1492 		}
1493 
1494 		if (rte_atomic16_cmpset(&(port->port_status),
1495 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1496 			printf("Port %d is now not stopped\n", pi);
1497 			continue;
1498 		}
1499 
1500 		rte_eth_dev_close(pi);
1501 
1502 		if (rte_atomic16_cmpset(&(port->port_status),
1503 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1504 			printf("Port %d can not be set into stopped\n", pi);
1505 	}
1506 
1507 	printf("Done\n");
1508 }
1509 
1510 void
1511 attach_port(char *identifier)
1512 {
1513 	portid_t i, j, pi = 0;
1514 
1515 	printf("Attaching a new port...\n");
1516 
1517 	if (identifier == NULL) {
1518 		printf("Invalid parameters are specified\n");
1519 		return;
1520 	}
1521 
1522 	if (test_done == 0) {
1523 		printf("Please stop forwarding first\n");
1524 		return;
1525 	}
1526 
1527 	if (rte_eth_dev_attach(identifier, &pi))
1528 		return;
1529 
1530 	ports[pi].enabled = 1;
1531 	reconfig(pi, rte_eth_dev_socket_id(pi));
1532 	rte_eth_promiscuous_enable(pi);
1533 
1534 	nb_ports = rte_eth_dev_count();
1535 
1536 	/* set_default_fwd_ports_config(); */
1537 	bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1538 	i = 0;
1539 	FOREACH_PORT(j, ports) {
1540 		fwd_ports_ids[i] = j;
1541 		i++;
1542 	}
1543 	nb_cfg_ports = nb_ports;
1544 	nb_fwd_ports++;
1545 
1546 	ports[pi].port_status = RTE_PORT_STOPPED;
1547 
1548 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1549 	printf("Done\n");
1550 }
1551 
1552 void
1553 detach_port(uint8_t port_id)
1554 {
1555 	portid_t i, pi = 0;
1556 	char name[RTE_ETH_NAME_MAX_LEN];
1557 
1558 	printf("Detaching a port...\n");
1559 
1560 	if (!port_is_closed(port_id)) {
1561 		printf("Please close port first\n");
1562 		return;
1563 	}
1564 
1565 	if (rte_eth_dev_detach(port_id, name))
1566 		return;
1567 
1568 	ports[port_id].enabled = 0;
1569 	nb_ports = rte_eth_dev_count();
1570 
1571 	/* set_default_fwd_ports_config(); */
1572 	bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1573 	i = 0;
1574 	FOREACH_PORT(pi, ports) {
1575 		fwd_ports_ids[i] = pi;
1576 		i++;
1577 	}
1578 	nb_cfg_ports = nb_ports;
1579 	nb_fwd_ports--;
1580 
1581 	printf("Port '%s' is detached. Now total ports is %d\n",
1582 			name, nb_ports);
1583 	printf("Done\n");
1584 	return;
1585 }
1586 
1587 void
1588 pmd_test_exit(void)
1589 {
1590 	portid_t pt_id;
1591 
1592 	if (test_done == 0)
1593 		stop_packet_forwarding();
1594 
1595 	FOREACH_PORT(pt_id, ports) {
1596 		printf("Stopping port %d...", pt_id);
1597 		fflush(stdout);
1598 		rte_eth_dev_close(pt_id);
1599 		printf("done\n");
1600 	}
1601 	printf("bye...\n");
1602 }
1603 
/* Handler type for an interactive test command (no args, no result). */
typedef void (*cmd_func_t)(void);
/* One entry of the interactive test command menu. */
struct pmd_test_command {
	const char *cmd_name;	/* command name string */
	cmd_func_t cmd_func;	/* handler invoked for this command */
};

/* Number of entries in the pmd_test_menu table (declared elsewhere). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1611 
1612 /* Check the link status of all ports in up to 9s, and print them finally */
1613 static void
1614 check_all_ports_link_status(uint32_t port_mask)
1615 {
1616 #define CHECK_INTERVAL 100 /* 100ms */
1617 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1618 	uint8_t portid, count, all_ports_up, print_flag = 0;
1619 	struct rte_eth_link link;
1620 
1621 	printf("Checking link statuses...\n");
1622 	fflush(stdout);
1623 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1624 		all_ports_up = 1;
1625 		FOREACH_PORT(portid, ports) {
1626 			if ((port_mask & (1 << portid)) == 0)
1627 				continue;
1628 			memset(&link, 0, sizeof(link));
1629 			rte_eth_link_get_nowait(portid, &link);
1630 			/* print link status if flag set */
1631 			if (print_flag == 1) {
1632 				if (link.link_status)
1633 					printf("Port %d Link Up - speed %u "
1634 						"Mbps - %s\n", (uint8_t)portid,
1635 						(unsigned)link.link_speed,
1636 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1637 					("full-duplex") : ("half-duplex\n"));
1638 				else
1639 					printf("Port %d Link Down\n",
1640 						(uint8_t)portid);
1641 				continue;
1642 			}
1643 			/* clear all_ports_up flag if any link down */
1644 			if (link.link_status == 0) {
1645 				all_ports_up = 0;
1646 				break;
1647 			}
1648 		}
1649 		/* after finally printing all link status, get out */
1650 		if (print_flag == 1)
1651 			break;
1652 
1653 		if (all_ports_up == 0) {
1654 			fflush(stdout);
1655 			rte_delay_ms(CHECK_INTERVAL);
1656 		}
1657 
1658 		/* set the print_flag if all ports up or timeout */
1659 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1660 			print_flag = 1;
1661 		}
1662 	}
1663 }
1664 
1665 static int
1666 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1667 {
1668 	uint16_t i;
1669 	int diag;
1670 	uint8_t mapping_found = 0;
1671 
1672 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1673 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1674 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1675 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1676 					tx_queue_stats_mappings[i].queue_id,
1677 					tx_queue_stats_mappings[i].stats_counter_id);
1678 			if (diag != 0)
1679 				return diag;
1680 			mapping_found = 1;
1681 		}
1682 	}
1683 	if (mapping_found)
1684 		port->tx_queue_stats_mapping_enabled = 1;
1685 	return 0;
1686 }
1687 
1688 static int
1689 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1690 {
1691 	uint16_t i;
1692 	int diag;
1693 	uint8_t mapping_found = 0;
1694 
1695 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1696 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1697 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1698 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1699 					rx_queue_stats_mappings[i].queue_id,
1700 					rx_queue_stats_mappings[i].stats_counter_id);
1701 			if (diag != 0)
1702 				return diag;
1703 			mapping_found = 1;
1704 		}
1705 	}
1706 	if (mapping_found)
1707 		port->rx_queue_stats_mapping_enabled = 1;
1708 	return 0;
1709 }
1710 
1711 static void
1712 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1713 {
1714 	int diag = 0;
1715 
1716 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1717 	if (diag != 0) {
1718 		if (diag == -ENOTSUP) {
1719 			port->tx_queue_stats_mapping_enabled = 0;
1720 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1721 		}
1722 		else
1723 			rte_exit(EXIT_FAILURE,
1724 					"set_tx_queue_stats_mapping_registers "
1725 					"failed for port id=%d diag=%d\n",
1726 					pi, diag);
1727 	}
1728 
1729 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1730 	if (diag != 0) {
1731 		if (diag == -ENOTSUP) {
1732 			port->rx_queue_stats_mapping_enabled = 0;
1733 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1734 		}
1735 		else
1736 			rte_exit(EXIT_FAILURE,
1737 					"set_rx_queue_stats_mapping_registers "
1738 					"failed for port id=%d diag=%d\n",
1739 					pi, diag);
1740 	}
1741 }
1742 
1743 static void
1744 rxtx_port_config(struct rte_port *port)
1745 {
1746 	port->rx_conf = port->dev_info.default_rxconf;
1747 	port->tx_conf = port->dev_info.default_txconf;
1748 
1749 	/* Check if any RX/TX parameters have been passed */
1750 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1751 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1752 
1753 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1754 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1755 
1756 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1757 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1758 
1759 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1760 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1761 
1762 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1763 		port->rx_conf.rx_drop_en = rx_drop_en;
1764 
1765 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1766 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1767 
1768 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1769 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1770 
1771 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1772 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1773 
1774 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1775 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1776 
1777 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1778 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1779 
1780 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1781 		port->tx_conf.txq_flags = txq_flags;
1782 }
1783 
/*
 * Build the default device configuration of every enabled port: RX mode,
 * flow-director configuration, RSS (only when more than one RX queue),
 * multi-queue mode selection (RSS / VMDQ-RSS / none depending on DCB flag
 * and VF count), per-queue thresholds, MAC address and queue stats
 * mappings.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		/* RSS only makes sense with multiple RX queues. */
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* No DCB and no VFs: plain RSS or no multi-queue at all. */
		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		/* With VFs present, RSS must be combined with VMDQ. */
		if (port->dev_info.max_vfs != 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_VMDQ_RSS;
			else
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_NONE;

			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
1830 
1831 void set_port_slave_flag(portid_t slave_pid)
1832 {
1833 	struct rte_port *port;
1834 
1835 	port = &ports[slave_pid];
1836 	port->slave_flag = 1;
1837 }
1838 
1839 void clear_port_slave_flag(portid_t slave_pid)
1840 {
1841 	struct rte_port *port;
1842 
1843 	port = &ports[slave_pid];
1844 	port->slave_flag = 0;
1845 }
1846 
/* VLAN tag IDs used by get_eth_dcb_conf() to populate the VMDQ+DCB
 * pool map and by init_port_dcb_config() to fill the VLAN filter. */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1853 
/*
 * Fill eth_conf with the RX/TX multi-queue configuration matching
 * dcb_conf: VMDQ+DCB when dcb_conf->dcb_mode == DCB_VT_ENABLED,
 * plain DCB otherwise.  Always returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
        uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configrations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		/* 4 TCs -> 32 pools, 8 TCs -> 16 pools (128 queues total). */
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* Map each VLAN tag to a pool, round-robin over the pools. */
		vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		/* Identity mapping: user priority i -> queue i. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/*set DCB mode of RX and TX of multiple queues*/
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
                                sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
                                sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	}
	else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		/* Identity mapping: user priority i -> queue i. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
                                sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
                                sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}
1930 
/*
 * Put port pid into DCB mode according to dcb_conf.  Note: overrides the
 * global nb_rxq/nb_txq/rx_free_thresh settings for DCB operation, enables
 * HW VLAN filtering and programs the VLAN filter with all vlan_tags[].
 * Returns 0 on success or the negative error of get_eth_dcb_conf().
 */
int
init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf,0,sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++){
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
1972 
1973 static void
1974 init_port(void)
1975 {
1976 	portid_t pid;
1977 
1978 	/* Configuration of Ethernet ports. */
1979 	ports = rte_zmalloc("testpmd: ports",
1980 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1981 			    RTE_CACHE_LINE_SIZE);
1982 	if (ports == NULL) {
1983 		rte_exit(EXIT_FAILURE,
1984 				"rte_zmalloc(%d struct rte_port) failed\n",
1985 				RTE_MAX_ETHPORTS);
1986 	}
1987 
1988 	/* enabled allocated ports */
1989 	for (pid = 0; pid < nb_ports; pid++)
1990 		ports[pid].enabled = 1;
1991 }
1992 
/*
 * testpmd entry point: initialize the EAL, allocate and configure the port
 * contexts, parse the application arguments, start all ports (promiscuous
 * by default), then either enter the interactive command line or start
 * forwarding and wait for the user to press enter.
 */
int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Skip the EAL arguments consumed by rte_eal_init(). */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		/* Non-interactive mode: forward until the user hits enter. */
		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}
2056