/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"
#include "mempool_osdep.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/* DCB-on with VT-on queue mapping is the default. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
	.drop_queue = 127,
};

static volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

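/*
 * Set default Ethernet addresses for the peer target ports: locally
 * administered addresses whose last byte is the port index.
 */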
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

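/* By default, forward on all probed ports, in probing order. */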
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

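/* Reset lcores, peer addresses and ports to their defaults. */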
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};

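/*
 * Per-mbuf constructor: initialize the buffer addresses, buffer length
 * and packet metadata of every mbuf in the pool.
 */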
static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf    *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->type         = RTE_MBUF_PKT;
	mb->pool         = mp;
	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len      = mb_ctor_arg->seg_buf_size;
	mb->ol_flags     = 0;
	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.nb_segs  = 1;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
}

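/*
 * Pool constructor: record the mbuf data room size in the private area
 * of the mempool.
 */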
static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}

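/*
 * Create, on the given socket, a pool of nb_mbuf mbufs whose data
 * segment holds mbuf_seg_size bytes each.
 */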
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
#else
	if (mp_anon != 0)
		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	else
		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
#endif

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(rte_mp);
	}
}

/*
 * Check whether a given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= MAX_SOCKET) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

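/*
 * Global initialization: allocate the forwarding lcore and port arrays,
 * create the mbuf pool(s) and set up the forwarding streams.
 */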
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket, 0, MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
							"failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool * port_per_socket[i]);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size, nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Record which mbuf pool is used by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}

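/*
 * (Re)allocate one forwarding stream per RX queue of each port, after
 * checking the requested queue counts against the port capabilities.
 */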
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
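/*
 * Display the total number of recorded bursts and the distribution of
 * burst sizes for a RX or TX stream.
 */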
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

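/* Display the accumulated forwarding statistics of one port. */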
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* stats fdir */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

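/* Display the forwarding statistics of one stream, if it carried traffic. */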
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum forwarding mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

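/*
 * Drain the RX queues of all forwarding ports and free the received
 * mbufs. Two passes are made, with a 10 ms pause in between.
 */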
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

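/*
 * Run the packet forwarding function on all the streams assigned to a
 * logical core, until the core is told to stop.
 */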
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (!fc->stopped);
}

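/* Logical core entry point for the current forwarding engine. */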
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

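/*
 * Stop packet forwarding: tell the forwarding lcores to stop, then
 * collect and display per-stream, per-port and accumulated statistics.
 */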
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.ierrors  -= port->stats.ierrors;
		port->stats.ierrors = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.ierrors;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

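/* Return 1 if all ports are started, 0 otherwise. */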
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* All ports are started */
	return 1;
}

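/*
 * Configure (if needed) and start the given port, or all ports when
 * pid is RTE_PORT_ALL. Return 0 on success, -1 on failure.
 */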
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if (dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocated "
							"on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

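/* Stop the given port, or all ports when pid is RTE_PORT_ALL. */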
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}

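/* Close the given port, or all ports when pid is RTE_PORT_ALL. */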
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set into closed\n", pi);
	}

	printf("Done\n");
}

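/* Return 1 if all ports are stopped, 0 otherwise. */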
int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

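/* Close all ports on exit. */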
void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf("done\n");
	}
	printf("bye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports in up to 9 s, and print the final status. */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

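/*
 * Apply the TX queue statistics mappings configured for a port and
 * record whether at least one mapping was programmed.
 */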
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

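/* RX counterpart of set_tx_queue_stats_mapping_registers(). */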
static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

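/*
 * Program the RX and TX queue statistics mapping registers of a port,
 * tolerating drivers that do not support them (-ENOTSUP).
 */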
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}

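/*
 * Apply the default RX/TX configuration to every probed port and
 * retrieve its MAC address.
 */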
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* In SR-IOV mode, RSS mode is not available */
		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		port->rx_conf.rx_thresh = rx_thresh;
		port->rx_conf.rx_free_thresh = rx_free_thresh;
		port->rx_conf.rx_drop_en = rx_drop_en;
		port->tx_conf.tx_thresh = tx_thresh;
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
		port->tx_conf.tx_free_thresh = tx_free_thresh;
		port->tx_conf.txq_flags = txq_flags;

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}

const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

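/*
 * Build the device configuration for DCB, either combined with VMDQ
 * (DCB_VT_ENABLED) or standalone, from the requested number of traffic
 * classes.
 */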
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags)/sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	} else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}

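/*
 * Configure a port for DCB: 128 RX/TX queues, VLAN filtering on the
 * vlan_tags array, and the DCB mode described by dcb_conf.
 */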
int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags)/sizeof(vlan_tags[0]);
	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rte_port->rx_conf.rx_thresh = rx_thresh;
	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
	rte_port->tx_conf.tx_thresh = tx_thresh;
	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++) {
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}

#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

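/*
 * Program entry point: initialize the EAL, the drivers and the ports,
 * then enter interactive mode or start forwarding.
 */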
int
main(int argc, char **argv)
{
	int  diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_pmd_init_all())
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
			  "check that "
			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
			  "configuration file\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1)
		prompt();
	else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}