/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"
#include "mempool_osdep.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< Number of mbufs in all pools, if
                                     * specified on the command line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is currently being tested. */
uint8_t dcb_test = 0;

/* DCB-on/VT-on queue mapping is the default. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX queues before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
	.drop_queue = 127,
};

static volatile int test_done = 1; /* nonzero when packet forwarding is stopped */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

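/*
 * Initialize default peer Ethernet addresses: each peer MAC gets the
 * locally administered address prefix in its first byte and the port
 * index in its last byte.
 */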
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};

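/*
 * Per-mbuf constructor, called once for each raw mbuf when the pool is
 * populated. It places the data buffer right after the rte_mbuf header
 * (at the cache-line-rounded offset computed by mbuf_pool_create()) and
 * resets the packet metadata to an empty, single-segment packet.
 */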
static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf    *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->type         = RTE_MBUF_PKT;
	mb->pool         = mp;
	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len      = mb_ctor_arg->seg_buf_size;
	mb->ol_flags     = 0;
	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.nb_segs  = 1;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
}

static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}

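/*
 * Create an mbuf pool for the given socket. Depending on build options
 * and the mp_anon flag, the pool is backed by Xen grant-alloc memory,
 * anonymous mmap'd memory, or the standard rte_mempool allocator.
 */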
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
#else
	if (mp_anon != 0)
		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	else
		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
#endif

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= MAX_SOCKET) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

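/*
 * One-time initialization: allocate the per-lcore forwarding contexts and
 * the port array, create the mbuf pools (one per socket in NUMA mode, a
 * single pool otherwise), bind each lcore to the pool of its socket, and
 * set up the forwarding streams.
 */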
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket, 0, MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
							"failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool *
						port_per_socket[i]);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}

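/*
 * (Re)build the forwarding streams: validate the requested queue counts
 * against each port's capabilities, assign each port a socket id, then
 * allocate one stream per (port, RX queue) pair.
 */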
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* Set the socket id of each port, according to NUMA mode. */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* Also track the second-highest burst count, as
			 * the comment above promises. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* Display flow director statistics, if configured. */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

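/*
 * Drain any packets still sitting in the RX queues of the forwarding
 * ports. Two passes are made with a 10 ms pause in between, presumably so
 * that packets still in flight during the first pass are caught as well.
 */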
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

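/*
 * Main forwarding loop of one lcore: repeatedly run the engine's packet
 * forwarding callback over every stream assigned to this lcore, until the
 * lcore is told to stop.
 */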
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

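/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait for
 * them to finish, run the engine's per-port end callback, fold the
 * per-stream counters back into the ports, and print per-port plus
 * accumulated statistics for the forwarding session.
 */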
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.ierrors  -= port->stats.ierrors;
		port->stats.ierrors = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.ierrors;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* All ports are started */
	return 1;
}

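/*
 * Start the given port, or all ports when pid is out of range (e.g.
 * RTE_PORT_ALL). Each port is moved to the HANDLING state, reconfigured
 * and its queues set up if flagged for it, started, and finally marked
 * STARTED. Return 0 on success, -1 on failure.
 */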
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if (dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Failed to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocation "
							"on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;

				/* Failed to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start port; set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

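/*
 * Stop the given port, or all ports when pid is out of range. Only ports
 * currently in the STARTED state are stopped; leaving DCB test mode is
 * handled here as well.
 */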
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}

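/*
 * Close the given port, or all ports when pid is out of range. A port
 * must be in the STOPPED state before it can be closed.
 */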
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set into closed\n", pi);
	}

	printf("Done\n");
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf("done\n");
	}
	printf("bye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))


/* Check the link status of all ports for up to 9 s, and print the result. */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

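/*
 * Program the per-queue statistics counter registers from the queue-to-
 * counter mappings supplied on the command line (presumably via the
 * --tx-queue-stats-mapping and --rx-queue-stats-mapping options), and
 * record on the port whether any mapping took effect.
 */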
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

1577 
1578 static void
1579 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1580 {
1581 	int diag = 0;
1582 
1583 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1584 	if (diag != 0) {
1585 		if (diag == -ENOTSUP) {
1586 			port->tx_queue_stats_mapping_enabled = 0;
1587 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1588 		}
1589 		else
1590 			rte_exit(EXIT_FAILURE,
1591 					"set_tx_queue_stats_mapping_registers "
1592 					"failed for port id=%d diag=%d\n",
1593 					pi, diag);
1594 	}
1595 
1596 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1597 	if (diag != 0) {
1598 		if (diag == -ENOTSUP) {
1599 			port->rx_queue_stats_mapping_enabled = 0;
1600 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1601 		}
1602 		else
1603 			rte_exit(EXIT_FAILURE,
1604 					"set_rx_queue_stats_mapping_registers "
1605 					"failed for port id=%d diag=%d\n",
1606 					pi, diag);
1607 	}
1608 }
1609 
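/*
 * Apply the default device configuration (RX mode, FDIR, RSS when more
 * than one RX queue is used, threshold registers) to every probed port,
 * fetch its MAC address, and program the queue stats mapping registers.
 */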
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* In SR-IOV mode, RSS mode is not available */
		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		port->rx_conf.rx_thresh = rx_thresh;
		port->rx_conf.rx_free_thresh = rx_free_thresh;
		port->rx_conf.rx_drop_en = rx_drop_en;
		port->tx_conf.tx_thresh = tx_thresh;
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
		port->tx_conf.tx_free_thresh = tx_free_thresh;
		port->tx_conf.txq_flags = txq_flags;

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}

const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

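/*
 * Build the device configuration for DCB: either VMDq+DCB (VT enabled),
 * mapping the vlan_tags array onto the queue pools, or plain DCB with 4
 * or 8 traffic classes, with priority flow control optionally enabled.
 */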
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags)/sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	} else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}

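/*
 * Put one port into DCB mode: force 128 RX/TX queues, build the DCB
 * device configuration, apply the threshold defaults, enable VLAN
 * filtering for every tag in vlan_tags[], and flag the port as
 * DCB-configured.
 */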
int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags)/sizeof(vlan_tags[0]);
	/* set configuration of DCB in VT mode and DCB in non-VT mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rte_port->rx_conf.rx_thresh = rx_thresh;
	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
	rte_port->tx_conf.tx_thresh = tx_thresh;
	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++) {
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}

#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

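/*
 * Entry point: initialize the EAL, PMDs and PCI devices, build the
 * default forwarding configuration, parse the testpmd command line,
 * start all ports in promiscuous mode, then either hand control to the
 * interactive prompt or start forwarding and wait for the user to press
 * enter.
 */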
int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_pmd_init_all())
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
			  "check that CONFIG_RTE_LIBRTE_IGB_PMD=y, "
			  "CONFIG_RTE_LIBRTE_EM_PMD=y and "
			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y are set in your "
			  "configuration file\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No command-line core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}
1858