xref: /dpdk/app/test-pmd/testpmd.c (revision ea672a8b1655bbb44876d2550ff56f384968a43b)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81 
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87 
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96 
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102 
103 /*
104  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107 
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of the peer traffic
112  * generator ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116 
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;	       /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124 
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134 
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137 
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140 
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145 	&io_fwd_engine,
146 	&mac_fwd_engine,
147 	&mac_retry_fwd_engine,
148 	&mac_swap_engine,
149 	&flow_gen_engine,
150 	&rx_only_engine,
151 	&tx_only_engine,
152 	&csum_fwd_engine,
153 	&icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 	&ieee1588_fwd_engine,
156 #endif
157 	NULL,
158 };
159 
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162 
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166 
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 	TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175 
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178 
179 /* Whether the current configuration is in DCB mode; 0 means it is not. */
180 uint8_t dcb_config = 0;
181 
182 /* Whether DCB is in testing status */
183 uint8_t dcb_test = 0;
184 
185 /* DCB-on and VT-on mapping is the default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187 
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193 
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201 
202 /*
203  * Configurable values of RX and TX ring threshold registers.
204  */
205 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
206 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
207 #define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */
208 
209 #define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */
210 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
211 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
212 
213 struct rte_eth_thresh rx_thresh = {
214 	.pthresh = RX_PTHRESH,
215 	.hthresh = RX_HTHRESH,
216 	.wthresh = RX_WTHRESH,
217 };
218 
219 struct rte_eth_thresh tx_thresh = {
220 	.pthresh = TX_PTHRESH,
221 	.hthresh = TX_HTHRESH,
222 	.wthresh = TX_WTHRESH,
223 };
224 
225 /*
226  * Configurable value of RX free threshold.
227  */
228 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
229 
230 /*
231  * Configurable value of RX drop enable.
232  */
233 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
234 
235 /*
236  * Configurable value of TX free threshold.
237  */
238 uint16_t tx_free_thresh = 0; /* Use default values. */
239 
240 /*
241  * Configurable value of TX RS bit threshold.
242  */
243 uint16_t tx_rs_thresh = 0; /* Use default values. */
244 
245 /*
246  * Configurable value of TX queue flags.
247  */
248 uint32_t txq_flags = 0; /* No flags set. */
249 
250 /*
251  * Receive Side Scaling (RSS) configuration.
252  */
253 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
254 
255 /*
256  * Port topology configuration
257  */
258 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
259 
260 /*
261  * Avoid flushing all the RX streams before forwarding starts.
262  */
263 uint8_t no_flush_rx = 0; /* flush by default */
264 
265 /*
266  * Avoid checking the link status when starting/stopping a port.
267  */
268 uint8_t no_link_check = 0; /* check by default */
269 
270 /*
271  * NIC bypass mode configuration options.
272  */
273 #ifdef RTE_NIC_BYPASS
274 
275 /* The NIC bypass watchdog timeout. */
276 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
277 
278 #endif
279 
280 /*
281  * Ethernet device configuration.
282  */
283 struct rte_eth_rxmode rx_mode = {
284 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
285 	.split_hdr_size = 0,
286 	.header_split   = 0, /**< Header Split disabled. */
287 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
288 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
289 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
290 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
291 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
292 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
293 };
294 
295 struct rte_fdir_conf fdir_conf = {
296 	.mode = RTE_FDIR_MODE_NONE,
297 	.pballoc = RTE_FDIR_PBALLOC_64K,
298 	.status = RTE_FDIR_REPORT_STATUS,
299 	.flexbytes_offset = 0x6,
300 	.drop_queue = 127,
301 };
302 
303 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
304 
305 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
306 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
307 
308 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
309 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
310 
311 uint16_t nb_tx_queue_stats_mappings = 0;
312 uint16_t nb_rx_queue_stats_mappings = 0;
313 
314 /* Forward function declarations */
315 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
316 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
317 
318 /*
319  * Check if all the ports are started.
320  * If yes, return positive value. If not, return zero.
321  */
322 static int all_ports_started(void);
323 
324 /*
325  * Setup default configuration.
326  */
327 static void
328 set_default_fwd_lcores_config(void)
329 {
330 	unsigned int i;
331 	unsigned int nb_lc;
332 
333 	nb_lc = 0;
334 	for (i = 0; i < RTE_MAX_LCORE; i++) {
335 		if (! rte_lcore_is_enabled(i))
336 			continue;
337 		if (i == rte_get_master_lcore())
338 			continue;
339 		fwd_lcores_cpuids[nb_lc++] = i;
340 	}
341 	nb_lcores = (lcoreid_t) nb_lc;
342 	nb_cfg_lcores = nb_lcores;
343 	nb_fwd_lcores = 1;
344 }
345 
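/*
 * Set the default Ethernet address of each possible peer port: a locally
 * administered address whose last byte is the port number.
 */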
346 static void
347 set_def_peer_eth_addrs(void)
348 {
349 	portid_t i;
350 
351 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
352 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
353 		peer_eth_addrs[i].addr_bytes[5] = i;
354 	}
355 }
356 
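/*
 * By default, forward on all probed ports, in the order in which they
 * were probed.
 */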
357 static void
358 set_default_fwd_ports_config(void)
359 {
360 	portid_t pt_id;
361 
362 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
363 		fwd_ports_ids[pt_id] = pt_id;
364 
365 	nb_cfg_ports = nb_ports;
366 	nb_fwd_ports = nb_ports;
367 }
368 
369 void
370 set_def_fwd_config(void)
371 {
372 	set_default_fwd_lcores_config();
373 	set_def_peer_eth_addrs();
374 	set_default_fwd_ports_config();
375 }
376 
377 /*
378  * Configuration initialisation done once at init time.
379  */
380 struct mbuf_ctor_arg {
381 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
382 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
383 };
384 
385 struct mbuf_pool_ctor_arg {
386 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
387 };
388 
389 static void
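/*
 * Per-mbuf constructor callback: initialize the fixed fields of each mbuf
 * of the pool (buffer address and length, data pointer at the headroom,
 * single segment, cleared offload metadata).
 */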
390 testpmd_mbuf_ctor(struct rte_mempool *mp,
391 		  void *opaque_arg,
392 		  void *raw_mbuf,
393 		  __attribute__((unused)) unsigned i)
394 {
395 	struct mbuf_ctor_arg *mb_ctor_arg;
396 	struct rte_mbuf    *mb;
397 
398 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
399 	mb = (struct rte_mbuf *) raw_mbuf;
400 
401 	mb->pool         = mp;
402 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
403 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
404 			mb_ctor_arg->seg_buf_offset);
405 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
406 	mb->ol_flags     = 0;
407 	mb->data         = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
408 	mb->nb_segs      = 1;
409 	mb->vlan_macip.data = 0;
410 	mb->hash.rss     = 0;
411 }
412 
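/*
 * Pool constructor callback: record the data room size of the mbufs in
 * the private area of the pool.
 */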
413 static void
414 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
415 		       void *opaque_arg)
416 {
417 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
418 	struct rte_pktmbuf_pool_private *mbp_priv;
419 
420 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
421 		printf("%s(%s) private_data_size %d < %d\n",
422 		       __func__, mp->name, (int) mp->private_data_size,
423 		       (int) sizeof(struct rte_pktmbuf_pool_private));
424 		return;
425 	}
426 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
427 	mbp_priv = rte_mempool_get_priv(mp);
428 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
429 }
430 
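/*
 * Create and initialize a pool of nb_mbuf mbufs on the given socket.
 * testpmd exits on failure, so callers do not check for errors.
 */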
431 static void
432 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
433 		 unsigned int socket_id)
434 {
435 	char pool_name[RTE_MEMPOOL_NAMESIZE];
436 	struct rte_mempool *rte_mp;
437 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
438 	struct mbuf_ctor_arg mb_ctor_arg;
439 	uint32_t mb_size;
440 
441 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
442 						mbuf_seg_size);
443 	mb_ctor_arg.seg_buf_offset =
444 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
445 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
446 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
447 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
448 
449 #ifdef RTE_LIBRTE_PMD_XENVIRT
450 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
451                                    (unsigned) mb_mempool_cache,
452                                    sizeof(struct rte_pktmbuf_pool_private),
453                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
454                                    testpmd_mbuf_ctor, &mb_ctor_arg,
455                                    socket_id, 0);
456 
457 
458 
459 #else
460 	if (mp_anon != 0)
461 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
462 				    (unsigned) mb_mempool_cache,
463 				    sizeof(struct rte_pktmbuf_pool_private),
464 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
465 				    testpmd_mbuf_ctor, &mb_ctor_arg,
466 				    socket_id, 0);
467 	else
468 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
469 				    (unsigned) mb_mempool_cache,
470 				    sizeof(struct rte_pktmbuf_pool_private),
471 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
472 				    testpmd_mbuf_ctor, &mb_ctor_arg,
473 				    socket_id, 0);
474 
475 #endif
476 
477 	if (rte_mp == NULL) {
478 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
479 						"failed\n", socket_id);
480 	} else if (verbose_level > 0) {
481 		rte_mempool_dump(stdout, rte_mp);
482 	}
483 }
484 
485 /*
486  * Check whether a given socket id is valid in NUMA mode;
487  * return 0 if valid, otherwise return -1.
488  */
489 static int
490 check_socket_id(const unsigned int socket_id)
491 {
492 	static int warning_once = 0;
493 
494 	if (socket_id >= MAX_SOCKET) {
495 		if (!warning_once && numa_support)
496 			printf("Warning: NUMA should be configured manually by"
497 			       " using --port-numa-config and"
498 			       " --ring-numa-config parameters along with"
499 			       " --numa.\n");
500 		warning_once = 1;
501 		return -1;
502 	}
503 	return 0;
504 }
505 
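/*
 * One-time initialization: allocate the forwarding lcore and port contexts,
 * create the mbuf pool(s), apply the port configuration and set up the
 * forwarding streams.
 */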
506 static void
507 init_config(void)
508 {
509 	portid_t pid;
510 	struct rte_port *port;
511 	struct rte_mempool *mbp;
512 	unsigned int nb_mbuf_per_pool;
513 	lcoreid_t  lc_id;
514 	uint8_t port_per_socket[MAX_SOCKET];
515 
516 	memset(port_per_socket, 0, MAX_SOCKET);
517 	/* Configuration of logical cores. */
518 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
519 				sizeof(struct fwd_lcore *) * nb_lcores,
520 				CACHE_LINE_SIZE);
521 	if (fwd_lcores == NULL) {
522 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
523 							"failed\n", nb_lcores);
524 	}
525 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
526 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
527 					       sizeof(struct fwd_lcore),
528 					       CACHE_LINE_SIZE);
529 		if (fwd_lcores[lc_id] == NULL) {
530 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
531 								"failed\n");
532 		}
533 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
534 	}
535 
536 	/*
537 	 * Create pools of mbuf.
538 	 * If NUMA support is disabled, create a single pool of mbuf in
539 	 * socket 0 memory by default.
540 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
541 	 *
542 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
543 	 * nb_txd can be configured at run time.
544 	 */
545 	if (param_total_num_mbufs)
546 		nb_mbuf_per_pool = param_total_num_mbufs;
547 	else {
548 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
549 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
550 
551 		if (!numa_support)
552 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
553 	}
554 
555 	if (!numa_support) {
556 		if (socket_num == UMA_NO_CONFIG)
557 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
558 		else
559 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
560 						 socket_num);
561 	}
562 
563 	/* Configuration of Ethernet ports. */
564 	ports = rte_zmalloc("testpmd: ports",
565 			    sizeof(struct rte_port) * nb_ports,
566 			    CACHE_LINE_SIZE);
567 	if (ports == NULL) {
568 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
569 							"failed\n", nb_ports);
570 	}
571 
572 	for (pid = 0; pid < nb_ports; pid++) {
573 		port = &ports[pid];
574 		rte_eth_dev_info_get(pid, &port->dev_info);
575 
576 		if (numa_support) {
577 			if (port_numa[pid] != NUMA_NO_CONFIG)
578 				port_per_socket[port_numa[pid]]++;
579 			else {
580 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
581 
582 				/* if socket_id is invalid, set to 0 */
583 				if (check_socket_id(socket_id) < 0)
584 					socket_id = 0;
585 				port_per_socket[socket_id]++;
586 			}
587 		}
588 
589 		/* set flag to initialize port/queue */
590 		port->need_reconfig = 1;
591 		port->need_reconfig_queues = 1;
592 	}
593 
594 	if (numa_support) {
595 		uint8_t i;
596 		unsigned int nb_mbuf;
597 
598 		if (param_total_num_mbufs)
599 			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
600 
601 		for (i = 0; i < MAX_SOCKET; i++) {
602 			nb_mbuf = (nb_mbuf_per_pool *
603 						port_per_socket[i]);
604 			if (nb_mbuf)
605 				mbuf_pool_create(mbuf_data_size,
606 						nb_mbuf, i);
607 		}
608 	}
609 	init_port_config();
610 
611 	/*
612 	 * Records which Mbuf pool to use by each logical core, if needed.
613 	 */
614 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
615 		mbp = mbuf_pool_find(
616 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
617 
618 		if (mbp == NULL)
619 			mbp = mbuf_pool_find(0);
620 		fwd_lcores[lc_id]->mbp = mbp;
621 	}
622 
623 	/* Configuration of packet forwarding streams. */
624 	if (init_fwd_streams() < 0)
625 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
626 }
627 
628 
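/*
 * Called when a port is added at run time: re-allocate the ports array for
 * the current nb_ports (assumed to have been updated by the caller) and flag
 * the new port for reconfiguration of its device and queues.
 */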
629 void
630 reconfig(portid_t new_port_id)
631 {
632 	struct rte_port *port;
633 
634 	/* Reconfiguration of Ethernet ports. */
635 	ports = rte_realloc(ports,
636 			    sizeof(struct rte_port) * nb_ports,
637 			    CACHE_LINE_SIZE);
638 	if (ports == NULL) {
639 		rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
640 				nb_ports);
641 	}
642 
643 	port = &ports[new_port_id];
644 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
645 
646 	/* set flag to initialize port/queue */
647 	port->need_reconfig = 1;
648 	port->need_reconfig_queues = 1;
649 
650 	init_port_config();
651 }
652 
653 
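/*
 * Allocate one forwarding stream per RX queue of each port, after checking
 * the queue counts against the device limits and assigning each port its
 * NUMA socket. Return 0 on success, -1 on error.
 */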
654 int
655 init_fwd_streams(void)
656 {
657 	portid_t pid;
658 	struct rte_port *port;
659 	streamid_t sm_id, nb_fwd_streams_new;
660 
661 	/* set socket id according to numa or not */
662 	for (pid = 0; pid < nb_ports; pid++) {
663 		port = &ports[pid];
664 		if (nb_rxq > port->dev_info.max_rx_queues) {
665 			printf("Fail: nb_rxq(%d) is greater than "
666 				"max_rx_queues(%d)\n", nb_rxq,
667 				port->dev_info.max_rx_queues);
668 			return -1;
669 		}
670 		if (nb_txq > port->dev_info.max_tx_queues) {
671 			printf("Fail: nb_txq(%d) is greater than "
672 				"max_tx_queues(%d)\n", nb_txq,
673 				port->dev_info.max_tx_queues);
674 			return -1;
675 		}
676 		if (numa_support) {
677 			if (port_numa[pid] != NUMA_NO_CONFIG)
678 				port->socket_id = port_numa[pid];
679 			else {
680 				port->socket_id = rte_eth_dev_socket_id(pid);
681 
682 				/* if socket_id is invalid, set to 0 */
683 				if (check_socket_id(port->socket_id) < 0)
684 					port->socket_id = 0;
685 			}
686 		}
687 		else {
688 			if (socket_num == UMA_NO_CONFIG)
689 				port->socket_id = 0;
690 			else
691 				port->socket_id = socket_num;
692 		}
693 	}
694 
695 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
696 	if (nb_fwd_streams_new == nb_fwd_streams)
697 		return 0;
698 	/* clear the old */
699 	if (fwd_streams != NULL) {
700 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
701 			if (fwd_streams[sm_id] == NULL)
702 				continue;
703 			rte_free(fwd_streams[sm_id]);
704 			fwd_streams[sm_id] = NULL;
705 		}
706 		rte_free(fwd_streams);
707 		fwd_streams = NULL;
708 	}
709 
710 	/* init new */
711 	nb_fwd_streams = nb_fwd_streams_new;
712 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
713 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
714 	if (fwd_streams == NULL)
715 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
716 						"failed\n", nb_fwd_streams);
717 
718 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
719 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
720 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
721 		if (fwd_streams[sm_id] == NULL)
722 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
723 								" failed\n");
724 	}
725 
726 	return 0;
727 }
728 
729 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
730 static void
731 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
732 {
733 	unsigned int total_burst;
734 	unsigned int nb_burst;
735 	unsigned int burst_stats[3];
736 	uint16_t pktnb_stats[3];
737 	uint16_t nb_pkt;
738 	int burst_percent[3];
739 
740 	/*
741 	 * First compute the total number of packet bursts and the
742 	 * two highest numbers of bursts of the same number of packets.
743 	 */
744 	total_burst = 0;
745 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
746 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
747 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
748 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
749 		if (nb_burst == 0)
750 			continue;
751 		total_burst += nb_burst;
752 		if (nb_burst > burst_stats[0]) {
753 			burst_stats[1] = burst_stats[0];
754 			pktnb_stats[1] = pktnb_stats[0];
755 			burst_stats[0] = nb_burst;
756 			pktnb_stats[0] = nb_pkt;
757 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
758 	}
759 	if (total_burst == 0)
760 		return;
761 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
762 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
763 	       burst_percent[0], (int) pktnb_stats[0]);
764 	if (burst_stats[0] == total_burst) {
765 		printf("]\n");
766 		return;
767 	}
768 	if (burst_stats[0] + burst_stats[1] == total_burst) {
769 		printf(" + %d%% of %d pkts]\n",
770 		       100 - burst_percent[0], pktnb_stats[1]);
771 		return;
772 	}
773 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
774 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
775 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
776 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
777 		return;
778 	}
779 	printf(" + %d%% of %d pkts + %d%% of others]\n",
780 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
781 }
782 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
783 
784 static void
785 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
786 {
787 	struct rte_port *port;
788 	uint8_t i;
789 
790 	static const char *fwd_stats_border = "----------------------";
791 
792 	port = &ports[port_id];
793 	printf("\n  %s Forward statistics for port %-2d %s\n",
794 	       fwd_stats_border, port_id, fwd_stats_border);
795 
796 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
797 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
798 		       "%-"PRIu64"\n",
799 		       stats->ipackets, stats->imissed,
800 		       (uint64_t) (stats->ipackets + stats->imissed));
801 
802 		if (cur_fwd_eng == &csum_fwd_engine)
803 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
804 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
805 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
806 			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
807 			       "RX-error: %-"PRIu64"\n",
808 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
809 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
810 		}
811 
812 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
813 		       "%-"PRIu64"\n",
814 		       stats->opackets, port->tx_dropped,
815 		       (uint64_t) (stats->opackets + port->tx_dropped));
816 	}
817 	else {
818 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
819 		       "%14"PRIu64"\n",
820 		       stats->ipackets, stats->imissed,
821 		       (uint64_t) (stats->ipackets + stats->imissed));
822 
823 		if (cur_fwd_eng == &csum_fwd_engine)
824 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
825 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
826 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
827 			printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
828 			       "    RX-error:%"PRIu64"\n",
829 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
830 			printf("  RX-nombufs:             %14"PRIu64"\n",
831 			       stats->rx_nombuf);
832 		}
833 
834 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
835 		       "%14"PRIu64"\n",
836 		       stats->opackets, port->tx_dropped,
837 		       (uint64_t) (stats->opackets + port->tx_dropped));
838 	}
839 
840 	/* Display statistics of XON/XOFF pause frames, if any. */
841 	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
842 	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
843 		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
844 		       stats->rx_pause_xoff, stats->rx_pause_xon);
845 		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
846 		       stats->tx_pause_xoff, stats->tx_pause_xon);
847 	}
848 
849 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
850 	if (port->rx_stream)
851 		pkt_burst_stats_display("RX",
852 			&port->rx_stream->rx_burst_stats);
853 	if (port->tx_stream)
854 		pkt_burst_stats_display("TX",
855 			&port->tx_stream->tx_burst_stats);
856 #endif
857 	/* stats fdir */
858 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
859 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
860 		       stats->fdirmiss,
861 		       stats->fdirmatch);
862 
863 	if (port->rx_queue_stats_mapping_enabled) {
864 		printf("\n");
865 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
866 			printf("  Stats reg %2d RX-packets:%14"PRIu64
867 			       "     RX-errors:%14"PRIu64
868 			       "    RX-bytes:%14"PRIu64"\n",
869 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
870 		}
871 		printf("\n");
872 	}
873 	if (port->tx_queue_stats_mapping_enabled) {
874 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
875 			printf("  Stats reg %2d TX-packets:%14"PRIu64
876 			       "                                 TX-bytes:%14"PRIu64"\n",
877 			       i, stats->q_opackets[i], stats->q_obytes[i]);
878 		}
879 	}
880 
881 	printf("  %s--------------------------------%s\n",
882 	       fwd_stats_border, fwd_stats_border);
883 }
884 
885 static void
886 fwd_stream_stats_display(streamid_t stream_id)
887 {
888 	struct fwd_stream *fs;
889 	static const char *fwd_top_stats_border = "-------";
890 
891 	fs = fwd_streams[stream_id];
892 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
893 	    (fs->fwd_dropped == 0))
894 		return;
895 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
896 	       "TX Port=%2d/Queue=%2d %s\n",
897 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
898 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
899 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
900 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
901 
902 	/* if in checksum forwarding mode */
903 	if (cur_fwd_eng == &csum_fwd_engine) {
904 		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
905 		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
906 	}
907 
908 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
909 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
910 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
911 #endif
912 }
913 
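/*
 * Drain the packets pending in the RX queues of all forwarding ports so
 * that a new test starts from a clean state. Two passes are done, 10 ms
 * apart, to catch packets still in flight.
 */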
914 static void
915 flush_fwd_rx_queues(void)
916 {
917 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
918 	portid_t  rxp;
919 	portid_t port_id;
920 	queueid_t rxq;
921 	uint16_t  nb_rx;
922 	uint16_t  i;
923 	uint8_t   j;
924 
925 	for (j = 0; j < 2; j++) {
926 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
927 			for (rxq = 0; rxq < nb_rxq; rxq++) {
928 				port_id = fwd_ports_ids[rxp];
929 				do {
930 					nb_rx = rte_eth_rx_burst(port_id, rxq,
931 						pkts_burst, MAX_PKT_BURST);
932 					for (i = 0; i < nb_rx; i++)
933 						rte_pktmbuf_free(pkts_burst[i]);
934 				} while (nb_rx > 0);
935 			}
936 		}
937 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
938 	}
939 }
940 
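/*
 * Forwarding loop of one logical core: repeatedly run the packet forwarding
 * function on every stream assigned to this lcore until it is told to stop.
 */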
941 static void
942 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
943 {
944 	struct fwd_stream **fsm;
945 	streamid_t nb_fs;
946 	streamid_t sm_id;
947 
948 	fsm = &fwd_streams[fc->stream_idx];
949 	nb_fs = fc->stream_nb;
950 	do {
951 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
952 			(*pkt_fwd)(fsm[sm_id]);
953 	} while (! fc->stopped);
954 }
955 
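/* Entry point launched on each forwarding lcore by launch_packet_forwarding(). */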
956 static int
957 start_pkt_forward_on_core(void *fwd_arg)
958 {
959 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
960 			     cur_fwd_config.fwd_eng->packet_fwd);
961 	return 0;
962 }
963 
964 /*
965  * Run the TXONLY packet forwarding engine to send a single burst of packets.
966  * Used to start communication flows in network loopback test configurations.
967  */
968 static int
969 run_one_txonly_burst_on_core(void *fwd_arg)
970 {
971 	struct fwd_lcore *fwd_lc;
972 	struct fwd_lcore tmp_lcore;
973 
974 	fwd_lc = (struct fwd_lcore *) fwd_arg;
975 	tmp_lcore = *fwd_lc;
976 	tmp_lcore.stopped = 1;
977 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
978 	return 0;
979 }
980 
981 /*
982  * Launch packet forwarding:
983  *     - Setup per-port forwarding context.
984  *     - launch logical cores with their forwarding configuration.
985  */
986 static void
987 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
988 {
989 	port_fwd_begin_t port_fwd_begin;
990 	unsigned int i;
991 	unsigned int lc_id;
992 	int diag;
993 
994 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
995 	if (port_fwd_begin != NULL) {
996 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
997 			(*port_fwd_begin)(fwd_ports_ids[i]);
998 	}
999 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1000 		lc_id = fwd_lcores_cpuids[i];
1001 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1002 			fwd_lcores[i]->stopped = 0;
1003 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1004 						     fwd_lcores[i], lc_id);
1005 			if (diag != 0)
1006 				printf("launch lcore %u failed - diag=%d\n",
1007 				       lc_id, diag);
1008 		}
1009 	}
1010 }
1011 
1012 /*
1013  * Launch packet forwarding configuration.
1014  */
1015 void
1016 start_packet_forwarding(int with_tx_first)
1017 {
1018 	port_fwd_begin_t port_fwd_begin;
1019 	port_fwd_end_t  port_fwd_end;
1020 	struct rte_port *port;
1021 	unsigned int i;
1022 	portid_t   pt_id;
1023 	streamid_t sm_id;
1024 
1025 	if (all_ports_started() == 0) {
1026 		printf("Not all ports were started\n");
1027 		return;
1028 	}
1029 	if (test_done == 0) {
1030 		printf("Packet forwarding already started\n");
1031 		return;
1032 	}
1033 	if (dcb_test) {
1034 		for (i = 0; i < nb_fwd_ports; i++) {
1035 			pt_id = fwd_ports_ids[i];
1036 			port = &ports[pt_id];
1037 			if (!port->dcb_flag) {
1038 				printf("In DCB mode, all forwarding ports must "
1039 				       "be configured in this mode.\n");
1040 				return;
1041 			}
1042 		}
1043 		if (nb_fwd_lcores == 1) {
1044 			printf("In DCB mode, the number of forwarding cores "
1045 			       "should be larger than 1.\n");
1046 			return;
1047 		}
1048 	}
1049 	test_done = 0;
1050 
1051 	if (!no_flush_rx)
1052 		flush_fwd_rx_queues();
1053 
1054 	fwd_config_setup();
1055 	rxtx_config_display();
1056 
1057 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1058 		pt_id = fwd_ports_ids[i];
1059 		port = &ports[pt_id];
1060 		rte_eth_stats_get(pt_id, &port->stats);
1061 		port->tx_dropped = 0;
1062 
1063 		map_port_queue_stats_mapping_registers(pt_id, port);
1064 	}
1065 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1066 		fwd_streams[sm_id]->rx_packets = 0;
1067 		fwd_streams[sm_id]->tx_packets = 0;
1068 		fwd_streams[sm_id]->fwd_dropped = 0;
1069 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1070 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1071 
1072 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1073 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1074 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1075 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1076 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1077 #endif
1078 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1079 		fwd_streams[sm_id]->core_cycles = 0;
1080 #endif
1081 	}
1082 	if (with_tx_first) {
1083 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1084 		if (port_fwd_begin != NULL) {
1085 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1086 				(*port_fwd_begin)(fwd_ports_ids[i]);
1087 		}
1088 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1089 		rte_eal_mp_wait_lcore();
1090 		port_fwd_end = tx_only_engine.port_fwd_end;
1091 		if (port_fwd_end != NULL) {
1092 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1093 				(*port_fwd_end)(fwd_ports_ids[i]);
1094 		}
1095 	}
1096 	launch_packet_forwarding(start_pkt_forward_on_core);
1097 }
1098 
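/*
 * Stop packet forwarding: tell the forwarding lcores to stop, wait for them
 * to finish, then collect and display the per-stream, per-port and
 * accumulated statistics of the completed run.
 */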
1099 void
1100 stop_packet_forwarding(void)
1101 {
1102 	struct rte_eth_stats stats;
1103 	struct rte_port *port;
1104 	port_fwd_end_t  port_fwd_end;
1105 	int i;
1106 	portid_t   pt_id;
1107 	streamid_t sm_id;
1108 	lcoreid_t  lc_id;
1109 	uint64_t total_recv;
1110 	uint64_t total_xmit;
1111 	uint64_t total_rx_dropped;
1112 	uint64_t total_tx_dropped;
1113 	uint64_t total_rx_nombuf;
1114 	uint64_t tx_dropped;
1115 	uint64_t rx_bad_ip_csum;
1116 	uint64_t rx_bad_l4_csum;
1117 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1118 	uint64_t fwd_cycles;
1119 #endif
1120 	static const char *acc_stats_border = "+++++++++++++++";
1121 
1122 	if (all_ports_started() == 0) {
1123 		printf("Not all ports were started\n");
1124 		return;
1125 	}
1126 	if (test_done) {
1127 		printf("Packet forwarding not started\n");
1128 		return;
1129 	}
1130 	printf("Telling cores to stop...");
1131 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1132 		fwd_lcores[lc_id]->stopped = 1;
1133 	printf("\nWaiting for lcores to finish...\n");
1134 	rte_eal_mp_wait_lcore();
1135 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1136 	if (port_fwd_end != NULL) {
1137 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1138 			pt_id = fwd_ports_ids[i];
1139 			(*port_fwd_end)(pt_id);
1140 		}
1141 	}
1142 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1143 	fwd_cycles = 0;
1144 #endif
1145 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1146 		if (cur_fwd_config.nb_fwd_streams >
1147 		    cur_fwd_config.nb_fwd_ports) {
1148 			fwd_stream_stats_display(sm_id);
1149 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1150 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1151 		} else {
1152 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1153 				fwd_streams[sm_id];
1154 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1155 				fwd_streams[sm_id];
1156 		}
1157 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1158 		tx_dropped = (uint64_t) (tx_dropped +
1159 					 fwd_streams[sm_id]->fwd_dropped);
1160 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1161 
1162 		rx_bad_ip_csum =
1163 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1164 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1165 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1166 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1167 							rx_bad_ip_csum;
1168 
1169 		rx_bad_l4_csum =
1170 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1171 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1172 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1173 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1174 							rx_bad_l4_csum;
1175 
1176 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1177 		fwd_cycles = (uint64_t) (fwd_cycles +
1178 					 fwd_streams[sm_id]->core_cycles);
1179 #endif
1180 	}
1181 	total_recv = 0;
1182 	total_xmit = 0;
1183 	total_rx_dropped = 0;
1184 	total_tx_dropped = 0;
1185 	total_rx_nombuf  = 0;
1186 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1187 		pt_id = fwd_ports_ids[i];
1188 
1189 		port = &ports[pt_id];
1190 		rte_eth_stats_get(pt_id, &stats);
1191 		stats.ipackets -= port->stats.ipackets;
1192 		port->stats.ipackets = 0;
1193 		stats.opackets -= port->stats.opackets;
1194 		port->stats.opackets = 0;
1195 		stats.ibytes   -= port->stats.ibytes;
1196 		port->stats.ibytes = 0;
1197 		stats.obytes   -= port->stats.obytes;
1198 		port->stats.obytes = 0;
1199 		stats.imissed  -= port->stats.imissed;
1200 		port->stats.imissed = 0;
1201 		stats.oerrors  -= port->stats.oerrors;
1202 		port->stats.oerrors = 0;
1203 		stats.rx_nombuf -= port->stats.rx_nombuf;
1204 		port->stats.rx_nombuf = 0;
1205 		stats.fdirmatch -= port->stats.fdirmatch;
1206 		port->stats.fdirmatch = 0;
1207 		stats.fdirmiss -= port->stats.fdirmiss;
1208 		port->stats.fdirmiss = 0;
1209 
1210 		total_recv += stats.ipackets;
1211 		total_xmit += stats.opackets;
1212 		total_rx_dropped += stats.imissed;
1213 		total_tx_dropped += port->tx_dropped;
1214 		total_rx_nombuf  += stats.rx_nombuf;
1215 
1216 		fwd_port_stats_display(pt_id, &stats);
1217 	}
1218 	printf("\n  %s Accumulated forward statistics for all ports"
1219 	       "%s\n",
1220 	       acc_stats_border, acc_stats_border);
1221 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1222 	       "%-"PRIu64"\n"
1223 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1224 	       "%-"PRIu64"\n",
1225 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1226 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1227 	if (total_rx_nombuf > 0)
1228 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1229 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1230 	       "%s\n",
1231 	       acc_stats_border, acc_stats_border);
1232 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1233 	if (total_recv > 0)
1234 		printf("\n  CPU cycles/packet=%u (total cycles="
1235 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1236 		       (unsigned int)(fwd_cycles / total_recv),
1237 		       fwd_cycles, total_recv);
1238 #endif
1239 	printf("\nDone.\n");
1240 	test_done = 1;
1241 }
1242 
1243 void
1244 dev_set_link_up(portid_t pid)
1245 {
1246 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1247 		printf("\nSet link up failed.\n");
1248 }
1249 
1250 void
1251 dev_set_link_down(portid_t pid)
1252 {
1253 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1254 		printf("\nSet link down failed.\n");
1255 }
1256 
1257 static int
1258 all_ports_started(void)
1259 {
1260 	portid_t pi;
1261 	struct rte_port *port;
1262 
1263 	for (pi = 0; pi < nb_ports; pi++) {
1264 		port = &ports[pi];
1265 		/* Check if there is a port which is not started */
1266 		if (port->port_status != RTE_PORT_STARTED)
1267 			return 0;
1268 	}
1269 
1270 	/* All ports are started */
1271 	return 1;
1272 }
1273 
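/*
 * Configure (when flagged) and start the given port, or all ports when pid
 * does not select a single port (RTE_PORT_ALL). Return 0 on success, -1 on
 * a configuration failure.
 */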
1274 int
1275 start_port(portid_t pid)
1276 {
1277 	int diag, need_check_link_status = 0;
1278 	portid_t pi;
1279 	queueid_t qi;
1280 	struct rte_port *port;
1281 	struct ether_addr mac_addr;
1282 
1283 	if (test_done == 0) {
1284 		printf("Please stop forwarding first\n");
1285 		return -1;
1286 	}
1287 
1288 	if (init_fwd_streams() < 0) {
1289 		printf("Fail from init_fwd_streams()\n");
1290 		return -1;
1291 	}
1292 
1293 	if(dcb_config)
1294 		dcb_test = 1;
1295 	for (pi = 0; pi < nb_ports; pi++) {
1296 		if (pid < nb_ports && pid != pi)
1297 			continue;
1298 
1299 		port = &ports[pi];
1300 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1301 						 RTE_PORT_HANDLING) == 0) {
1302 			printf("Port %d is not stopped\n", pi);
1303 			continue;
1304 		}
1305 
1306 		if (port->need_reconfig > 0) {
1307 			port->need_reconfig = 0;
1308 
1309 			printf("Configuring Port %d (socket %u)\n", pi,
1310 					port->socket_id);
1311 			/* configure port */
1312 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1313 						&(port->dev_conf));
1314 			if (diag != 0) {
1315 				if (rte_atomic16_cmpset(&(port->port_status),
1316 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1317 					printf("Port %d can not be set back "
1318 							"to stopped\n", pi);
1319 				printf("Fail to configure port %d\n", pi);
1320 				/* try to reconfigure port next time */
1321 				port->need_reconfig = 1;
1322 				return -1;
1323 			}
1324 		}
1325 		if (port->need_reconfig_queues > 0) {
1326 			port->need_reconfig_queues = 0;
1327 			/* setup tx queues */
1328 			for (qi = 0; qi < nb_txq; qi++) {
1329 				if ((numa_support) &&
1330 					(txring_numa[pi] != NUMA_NO_CONFIG))
1331 					diag = rte_eth_tx_queue_setup(pi, qi,
1332 						nb_txd, txring_numa[pi],
1333 						&(port->tx_conf));
1334 				else
1335 					diag = rte_eth_tx_queue_setup(pi, qi,
1336 						nb_txd, port->socket_id,
1337 						&(port->tx_conf));
1338 
1339 				if (diag == 0)
1340 					continue;
1341 
1342 				/* Fail to setup tx queue, return */
1343 				if (rte_atomic16_cmpset(&(port->port_status),
1344 							RTE_PORT_HANDLING,
1345 							RTE_PORT_STOPPED) == 0)
1346 					printf("Port %d can not be set back "
1347 							"to stopped\n", pi);
1348 				printf("Fail to configure port %d tx queues\n", pi);
1349 				/* try to reconfigure queues next time */
1350 				port->need_reconfig_queues = 1;
1351 				return -1;
1352 			}
1353 			/* setup rx queues */
1354 			for (qi = 0; qi < nb_rxq; qi++) {
1355 				if ((numa_support) &&
1356 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1357 					struct rte_mempool * mp =
1358 						mbuf_pool_find(rxring_numa[pi]);
1359 					if (mp == NULL) {
1360 						printf("Failed to setup RX queue: "
1361 							"no mempool allocation "
1362 							"on socket %d\n",
1363 							rxring_numa[pi]);
1364 						return -1;
1365 					}
1366 
1367 					diag = rte_eth_rx_queue_setup(pi, qi,
1368 					     nb_rxd, rxring_numa[pi],
1369 					     &(port->rx_conf), mp);
1370 				}
1371 				else
1372 					diag = rte_eth_rx_queue_setup(pi, qi,
1373 					     nb_rxd, port->socket_id,
1374 					     &(port->rx_conf),
1375 					     mbuf_pool_find(port->socket_id));
1376 
1377 				if (diag == 0)
1378 					continue;
1379 
1380 
1381 				/* Fail to setup rx queue, return */
1382 				if (rte_atomic16_cmpset(&(port->port_status),
1383 							RTE_PORT_HANDLING,
1384 							RTE_PORT_STOPPED) == 0)
1385 					printf("Port %d can not be set back "
1386 							"to stopped\n", pi);
1387 				printf("Fail to configure port %d rx queues\n", pi);
1388 				/* try to reconfigure queues next time */
1389 				port->need_reconfig_queues = 1;
1390 				return -1;
1391 			}
1392 		}
1393 		/* start port */
1394 		if (rte_eth_dev_start(pi) < 0) {
1395 			printf("Fail to start port %d\n", pi);
1396 
1397 			/* Failed to start port: set it back to stopped */
1398 			if (rte_atomic16_cmpset(&(port->port_status),
1399 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1400 				printf("Port %d can not be set back to "
1401 							"stopped\n", pi);
1402 			continue;
1403 		}
1404 
1405 		if (rte_atomic16_cmpset(&(port->port_status),
1406 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1407 			printf("Port %d can not be set into started\n", pi);
1408 
1409 		rte_eth_macaddr_get(pi, &mac_addr);
1410 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1411 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1412 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1413 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1414 
1415 		/* at least one port started, need to check link status */
1416 		need_check_link_status = 1;
1417 	}
1418 
1419 	if (need_check_link_status && !no_link_check)
1420 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1421 	else
1422 		printf("Please stop the ports first\n");
1423 
1424 	printf("Done\n");
1425 	return 0;
1426 }
1427 
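/* Stop the given started port, or all ports when pid is RTE_PORT_ALL. */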
1428 void
1429 stop_port(portid_t pid)
1430 {
1431 	portid_t pi;
1432 	struct rte_port *port;
1433 	int need_check_link_status = 0;
1434 
1435 	if (test_done == 0) {
1436 		printf("Please stop forwarding first\n");
1437 		return;
1438 	}
1439 	if (dcb_test) {
1440 		dcb_test = 0;
1441 		dcb_config = 0;
1442 	}
1443 	printf("Stopping ports...\n");
1444 
1445 	for (pi = 0; pi < nb_ports; pi++) {
1446 		if (pid < nb_ports && pid != pi)
1447 			continue;
1448 
1449 		port = &ports[pi];
1450 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1451 						RTE_PORT_HANDLING) == 0)
1452 			continue;
1453 
1454 		rte_eth_dev_stop(pi);
1455 
1456 		if (rte_atomic16_cmpset(&(port->port_status),
1457 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1458 			printf("Port %d can not be set into stopped\n", pi);
1459 		need_check_link_status = 1;
1460 	}
1461 	if (need_check_link_status && !no_link_check)
1462 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1463 
1464 	printf("Done\n");
1465 }
1466 
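/* Close the given stopped port, or all ports when pid is RTE_PORT_ALL. */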
1467 void
1468 close_port(portid_t pid)
1469 {
1470 	portid_t pi;
1471 	struct rte_port *port;
1472 
1473 	if (test_done == 0) {
1474 		printf("Please stop forwarding first\n");
1475 		return;
1476 	}
1477 
1478 	printf("Closing ports...\n");
1479 
1480 	for (pi = 0; pi < nb_ports; pi++) {
1481 		if (pid < nb_ports && pid != pi)
1482 			continue;
1483 
1484 		port = &ports[pi];
1485 		if (rte_atomic16_cmpset(&(port->port_status),
1486 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1487 			printf("Port %d is not stopped\n", pi);
1488 			continue;
1489 		}
1490 
1491 		rte_eth_dev_close(pi);
1492 
1493 		if (rte_atomic16_cmpset(&(port->port_status),
1494 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1495 			printf("Port %d can not be set into closed\n", pi);
1496 	}
1497 
1498 	printf("Done\n");
1499 }
1500 
1501 int
1502 all_ports_stopped(void)
1503 {
1504 	portid_t pi;
1505 	struct rte_port *port;
1506 
1507 	for (pi = 0; pi < nb_ports; pi++) {
1508 		port = &ports[pi];
1509 		if (port->port_status != RTE_PORT_STOPPED)
1510 			return 0;
1511 	}
1512 
1513 	return 1;
1514 }
1515 
1516 int
1517 port_is_started(portid_t port_id)
1518 {
1519 	if (port_id_is_invalid(port_id))
1520 		return -1;
1521 
1522 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1523 		return 0;
1524 
1525 	return 1;
1526 }
1527 
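/* Close all ports when testpmd exits. */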
1528 void
1529 pmd_test_exit(void)
1530 {
1531 	portid_t pt_id;
1532 
1533 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1534 		printf("Stopping port %d...", pt_id);
1535 		fflush(stdout);
1536 		rte_eth_dev_close(pt_id);
1537 		printf("done\n");
1538 	}
1539 	printf("bye...\n");
1540 }
1541 
1542 typedef void (*cmd_func_t)(void);
1543 struct pmd_test_command {
1544 	const char *cmd_name;
1545 	cmd_func_t cmd_func;
1546 };
1547 
1548 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1549 
1550 /* Check the link status of all ports for up to 9s, and print the final status */
1551 static void
1552 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1553 {
1554 #define CHECK_INTERVAL 100 /* 100ms */
1555 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1556 	uint8_t portid, count, all_ports_up, print_flag = 0;
1557 	struct rte_eth_link link;
1558 
1559 	printf("Checking link statuses...\n");
1560 	fflush(stdout);
1561 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1562 		all_ports_up = 1;
1563 		for (portid = 0; portid < port_num; portid++) {
1564 			if ((port_mask & (1 << portid)) == 0)
1565 				continue;
1566 			memset(&link, 0, sizeof(link));
1567 			rte_eth_link_get_nowait(portid, &link);
1568 			/* print link status if flag set */
1569 			if (print_flag == 1) {
1570 				if (link.link_status)
1571 					printf("Port %d Link Up - speed %u "
1572 						"Mbps - %s\n", (uint8_t)portid,
1573 						(unsigned)link.link_speed,
1574 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1575 					("full-duplex") : ("half-duplex"));
1576 				else
1577 					printf("Port %d Link Down\n",
1578 						(uint8_t)portid);
1579 				continue;
1580 			}
1581 			/* clear all_ports_up flag if any link down */
1582 			if (link.link_status == 0) {
1583 				all_ports_up = 0;
1584 				break;
1585 			}
1586 		}
1587 		/* after finally printing all link status, get out */
1588 		if (print_flag == 1)
1589 			break;
1590 
1591 		if (all_ports_up == 0) {
1592 			fflush(stdout);
1593 			rte_delay_ms(CHECK_INTERVAL);
1594 		}
1595 
1596 		/* set the print_flag if all ports up or timeout */
1597 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1598 			print_flag = 1;
1599 		}
1600 	}
1601 }
1602 
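/*
 * Program the TX queue to stats-counter mappings given on the command line
 * into the port registers, and remember whether at least one mapping was
 * applied.
 */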
1603 static int
1604 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1605 {
1606 	uint16_t i;
1607 	int diag;
1608 	uint8_t mapping_found = 0;
1609 
1610 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1611 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1612 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1613 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1614 					tx_queue_stats_mappings[i].queue_id,
1615 					tx_queue_stats_mappings[i].stats_counter_id);
1616 			if (diag != 0)
1617 				return diag;
1618 			mapping_found = 1;
1619 		}
1620 	}
1621 	if (mapping_found)
1622 		port->tx_queue_stats_mapping_enabled = 1;
1623 	return 0;
1624 }
1625 
1626 static int
1627 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1628 {
1629 	uint16_t i;
1630 	int diag;
1631 	uint8_t mapping_found = 0;
1632 
1633 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1634 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1635 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1636 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1637 					rx_queue_stats_mappings[i].queue_id,
1638 					rx_queue_stats_mappings[i].stats_counter_id);
1639 			if (diag != 0)
1640 				return diag;
1641 			mapping_found = 1;
1642 		}
1643 	}
1644 	if (mapping_found)
1645 		port->rx_queue_stats_mapping_enabled = 1;
1646 	return 0;
1647 }
1648 
1649 static void
1650 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1651 {
1652 	int diag = 0;
1653 
1654 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1655 	if (diag != 0) {
1656 		if (diag == -ENOTSUP) {
1657 			port->tx_queue_stats_mapping_enabled = 0;
1658 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1659 		}
1660 		else
1661 			rte_exit(EXIT_FAILURE,
1662 					"set_tx_queue_stats_mapping_registers "
1663 					"failed for port id=%d diag=%d\n",
1664 					pi, diag);
1665 	}
1666 
1667 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1668 	if (diag != 0) {
1669 		if (diag == -ENOTSUP) {
1670 			port->rx_queue_stats_mapping_enabled = 0;
1671 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1672 		}
1673 		else
1674 			rte_exit(EXIT_FAILURE,
1675 					"set_rx_queue_stats_mapping_registers "
1676 					"failed for port id=%d diag=%d\n",
1677 					pi, diag);
1678 	}
1679 }
1680 
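/*
 * Apply the global configuration (RX mode, flow director, RSS, ring
 * thresholds, queue stats mappings) to every probed port.
 */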
1681 void
1682 init_port_config(void)
1683 {
1684 	portid_t pid;
1685 	struct rte_port *port;
1686 
1687 	for (pid = 0; pid < nb_ports; pid++) {
1688 		port = &ports[pid];
1689 		port->dev_conf.rxmode = rx_mode;
1690 		port->dev_conf.fdir_conf = fdir_conf;
1691 		if (nb_rxq > 1) {
1692 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1693 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1694 		} else {
1695 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1696 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1697 		}
1698 
1699 		/* In SR-IOV mode, RSS mode is not available */
1700 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1701 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1702 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1703 			else
1704 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1705 		}
1706 
1707 		port->rx_conf.rx_thresh = rx_thresh;
1708 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1709 		port->rx_conf.rx_drop_en = rx_drop_en;
1710 		port->tx_conf.tx_thresh = tx_thresh;
1711 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1712 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1713 		port->tx_conf.txq_flags = txq_flags;
1714 
1715 		rte_eth_macaddr_get(pid, &port->eth_addr);
1716 
1717 		map_port_queue_stats_mapping_registers(pid, port);
1718 #ifdef RTE_NIC_BYPASS
1719 		rte_eth_dev_bypass_init(pid);
1720 #endif
1721 	}
1722 }
1723 
1724 const uint16_t vlan_tags[] = {
1725 		0,  1,  2,  3,  4,  5,  6,  7,
1726 		8,  9, 10, 11,  12, 13, 14, 15,
1727 		16, 17, 18, 19, 20, 21, 22, 23,
1728 		24, 25, 26, 27, 28, 29, 30, 31
1729 };
1730 
1731 static  int
1732 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1733 {
1734 	uint8_t i;
1735 
1736 	/*
1737 	 * Build up the correct configuration for DCB+VT based on the vlan tags
1738 	 * array given above, and the number of traffic classes available for use.
1739 	 */
1740 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1741 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1742 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1743 
1744 		/* VMDQ+DCB RX and TX configurations */
1745 		vmdq_rx_conf.enable_default_pool = 0;
1746 		vmdq_rx_conf.default_pool = 0;
1747 		vmdq_rx_conf.nb_queue_pools =
1748 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1749 		vmdq_tx_conf.nb_queue_pools =
1750 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1751 
1752 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1753 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1754 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1755 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1756 		}
1757 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1758 			vmdq_rx_conf.dcb_queue[i] = i;
1759 			vmdq_tx_conf.dcb_queue[i] = i;
1760 		}
1761 
1762 		/* Set DCB mode of RX and TX of multiple queues */
1763 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1764 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1765 		if (dcb_conf->pfc_en)
1766 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1767 		else
1768 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1769 
1770 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1771                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1772 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1773                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1774 	}
1775 	else {
1776 		struct rte_eth_dcb_rx_conf rx_conf;
1777 		struct rte_eth_dcb_tx_conf tx_conf;
1778 
1779 		/* queue mapping configuration of DCB RX and TX */
1780 		if (dcb_conf->num_tcs == ETH_4_TCS)
1781 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1782 		else
1783 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1784 
1785 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1786 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1787 
1788 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1789 			rx_conf.dcb_queue[i] = i;
1790 			tx_conf.dcb_queue[i] = i;
1791 		}
1792 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1793 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1794 		if (dcb_conf->pfc_en)
1795 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1796 		else
1797 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1798 
1799 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1800                                 sizeof(struct rte_eth_dcb_rx_conf)));
1801 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1802                                 sizeof(struct rte_eth_dcb_tx_conf)));
1803 	}
1804 
1805 	return 0;
1806 }
1807 
1808 int
1809 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1810 {
1811 	struct rte_eth_conf port_conf;
1812 	struct rte_port *rte_port;
1813 	int retval;
1814 	uint16_t nb_vlan;
1815 	uint16_t i;
1816 
1817 	/* rxq and txq configuration in dcb mode */
1818 	nb_rxq = 128;
1819 	nb_txq = 128;
1820 	rx_free_thresh = 64;
1821 
1822 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1823 	/* Enter DCB configuration status */
1824 	dcb_config = 1;
1825 
1826 	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1827 	/* Set the configuration of DCB in VT mode and DCB in non-VT mode */
1828 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1829 	if (retval < 0)
1830 		return retval;
1831 
1832 	rte_port = &ports[pid];
1833 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1834 
1835 	rte_port->rx_conf.rx_thresh = rx_thresh;
1836 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1837 	rte_port->tx_conf.tx_thresh = tx_thresh;
1838 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1839 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1840 	/* VLAN filter */
1841 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1842 	for (i = 0; i < nb_vlan; i++) {
1843 		rx_vft_set(pid, vlan_tags[i], 1);
1844 	}
1845 
1846 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1847 	map_port_queue_stats_mapping_registers(pid, rte_port);
1848 
1849 	rte_port->dcb_flag = 1;
1850 
1851 	return 0;
1852 }
1853 
1854 #ifdef RTE_EXEC_ENV_BAREMETAL
1855 #define main _main
1856 #endif
1857 
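/*
 * Program entry point: initialize the EAL, probe the Ethernet devices,
 * parse the testpmd arguments, configure and start all ports, then either
 * enter the interactive prompt or start forwarding directly.
 *
 * Illustrative invocation (the EAL options depend on the target machine):
 *     testpmd -c 0xf -n 4 -- -i --nb-cores=2 --rxq=1 --txq=1
 */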
1858 int
1859 main(int argc, char** argv)
1860 {
1861 	int  diag;
1862 	uint8_t port_id;
1863 
1864 	diag = rte_eal_init(argc, argv);
1865 	if (diag < 0)
1866 		rte_panic("Cannot init EAL\n");
1867 
1868 	nb_ports = (portid_t) rte_eth_dev_count();
1869 	if (nb_ports == 0)
1870 		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1871 			  "check that "
1872 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1873 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1874 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1875 			  "configuration file\n");
1876 
1877 	set_def_fwd_config();
1878 	if (nb_lcores == 0)
1879 		rte_panic("Empty set of forwarding logical cores - check the "
1880 			  "core mask supplied in the command parameters\n");
1881 
1882 	argc -= diag;
1883 	argv += diag;
1884 	if (argc > 1)
1885 		launch_args_parse(argc, argv);
1886 
1887 	if (nb_rxq > nb_txq)
1888 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1889 		       "but nb_txq=%d will prevent it from being fully tested.\n",
1890 		       nb_rxq, nb_txq);
1891 
1892 	init_config();
1893 	if (start_port(RTE_PORT_ALL) != 0)
1894 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
1895 
1896 	/* set all ports to promiscuous mode by default */
1897 	for (port_id = 0; port_id < nb_ports; port_id++)
1898 		rte_eth_promiscuous_enable(port_id);
1899 
1900 #ifdef RTE_LIBRTE_CMDLINE
1901 	if (interactive == 1) {
1902 		if (auto_start) {
1903 			printf("Start automatic packet forwarding\n");
1904 			start_packet_forwarding(0);
1905 		}
1906 		prompt();
1907 	} else
1908 #endif
1909 	{
1910 		char c;
1911 		int rc;
1912 
1913 		printf("No interactive command line, starting packet forwarding\n");
1914 		start_packet_forwarding(0);
1915 		printf("Press enter to exit\n");
1916 		rc = read(0, &c, 1);
1917 		if (rc < 0)
1918 			return 1;
1919 	}
1920 
1921 	return 0;
1922 }
1923