xref: /dpdk/app/test-pmd/testpmd.c (revision 148f963fb5323c1c6b6d5cea95084deb25cc73f8)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81 
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 
84 /* Use the master lcore to run the command line (interactive mode)? */
85 uint8_t interactive = 0;
86 
87 /*
88  * NUMA support configuration.
89  * When set, NUMA support spreads the allocation of the RX and TX
90  * memory rings, and of the DMA memory buffers (mbufs) of the probed
91  * ports, across CPU sockets 0 and 1.
92  * Otherwise, all memory is allocated from CPU socket 0.
93  */
94 uint8_t numa_support = 0; /**< No NUMA support by default. */
95 
96 /*
97  * In UMA mode, all memory is allocated from socket 0 unless
98  * --socket-num is configured.
99  */
100 uint8_t socket_num = UMA_NO_CONFIG;
101 
102 /*
103  * Use anonymous mapped memory (which might not be physically contiguous) for mbufs.
104  */
105 uint8_t mp_anon = 0;
106 
107 /*
108  * Record the Ethernet address of peer target ports to which packets are
109  * forwarded.
110  * Must be instantiated with the Ethernet addresses of peer traffic generator
111  * ports.
112  */
113 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
114 portid_t nb_peer_eth_addrs = 0;
115 
116 /*
117  * Probed Target Environment.
118  */
119 struct rte_port *ports;	       /**< For all probed ethernet ports. */
120 portid_t nb_ports;             /**< Number of probed ethernet ports. */
121 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
122 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
123 
124 /*
125  * Test Forwarding Configuration.
126  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
127  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
128  */
129 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
130 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
131 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
132 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
133 
134 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
135 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
136 
137 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
138 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
139 
140 /*
141  * Forwarding engines.
142  */
143 struct fwd_engine *fwd_engines[] = {
144 	&io_fwd_engine,
145 	&mac_fwd_engine,
146 	&rx_only_engine,
147 	&tx_only_engine,
148 	&csum_fwd_engine,
149 #ifdef RTE_LIBRTE_IEEE1588
150 	&ieee1588_fwd_engine,
151 #endif
152 	NULL,
153 };
154 
155 struct fwd_config cur_fwd_config;
156 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
157 
158 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
159 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
160                                       * specified on command-line. */
161 
162 /*
163  * Configuration of packet segments used by the "txonly" processing engine.
164  */
165 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
166 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
167 	TXONLY_DEF_PACKET_LEN,
168 };
169 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
170 
171 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
172 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
173 
174 /* Whether the current configuration is in DCB mode; 0 means it is not. */
175 uint8_t dcb_config = 0;
176 
177 /* Whether DCB is currently under test. */
178 uint8_t dcb_test = 0;
179 
180 /* DCB with VT (virtualization) queue mapping is the default. */
181 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
182 
183 /*
184  * Configurable number of RX/TX queues.
185  */
186 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
187 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
188 
189 /*
190  * Configurable number of RX/TX ring descriptors.
191  */
192 #define RTE_TEST_RX_DESC_DEFAULT 128
193 #define RTE_TEST_TX_DESC_DEFAULT 512
194 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
195 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
196 
197 /*
198  * Configurable values of RX and TX ring threshold registers.
199  */
200 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
201 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
202 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
203 
204 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
205 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
206 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
207 
208 struct rte_eth_thresh rx_thresh = {
209 	.pthresh = RX_PTHRESH,
210 	.hthresh = RX_HTHRESH,
211 	.wthresh = RX_WTHRESH,
212 };
213 
214 struct rte_eth_thresh tx_thresh = {
215 	.pthresh = TX_PTHRESH,
216 	.hthresh = TX_HTHRESH,
217 	.wthresh = TX_WTHRESH,
218 };
219 
220 /*
221  * Configurable value of RX free threshold.
222  */
223 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
224 
225 /*
226  * Configurable value of RX drop enable.
227  */
228 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
229 
230 /*
231  * Configurable value of TX free threshold.
232  */
233 uint16_t tx_free_thresh = 0; /* Use default values. */
234 
235 /*
236  * Configurable value of TX RS bit threshold.
237  */
238 uint16_t tx_rs_thresh = 0; /* Use default values. */
239 
240 /*
241  * Configurable value of TX queue flags.
242  */
243 uint32_t txq_flags = 0; /* No flags set. */
244 
245 /*
246  * Receive Side Scaling (RSS) configuration.
247  */
248 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
249 
250 /*
251  * Port topology configuration
252  */
253 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
254 
255 /*
256  * Avoid flushing all the RX queues before starting forwarding.
257  */
258 uint8_t no_flush_rx = 0; /* flush by default */
259 
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264 
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267 
268 #endif
269 
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275 	.split_hdr_size = 0,
276 	.header_split   = 0, /**< Header Split disabled. */
277 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
284 
285 struct rte_fdir_conf fdir_conf = {
286 	.mode = RTE_FDIR_MODE_NONE,
287 	.pballoc = RTE_FDIR_PBALLOC_64K,
288 	.status = RTE_FDIR_REPORT_STATUS,
289 	.flexbytes_offset = 0x6,
290 	.drop_queue = 127,
291 };
292 
293 static volatile int test_done = 1; /* set to 1 when packet forwarding is stopped. */
294 
295 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
296 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
297 
298 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
299 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
300 
301 uint16_t nb_tx_queue_stats_mappings = 0;
302 uint16_t nb_rx_queue_stats_mappings = 0;
303 
304 /* Forward function declarations */
305 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
306 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
307 
308 /*
309  * Check if all the ports are started.
310  * If yes, return positive value. If not, return zero.
311  */
312 static int all_ports_started(void);
313 
314 /*
315  * Setup default configuration.
316  */
317 static void
318 set_default_fwd_lcores_config(void)
319 {
320 	unsigned int i;
321 	unsigned int nb_lc;
322 
323 	nb_lc = 0;
324 	for (i = 0; i < RTE_MAX_LCORE; i++) {
325 		if (! rte_lcore_is_enabled(i))
326 			continue;
327 		if (i == rte_get_master_lcore())
328 			continue;
329 		fwd_lcores_cpuids[nb_lc++] = i;
330 	}
331 	nb_lcores = (lcoreid_t) nb_lc;
332 	nb_cfg_lcores = nb_lcores;
333 	nb_fwd_lcores = 1;
334 }
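
/*
 * Example: with an EAL core mask of 0xf and lcore 0 as the master lcore,
 * the loop above selects fwd_lcores_cpuids = {1, 2, 3}, so nb_lcores = 3
 * while only one forwarding lcore is enabled by default.
 */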
335 
336 static void
337 set_def_peer_eth_addrs(void)
338 {
339 	portid_t i;
340 
341 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
342 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
343 		peer_eth_addrs[i].addr_bytes[5] = i;
344 	}
345 }
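
/*
 * ETHER_LOCAL_ADMIN_ADDR is 0x02 (the locally administered bit), so the
 * default peer address of port i comes out as 02:00:00:00:00:<i>. Real
 * peer addresses can be supplied on the command line instead (e.g. with
 * --eth-peer or --eth-peers-configfile, where supported).
 */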
346 
347 static void
348 set_default_fwd_ports_config(void)
349 {
350 	portid_t pt_id;
351 
352 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
353 		fwd_ports_ids[pt_id] = pt_id;
354 
355 	nb_cfg_ports = nb_ports;
356 	nb_fwd_ports = nb_ports;
357 }
358 
359 void
360 set_def_fwd_config(void)
361 {
362 	set_default_fwd_lcores_config();
363 	set_def_peer_eth_addrs();
364 	set_default_fwd_ports_config();
365 }
366 
367 /*
368  * Configuration initialisation done once at init time.
369  */
370 struct mbuf_ctor_arg {
371 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
372 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
373 };
374 
375 struct mbuf_pool_ctor_arg {
376 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
377 };
378 
379 static void
380 testpmd_mbuf_ctor(struct rte_mempool *mp,
381 		  void *opaque_arg,
382 		  void *raw_mbuf,
383 		  __attribute__((unused)) unsigned i)
384 {
385 	struct mbuf_ctor_arg *mb_ctor_arg;
386 	struct rte_mbuf    *mb;
387 
388 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
389 	mb = (struct rte_mbuf *) raw_mbuf;
390 
391 	mb->type         = RTE_MBUF_PKT;
392 	mb->pool         = mp;
393 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
394 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
395 			mb_ctor_arg->seg_buf_offset);
396 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
398 	mb->ol_flags     = 0;
399 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
400 	mb->pkt.nb_segs  = 1;
401 	mb->pkt.vlan_macip.data = 0;
402 	mb->pkt.hash.rss = 0;
403 }
404 
405 static void
406 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
407 		       void *opaque_arg)
408 {
409 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
410 	struct rte_pktmbuf_pool_private *mbp_priv;
411 
412 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
413 		printf("%s(%s) private_data_size %d < %d\n",
414 		       __func__, mp->name, (int) mp->private_data_size,
415 		       (int) sizeof(struct rte_pktmbuf_pool_private));
416 		return;
417 	}
418 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
419 	mbp_priv = rte_mempool_get_priv(mp);
420 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
421 }
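
/*
 * Memory layout of each element that mbuf_pool_create() below asks the
 * mempool library to build (a sketch derived from the offsets computed
 * in that function):
 *
 *   | struct rte_mbuf, rounded up | RTE_PKTMBUF_ | mbuf_seg_size |
 *   | to a cache line             | HEADROOM     | bytes of data |
 *
 * so mb_size = CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf))
 *            + RTE_PKTMBUF_HEADROOM + mbuf_seg_size.
 */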
422 
423 static void
424 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
425 		 unsigned int socket_id)
426 {
427 	char pool_name[RTE_MEMPOOL_NAMESIZE];
428 	struct rte_mempool *rte_mp;
429 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
430 	struct mbuf_ctor_arg mb_ctor_arg;
431 	uint32_t mb_size;
432 
433 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
434 						mbuf_seg_size);
435 	mb_ctor_arg.seg_buf_offset =
436 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
437 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
438 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
439 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
440 
441 #ifdef RTE_LIBRTE_PMD_XENVIRT
442 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
443 				    (unsigned) mb_mempool_cache,
444 				    sizeof(struct rte_pktmbuf_pool_private),
445 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
446 				    testpmd_mbuf_ctor, &mb_ctor_arg,
447 				    socket_id, 0);
448 
451 #else
452 	if (mp_anon != 0)
453 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
454 				    (unsigned) mb_mempool_cache,
455 				    sizeof(struct rte_pktmbuf_pool_private),
456 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
457 				    testpmd_mbuf_ctor, &mb_ctor_arg,
458 				    socket_id, 0);
459 	else
460 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
461 				    (unsigned) mb_mempool_cache,
462 				    sizeof(struct rte_pktmbuf_pool_private),
463 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
464 				    testpmd_mbuf_ctor, &mb_ctor_arg,
465 				    socket_id, 0);
466 
467 #endif
468 
469 	if (rte_mp == NULL) {
470 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
471 						"failed\n", socket_id);
472 	} else if (verbose_level > 0) {
473 		rte_mempool_dump(rte_mp);
474 	}
475 }
476 
477 static void
478 init_config(void)
479 {
480 	portid_t pid;
481 	struct rte_port *port;
482 	struct rte_mempool *mbp;
483 	unsigned int nb_mbuf_per_pool;
484 	lcoreid_t  lc_id;
485 	uint8_t port_per_socket[MAX_SOCKET];
486 
487 	memset(port_per_socket, 0, MAX_SOCKET);
488 	/* Configuration of logical cores. */
489 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
490 				sizeof(struct fwd_lcore *) * nb_lcores,
491 				CACHE_LINE_SIZE);
492 	if (fwd_lcores == NULL) {
493 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
494 							"failed\n", nb_lcores);
495 	}
496 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
497 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
498 					       sizeof(struct fwd_lcore),
499 					       CACHE_LINE_SIZE);
500 		if (fwd_lcores[lc_id] == NULL) {
501 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
502 								"failed\n");
503 		}
504 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
505 	}
506 
507 	/*
508 	 * Create pools of mbuf.
509 	 * If NUMA support is disabled, create a single pool of mbuf in
510 	 * socket 0 memory by default.
511 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
512 	 *
513 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
514 	 * nb_txd can be configured at run time.
515 	 */
516 	if (param_total_num_mbufs)
517 		nb_mbuf_per_pool = param_total_num_mbufs;
518 	else {
519 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
520 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
521 
522 		if (!numa_support)
523 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
524 	}
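
	/*
	 * Sizing note (derived from the formula above): each pool holds
	 * enough mbufs to fill one RX ring and one TX ring at their
	 * maximum descriptor counts, plus one extra burst and one mempool
	 * cache per lcore. In UMA mode the single socket-0 pool must
	 * serve every port, hence the multiplication by nb_ports.
	 */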
525 
526 	if (!numa_support) {
527 		if (socket_num == UMA_NO_CONFIG)
528 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
529 		else
530 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
531 						 socket_num);
532 	}
533 	/*
534 	 * Record which mbuf pool each logical core should use, if needed.
535 	 */
536 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
537 		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
538 		if (mbp == NULL)
539 			mbp = mbuf_pool_find(0);
540 		fwd_lcores[lc_id]->mbp = mbp;
541 	}
542 
543 	/* Configuration of Ethernet ports. */
544 	ports = rte_zmalloc("testpmd: ports",
545 			    sizeof(struct rte_port) * nb_ports,
546 			    CACHE_LINE_SIZE);
547 	if (ports == NULL) {
548 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
549 							"failed\n", nb_ports);
550 	}
551 
552 	for (pid = 0; pid < nb_ports; pid++) {
553 		port = &ports[pid];
554 		rte_eth_dev_info_get(pid, &port->dev_info);
555 
556 		if (numa_support) {
557 			if (port_numa[pid] != NUMA_NO_CONFIG)
558 				port_per_socket[port_numa[pid]]++;
559 			else {
560 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
561 				port_per_socket[socket_id]++;
562 			}
563 		}
564 
565 		/* set flag to initialize port/queue */
566 		port->need_reconfig = 1;
567 		port->need_reconfig_queues = 1;
568 	}
569 
570 	if (numa_support) {
571 		uint8_t i;
572 		unsigned int nb_mbuf;
573 
574 		if (param_total_num_mbufs)
575 			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
576 
577 		for (i = 0; i < MAX_SOCKET; i++) {
578 			nb_mbuf = (nb_mbuf_per_pool *
579 						port_per_socket[i]);
580 			if (nb_mbuf)
581 				mbuf_pool_create(mbuf_data_size,
582 						nb_mbuf, i);
583 		}
584 	}
585 	init_port_config();
586 	/* Configuration of packet forwarding streams. */
587 	if (init_fwd_streams() < 0)
588 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
589 }
590 
591 int
592 init_fwd_streams(void)
593 {
594 	portid_t pid;
595 	struct rte_port *port;
596 	streamid_t sm_id, nb_fwd_streams_new;
597 
598 	/* Set each port's socket id according to the NUMA configuration. */
599 	for (pid = 0; pid < nb_ports; pid++) {
600 		port = &ports[pid];
601 		if (nb_rxq > port->dev_info.max_rx_queues) {
602 			printf("Fail: nb_rxq(%d) is greater than "
603 				"max_rx_queues(%d)\n", nb_rxq,
604 				port->dev_info.max_rx_queues);
605 			return -1;
606 		}
607 		if (nb_txq > port->dev_info.max_tx_queues) {
608 			printf("Fail: nb_txq(%d) is greater than "
609 				"max_tx_queues(%d)\n", nb_txq,
610 				port->dev_info.max_tx_queues);
611 			return -1;
612 		}
613 		if (numa_support)
614 			port->socket_id = rte_eth_dev_socket_id(pid);
615 		else {
616 			if (socket_num == UMA_NO_CONFIG)
617 				port->socket_id = 0;
618 			else
619 				port->socket_id = socket_num;
620 		}
621 	}
622 
623 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
624 	if (nb_fwd_streams_new == nb_fwd_streams)
625 		return 0;
626 	/* clear the old */
627 	if (fwd_streams != NULL) {
628 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
629 			if (fwd_streams[sm_id] == NULL)
630 				continue;
631 			rte_free(fwd_streams[sm_id]);
632 			fwd_streams[sm_id] = NULL;
633 		}
634 		rte_free(fwd_streams);
635 		fwd_streams = NULL;
636 	}
637 
638 	/* init new */
639 	nb_fwd_streams = nb_fwd_streams_new;
640 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
641 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
642 	if (fwd_streams == NULL)
643 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
644 						"failed\n", nb_fwd_streams);
645 
646 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
647 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
648 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
649 		if (fwd_streams[sm_id] == NULL)
650 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
651 								" failed\n");
652 	}
653 
654 	return 0;
655 }
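
/*
 * Example: 2 probed ports with nb_rxq = 4 give nb_fwd_streams = 8 above,
 * i.e. one forwarding stream per (port, RX queue) pair.
 */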
656 
657 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
658 static void
659 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
660 {
661 	unsigned int total_burst;
662 	unsigned int nb_burst;
663 	unsigned int burst_stats[3];
664 	uint16_t pktnb_stats[3];
665 	uint16_t nb_pkt;
666 	int burst_percent[3];
667 
668 	/*
669 	 * First compute the total number of packet bursts and the
670 	 * two highest numbers of bursts of the same number of packets.
671 	 */
672 	total_burst = 0;
673 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
674 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
675 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
676 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
677 		if (nb_burst == 0)
678 			continue;
679 		total_burst += nb_burst;
680 		if (nb_burst > burst_stats[0]) {
681 			burst_stats[1] = burst_stats[0];
682 			pktnb_stats[1] = pktnb_stats[0];
683 			burst_stats[0] = nb_burst;
684 			pktnb_stats[0] = nb_pkt;
685 		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst count,
			 * as the comment above promises */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
686 	}
687 	if (total_burst == 0)
688 		return;
689 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
690 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
691 	       burst_percent[0], (int) pktnb_stats[0]);
692 	if (burst_stats[0] == total_burst) {
693 		printf("]\n");
694 		return;
695 	}
696 	if (burst_stats[0] + burst_stats[1] == total_burst) {
697 		printf(" + %d%% of %d pkts]\n",
698 		       100 - burst_percent[0], pktnb_stats[1]);
699 		return;
700 	}
701 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
702 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
703 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
704 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
705 		return;
706 	}
707 	printf(" + %d%% of %d pkts + %d%% of others]\n",
708 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
709 }
710 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
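
/*
 * Illustrative output of pkt_burst_stats_display() (made-up numbers):
 *
 *   RX-bursts : 1500 [90% of 32 pkts + 10% of 16 pkts]
 */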
711 
712 static void
713 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
714 {
715 	struct rte_port *port;
716 	uint8_t i;
717 
718 	static const char *fwd_stats_border = "----------------------";
719 
720 	port = &ports[port_id];
721 	printf("\n  %s Forward statistics for port %-2d %s\n",
722 	       fwd_stats_border, port_id, fwd_stats_border);
723 
724 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
725 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
726 		       "%-"PRIu64"\n",
727 		       stats->ipackets, stats->ierrors,
728 		       (uint64_t) (stats->ipackets + stats->ierrors));
729 
730 		if (cur_fwd_eng == &csum_fwd_engine)
731 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
732 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
733 
734 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
735 		       "%-"PRIu64"\n",
736 		       stats->opackets, port->tx_dropped,
737 		       (uint64_t) (stats->opackets + port->tx_dropped));
738 
739 		if (stats->rx_nombuf > 0)
740 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
741 
742 	}
743 	else {
744 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
745 		       "%14"PRIu64"\n",
746 		       stats->ipackets, stats->ierrors,
747 		       (uint64_t) (stats->ipackets + stats->ierrors));
748 
749 		if (cur_fwd_eng == &csum_fwd_engine)
750 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
751 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
752 
753 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
754 		       "%14"PRIu64"\n",
755 		       stats->opackets, port->tx_dropped,
756 		       (uint64_t) (stats->opackets + port->tx_dropped));
757 
758 		if (stats->rx_nombuf > 0)
759 			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
760 	}
761 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
762 	if (port->rx_stream)
763 		pkt_burst_stats_display("RX",
764 			&port->rx_stream->rx_burst_stats);
765 	if (port->tx_stream)
766 		pkt_burst_stats_display("TX",
767 			&port->tx_stream->tx_burst_stats);
768 #endif
769 	/* Flow director (fdir) statistics. */
770 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
771 		printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
772 		       stats->fdirmiss,
773 		       stats->fdirmatch);
774 
775 	if (port->rx_queue_stats_mapping_enabled) {
776 		printf("\n");
777 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
778 			printf("  Stats reg %2d RX-packets:%14"PRIu64
779 			       "     RX-errors:%14"PRIu64
780 			       "    RX-bytes:%14"PRIu64"\n",
781 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
782 		}
783 		printf("\n");
784 	}
785 	if (port->tx_queue_stats_mapping_enabled) {
786 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
787 			printf("  Stats reg %2d TX-packets:%14"PRIu64
788 			       "                                 TX-bytes:%14"PRIu64"\n",
789 			       i, stats->q_opackets[i], stats->q_obytes[i]);
790 		}
791 	}
792 
793 	printf("  %s--------------------------------%s\n",
794 	       fwd_stats_border, fwd_stats_border);
795 }
796 
797 static void
798 fwd_stream_stats_display(streamid_t stream_id)
799 {
800 	struct fwd_stream *fs;
801 	static const char *fwd_top_stats_border = "-------";
802 
803 	fs = fwd_streams[stream_id];
804 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
805 	    (fs->fwd_dropped == 0))
806 		return;
807 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
808 	       "TX Port=%2d/Queue=%2d %s\n",
809 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
810 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
811 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
812 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
813 
814 	/* In checksum forwarding mode, display checksum error counters. */
815 	if (cur_fwd_eng == &csum_fwd_engine) {
816 		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
817 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
818 	}
819 
820 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
821 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
822 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
823 #endif
824 }
825 
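/*
 * Drain every RX queue of every forwarding port and free the received
 * mbufs. Two passes separated by a short delay are made, presumably so
 * that packets still in flight after the first pass are caught as well.
 */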
826 static void
827 flush_fwd_rx_queues(void)
828 {
829 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
830 	portid_t  rxp;
831 	portid_t port_id;
832 	queueid_t rxq;
833 	uint16_t  nb_rx;
834 	uint16_t  i;
835 	uint8_t   j;
836 
837 	for (j = 0; j < 2; j++) {
838 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
839 			for (rxq = 0; rxq < nb_rxq; rxq++) {
840 				port_id = fwd_ports_ids[rxp];
841 				do {
842 					nb_rx = rte_eth_rx_burst(port_id, rxq,
843 						pkts_burst, MAX_PKT_BURST);
844 					for (i = 0; i < nb_rx; i++)
845 						rte_pktmbuf_free(pkts_burst[i]);
846 				} while (nb_rx > 0);
847 			}
848 		}
849 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
850 	}
851 }
852 
853 static void
854 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
855 {
856 	struct fwd_stream **fsm;
857 	streamid_t nb_fs;
858 	streamid_t sm_id;
859 
860 	fsm = &fwd_streams[fc->stream_idx];
861 	nb_fs = fc->stream_nb;
862 	do {
863 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
864 			(*pkt_fwd)(fsm[sm_id]);
865 	} while (! fc->stopped);
866 }
867 
868 static int
869 start_pkt_forward_on_core(void *fwd_arg)
870 {
871 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
872 			     cur_fwd_config.fwd_eng->packet_fwd);
873 	return 0;
874 }
875 
876 /*
877  * Run the TXONLY packet forwarding engine to send a single burst of packets.
878  * Used to start communication flows in network loopback test configurations.
879  */
880 static int
881 run_one_txonly_burst_on_core(void *fwd_arg)
882 {
883 	struct fwd_lcore *fwd_lc;
884 	struct fwd_lcore tmp_lcore;
885 
886 	fwd_lc = (struct fwd_lcore *) fwd_arg;
887 	tmp_lcore = *fwd_lc;
888 	tmp_lcore.stopped = 1;
889 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
890 	return 0;
891 }
892 
893 /*
894  * Launch packet forwarding:
895  *     - Setup per-port forwarding context.
896  *     - launch logical cores with their forwarding configuration.
897  */
898 static void
899 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
900 {
901 	port_fwd_begin_t port_fwd_begin;
902 	unsigned int i;
903 	unsigned int lc_id;
904 	int diag;
905 
906 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
907 	if (port_fwd_begin != NULL) {
908 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
909 			(*port_fwd_begin)(fwd_ports_ids[i]);
910 	}
911 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
912 		lc_id = fwd_lcores_cpuids[i];
913 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
914 			fwd_lcores[i]->stopped = 0;
915 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
916 						     fwd_lcores[i], lc_id);
917 			if (diag != 0)
918 				printf("launch lcore %u failed - diag=%d\n",
919 				       lc_id, diag);
920 		}
921 	}
922 }
923 
924 /*
925  * Launch packet forwarding configuration.
926  */
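/*
 * In interactive mode this is the handler behind the "start" command;
 * "start tx_first" sets with_tx_first so that one TXONLY burst is sent
 * on every port (see run_one_txonly_burst_on_core() above) before the
 * regular forwarding loop begins.
 */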
927 void
928 start_packet_forwarding(int with_tx_first)
929 {
930 	port_fwd_begin_t port_fwd_begin;
931 	port_fwd_end_t  port_fwd_end;
932 	struct rte_port *port;
933 	unsigned int i;
934 	portid_t   pt_id;
935 	streamid_t sm_id;
936 
937 	if (all_ports_started() == 0) {
938 		printf("Not all ports were started\n");
939 		return;
940 	}
941 	if (test_done == 0) {
942 		printf("Packet forwarding already started\n");
943 		return;
944 	}
945 	if (dcb_test) {
946 		for (i = 0; i < nb_fwd_ports; i++) {
947 			pt_id = fwd_ports_ids[i];
948 			port = &ports[pt_id];
949 			if (!port->dcb_flag) {
950 				printf("In DCB mode, all forwarding ports must "
951 				       "be configured in this mode.\n");
952 				return;
953 			}
954 		}
955 		if (nb_fwd_lcores == 1) {
956 			printf("In DCB mode, the number of forwarding cores "
957 			       "must be larger than 1.\n");
958 			return;
959 		}
960 	}
961 	test_done = 0;
962 
963 	if (!no_flush_rx)
964 		flush_fwd_rx_queues();
965 
966 	fwd_config_setup();
967 	rxtx_config_display();
968 
969 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
970 		pt_id = fwd_ports_ids[i];
971 		port = &ports[pt_id];
972 		rte_eth_stats_get(pt_id, &port->stats);
973 		port->tx_dropped = 0;
974 
975 		map_port_queue_stats_mapping_registers(pt_id, port);
976 	}
977 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
978 		fwd_streams[sm_id]->rx_packets = 0;
979 		fwd_streams[sm_id]->tx_packets = 0;
980 		fwd_streams[sm_id]->fwd_dropped = 0;
981 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
982 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
983 
984 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
985 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
986 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
987 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
988 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
989 #endif
990 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
991 		fwd_streams[sm_id]->core_cycles = 0;
992 #endif
993 	}
994 	if (with_tx_first) {
995 		port_fwd_begin = tx_only_engine.port_fwd_begin;
996 		if (port_fwd_begin != NULL) {
997 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
998 				(*port_fwd_begin)(fwd_ports_ids[i]);
999 		}
1000 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1001 		rte_eal_mp_wait_lcore();
1002 		port_fwd_end = tx_only_engine.port_fwd_end;
1003 		if (port_fwd_end != NULL) {
1004 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1005 				(*port_fwd_end)(fwd_ports_ids[i]);
1006 		}
1007 	}
1008 	launch_packet_forwarding(start_pkt_forward_on_core);
1009 }
1010 
1011 void
1012 stop_packet_forwarding(void)
1013 {
1014 	struct rte_eth_stats stats;
1015 	struct rte_port *port;
1016 	port_fwd_end_t  port_fwd_end;
1017 	int i;
1018 	portid_t   pt_id;
1019 	streamid_t sm_id;
1020 	lcoreid_t  lc_id;
1021 	uint64_t total_recv;
1022 	uint64_t total_xmit;
1023 	uint64_t total_rx_dropped;
1024 	uint64_t total_tx_dropped;
1025 	uint64_t total_rx_nombuf;
1026 	uint64_t tx_dropped;
1027 	uint64_t rx_bad_ip_csum;
1028 	uint64_t rx_bad_l4_csum;
1029 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1030 	uint64_t fwd_cycles;
1031 #endif
1032 	static const char *acc_stats_border = "+++++++++++++++";
1033 
1034 	if (all_ports_started() == 0) {
1035 		printf("Not all ports were started\n");
1036 		return;
1037 	}
1038 	if (test_done) {
1039 		printf("Packet forwarding not started\n");
1040 		return;
1041 	}
1042 	printf("Telling cores to stop...");
1043 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1044 		fwd_lcores[lc_id]->stopped = 1;
1045 	printf("\nWaiting for lcores to finish...\n");
1046 	rte_eal_mp_wait_lcore();
1047 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1048 	if (port_fwd_end != NULL) {
1049 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1050 			pt_id = fwd_ports_ids[i];
1051 			(*port_fwd_end)(pt_id);
1052 		}
1053 	}
1054 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1055 	fwd_cycles = 0;
1056 #endif
1057 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1058 		if (cur_fwd_config.nb_fwd_streams >
1059 		    cur_fwd_config.nb_fwd_ports) {
1060 			fwd_stream_stats_display(sm_id);
1061 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1062 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1063 		} else {
1064 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1065 				fwd_streams[sm_id];
1066 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1067 				fwd_streams[sm_id];
1068 		}
1069 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1070 		tx_dropped = (uint64_t) (tx_dropped +
1071 					 fwd_streams[sm_id]->fwd_dropped);
1072 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1073 
1074 		rx_bad_ip_csum =
1075 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1076 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1077 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1078 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1079 							rx_bad_ip_csum;
1080 
1081 		rx_bad_l4_csum =
1082 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1083 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1084 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1085 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1086 							rx_bad_l4_csum;
1087 
1088 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1089 		fwd_cycles = (uint64_t) (fwd_cycles +
1090 					 fwd_streams[sm_id]->core_cycles);
1091 #endif
1092 	}
1093 	total_recv = 0;
1094 	total_xmit = 0;
1095 	total_rx_dropped = 0;
1096 	total_tx_dropped = 0;
1097 	total_rx_nombuf  = 0;
1098 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1099 		pt_id = fwd_ports_ids[i];
1100 
1101 		port = &ports[pt_id];
1102 		rte_eth_stats_get(pt_id, &stats);
1103 		stats.ipackets -= port->stats.ipackets;
1104 		port->stats.ipackets = 0;
1105 		stats.opackets -= port->stats.opackets;
1106 		port->stats.opackets = 0;
1107 		stats.ibytes   -= port->stats.ibytes;
1108 		port->stats.ibytes = 0;
1109 		stats.obytes   -= port->stats.obytes;
1110 		port->stats.obytes = 0;
1111 		stats.ierrors  -= port->stats.ierrors;
1112 		port->stats.ierrors = 0;
1113 		stats.oerrors  -= port->stats.oerrors;
1114 		port->stats.oerrors = 0;
1115 		stats.rx_nombuf -= port->stats.rx_nombuf;
1116 		port->stats.rx_nombuf = 0;
1117 		stats.fdirmatch -= port->stats.fdirmatch;
1118 		port->stats.fdirmatch = 0;
1119 		stats.fdirmiss -= port->stats.fdirmiss;
1120 		port->stats.fdirmiss = 0;
1121 
1122 		total_recv += stats.ipackets;
1123 		total_xmit += stats.opackets;
1124 		total_rx_dropped += stats.ierrors;
1125 		total_tx_dropped += port->tx_dropped;
1126 		total_rx_nombuf  += stats.rx_nombuf;
1127 
1128 		fwd_port_stats_display(pt_id, &stats);
1129 	}
1130 	printf("\n  %s Accumulated forward statistics for all ports"
1131 	       "%s\n",
1132 	       acc_stats_border, acc_stats_border);
1133 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1134 	       "%-"PRIu64"\n"
1135 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1136 	       "%-"PRIu64"\n",
1137 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1138 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1139 	if (total_rx_nombuf > 0)
1140 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1141 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1142 	       "%s\n",
1143 	       acc_stats_border, acc_stats_border);
1144 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1145 	if (total_recv > 0)
1146 		printf("\n  CPU cycles/packet=%u (total cycles="
1147 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1148 		       (unsigned int)(fwd_cycles / total_recv),
1149 		       fwd_cycles, total_recv);
1150 #endif
1151 	printf("\nDone.\n");
1152 	test_done = 1;
1153 }
1154 
1155 static int
1156 all_ports_started(void)
1157 {
1158 	portid_t pi;
1159 	struct rte_port *port;
1160 
1161 	for (pi = 0; pi < nb_ports; pi++) {
1162 		port = &ports[pi];
1163 		/* Check if there is a port which is not started */
1164 		if (port->port_status != RTE_PORT_STARTED)
1165 			return 0;
1166 	}
1167 
1168 	/* All ports are started. */
1169 	return 1;
1170 }
1171 
1172 int
1173 start_port(portid_t pid)
1174 {
1175 	int diag, need_check_link_status = 0;
1176 	portid_t pi;
1177 	queueid_t qi;
1178 	struct rte_port *port;
1179 
1180 	if (test_done == 0) {
1181 		printf("Please stop forwarding first\n");
1182 		return -1;
1183 	}
1184 
1185 	if (init_fwd_streams() < 0) {
1186 		printf("Fail from init_fwd_streams()\n");
1187 		return -1;
1188 	}
1189 
1190 	if (dcb_config)
1191 		dcb_test = 1;
1192 	for (pi = 0; pi < nb_ports; pi++) {
1193 		if (pid < nb_ports && pid != pi)
1194 			continue;
1195 
1196 		port = &ports[pi];
1197 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1198 						 RTE_PORT_HANDLING) == 0) {
1199 			printf("Port %d is not stopped\n", pi);
1200 			continue;
1201 		}
1202 
1203 		if (port->need_reconfig > 0) {
1204 			port->need_reconfig = 0;
1205 
1206 			printf("Configuring Port %d (socket %d)\n", pi,
1207 					rte_eth_dev_socket_id(pi));
1208 			/* configure port */
1209 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1210 						&(port->dev_conf));
1211 			if (diag != 0) {
1212 				if (rte_atomic16_cmpset(&(port->port_status),
1213 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1214 					printf("Port %d cannot be set back "
1215 							"to stopped\n", pi);
1216 				printf("Failed to configure port %d\n", pi);
1217 				/* try to reconfigure port next time */
1218 				port->need_reconfig = 1;
1219 				return -1;
1220 			}
1221 		}
1222 		if (port->need_reconfig_queues > 0) {
1223 			port->need_reconfig_queues = 0;
1224 			/* setup tx queues */
1225 			for (qi = 0; qi < nb_txq; qi++) {
1226 				if ((numa_support) &&
1227 					(txring_numa[pi] != NUMA_NO_CONFIG))
1228 					diag = rte_eth_tx_queue_setup(pi, qi,
1229 						nb_txd, txring_numa[pi],
1230 						&(port->tx_conf));
1231 				else
1232 					diag = rte_eth_tx_queue_setup(pi, qi,
1233 						nb_txd, port->socket_id,
1234 						&(port->tx_conf));
1235 
1236 				if (diag == 0)
1237 					continue;
1238 
1239 				/* Fail to setup tx queue, return */
1240 				if (rte_atomic16_cmpset(&(port->port_status),
1241 							RTE_PORT_HANDLING,
1242 							RTE_PORT_STOPPED) == 0)
1243 					printf("Port %d cannot be set back "
1244 							"to stopped\n", pi);
1245 				printf("Failed to configure port %d TX queues\n", pi);
1246 				/* try to reconfigure queues next time */
1247 				port->need_reconfig_queues = 1;
1248 				return -1;
1249 			}
1250 			/* setup rx queues */
1251 			for (qi = 0; qi < nb_rxq; qi++) {
1252 				if ((numa_support) &&
1253 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1254 					struct rte_mempool * mp =
1255 						mbuf_pool_find(rxring_numa[pi]);
1256 					if (mp == NULL) {
1257 						printf("Failed to setup RX queue: "
1258 							"no mempool allocated "
1259 							"on socket %d\n",
1260 							rxring_numa[pi]);
1261 						return -1;
1262 					}
1263 
1264 					diag = rte_eth_rx_queue_setup(pi, qi,
1265 					     nb_rxd, rxring_numa[pi],
1266 					     &(port->rx_conf), mp);
1267 				}
1268 				else
1269 					diag = rte_eth_rx_queue_setup(pi, qi,
1270 					     nb_rxd, port->socket_id,
1271 					     &(port->rx_conf),
1272 					     mbuf_pool_find(port->socket_id));
1273 
1274 				if (diag == 0)
1275 					continue;
1276 
1278 				/* Fail to setup rx queue, return */
1279 				if (rte_atomic16_cmpset(&(port->port_status),
1280 							RTE_PORT_HANDLING,
1281 							RTE_PORT_STOPPED) == 0)
1282 					printf("Port %d cannot be set back "
1283 							"to stopped\n", pi);
1284 				printf("Failed to configure port %d RX queues\n", pi);
1285 				/* try to reconfigure queues next time */
1286 				port->need_reconfig_queues = 1;
1287 				return -1;
1288 			}
1289 		}
1290 		/* start port */
1291 		if (rte_eth_dev_start(pi) < 0) {
1292 			printf("Fail to start port %d\n", pi);
1293 
1294 			/* Fail to setup rx queue, return */
1295 			if (rte_atomic16_cmpset(&(port->port_status),
1296 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1297 				printf("Port %d cannot be set back to "
1298 							"stopped\n", pi);
1299 			continue;
1300 		}
1301 
1302 		if (rte_atomic16_cmpset(&(port->port_status),
1303 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1304 			printf("Port %d cannot be set to started\n", pi);
1305 
1306 		/* At least one port was started; check the link status. */
1307 		need_check_link_status = 1;
1308 	}
1309 
1310 	if (need_check_link_status)
1311 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1312 	else
1313 		printf("Please stop the ports first\n");
1314 
1315 	printf("Done\n");
1316 	return 0;
1317 }
1318 
1319 void
1320 stop_port(portid_t pid)
1321 {
1322 	portid_t pi;
1323 	struct rte_port *port;
1324 	int need_check_link_status = 0;
1325 
1326 	if (test_done == 0) {
1327 		printf("Please stop forwarding first\n");
1328 		return;
1329 	}
1330 	if (dcb_test) {
1331 		dcb_test = 0;
1332 		dcb_config = 0;
1333 	}
1334 	printf("Stopping ports...\n");
1335 
1336 	for (pi = 0; pi < nb_ports; pi++) {
1337 		if (pid < nb_ports && pid != pi)
1338 			continue;
1339 
1340 		port = &ports[pi];
1341 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1342 						RTE_PORT_HANDLING) == 0)
1343 			continue;
1344 
1345 		rte_eth_dev_stop(pi);
1346 
1347 		if (rte_atomic16_cmpset(&(port->port_status),
1348 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1349 			printf("Port %d cannot be set to stopped\n", pi);
1350 		need_check_link_status = 1;
1351 	}
1352 	if (need_check_link_status)
1353 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1354 
1355 	printf("Done\n");
1356 }
1357 
1358 void
1359 close_port(portid_t pid)
1360 {
1361 	portid_t pi;
1362 	struct rte_port *port;
1363 
1364 	if (test_done == 0) {
1365 		printf("Please stop forwarding first\n");
1366 		return;
1367 	}
1368 
1369 	printf("Closing ports...\n");
1370 
1371 	for (pi = 0; pi < nb_ports; pi++) {
1372 		if (pid < nb_ports && pid != pi)
1373 			continue;
1374 
1375 		port = &ports[pi];
1376 		if (rte_atomic16_cmpset(&(port->port_status),
1377 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1378 			printf("Port %d is not stopped\n", pi);
1379 			continue;
1380 		}
1381 
1382 		rte_eth_dev_close(pi);
1383 
1384 		if (rte_atomic16_cmpset(&(port->port_status),
1385 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1386 			printf("Port %d cannot be set to closed\n", pi);
1387 	}
1388 
1389 	printf("Done\n");
1390 }
1391 
1392 int
1393 all_ports_stopped(void)
1394 {
1395 	portid_t pi;
1396 	struct rte_port *port;
1397 
1398 	for (pi = 0; pi < nb_ports; pi++) {
1399 		port = &ports[pi];
1400 		if (port->port_status != RTE_PORT_STOPPED)
1401 			return 0;
1402 	}
1403 
1404 	return 1;
1405 }
1406 
1407 void
1408 pmd_test_exit(void)
1409 {
1410 	portid_t pt_id;
1411 
1412 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1413 		printf("Stopping port %d...", pt_id);
1414 		fflush(stdout);
1415 		rte_eth_dev_close(pt_id);
1416 		printf("done\n");
1417 	}
1418 	printf("bye...\n");
1419 }
1420 
1421 typedef void (*cmd_func_t)(void);
1422 struct pmd_test_command {
1423 	const char *cmd_name;
1424 	cmd_func_t cmd_func;
1425 };
1426 
1427 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1428 
1429 /* Check the link status of all ports for up to 9 s, and finally print it. */
1430 static void
1431 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1432 {
1433 #define CHECK_INTERVAL 100 /* 100ms */
1434 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1435 	uint8_t portid, count, all_ports_up, print_flag = 0;
1436 	struct rte_eth_link link;
1437 
1438 	printf("Checking link statuses...\n");
1439 	fflush(stdout);
1440 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1441 		all_ports_up = 1;
1442 		for (portid = 0; portid < port_num; portid++) {
1443 			if ((port_mask & (1 << portid)) == 0)
1444 				continue;
1445 			memset(&link, 0, sizeof(link));
1446 			rte_eth_link_get_nowait(portid, &link);
1447 			/* print link status if flag set */
1448 			if (print_flag == 1) {
1449 				if (link.link_status)
1450 					printf("Port %d Link Up - speed %u "
1451 						"Mbps - %s\n", (uint8_t)portid,
1452 						(unsigned)link.link_speed,
1453 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1454 					("full-duplex") : ("half-duplex\n"));
1455 				else
1456 					printf("Port %d Link Down\n",
1457 						(uint8_t)portid);
1458 				continue;
1459 			}
1460 			/* clear all_ports_up flag if any link down */
1461 			if (link.link_status == 0) {
1462 				all_ports_up = 0;
1463 				break;
1464 			}
1465 		}
1466 		/* after finally printing all link status, get out */
1467 		if (print_flag == 1)
1468 			break;
1469 
1470 		if (all_ports_up == 0) {
1471 			fflush(stdout);
1472 			rte_delay_ms(CHECK_INTERVAL);
1473 		}
1474 
1475 		/* set the print_flag if all ports up or timeout */
1476 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1477 			print_flag = 1;
1478 		}
1479 	}
1480 }
1481 
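/*
 * Program the per-queue statistics registers from the mappings given on
 * the command line (the --tx-queue-stats-mapping / --rx-queue-stats-mapping
 * options, parsed elsewhere in testpmd). A mapping is applied only when
 * its queue id falls within the configured number of queues.
 */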
1482 static int
1483 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1484 {
1485 	uint16_t i;
1486 	int diag;
1487 	uint8_t mapping_found = 0;
1488 
1489 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1490 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1491 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1492 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1493 					tx_queue_stats_mappings[i].queue_id,
1494 					tx_queue_stats_mappings[i].stats_counter_id);
1495 			if (diag != 0)
1496 				return diag;
1497 			mapping_found = 1;
1498 		}
1499 	}
1500 	if (mapping_found)
1501 		port->tx_queue_stats_mapping_enabled = 1;
1502 	return 0;
1503 }
1504 
1505 static int
1506 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1507 {
1508 	uint16_t i;
1509 	int diag;
1510 	uint8_t mapping_found = 0;
1511 
1512 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1513 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1514 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1515 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1516 					rx_queue_stats_mappings[i].queue_id,
1517 					rx_queue_stats_mappings[i].stats_counter_id);
1518 			if (diag != 0)
1519 				return diag;
1520 			mapping_found = 1;
1521 		}
1522 	}
1523 	if (mapping_found)
1524 		port->rx_queue_stats_mapping_enabled = 1;
1525 	return 0;
1526 }
1527 
1528 static void
1529 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1530 {
1531 	int diag = 0;
1532 
1533 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1534 	if (diag != 0) {
1535 		if (diag == -ENOTSUP) {
1536 			port->tx_queue_stats_mapping_enabled = 0;
1537 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1538 		}
1539 		else
1540 			rte_exit(EXIT_FAILURE,
1541 					"set_tx_queue_stats_mapping_registers "
1542 					"failed for port id=%d diag=%d\n",
1543 					pi, diag);
1544 	}
1545 
1546 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1547 	if (diag != 0) {
1548 		if (diag == -ENOTSUP) {
1549 			port->rx_queue_stats_mapping_enabled = 0;
1550 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1551 		}
1552 		else
1553 			rte_exit(EXIT_FAILURE,
1554 					"set_rx_queue_stats_mapping_registers "
1555 					"failed for port id=%d diag=%d\n",
1556 					pi, diag);
1557 	}
1558 }
1559 
1560 void
1561 init_port_config(void)
1562 {
1563 	portid_t pid;
1564 	struct rte_port *port;
1565 
1566 	for (pid = 0; pid < nb_ports; pid++) {
1567 		port = &ports[pid];
1568 		port->dev_conf.rxmode = rx_mode;
1569 		port->dev_conf.fdir_conf = fdir_conf;
1570 		if (nb_rxq > 0) {
1571 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1572 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1573 		} else {
1574 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1575 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1576 		}
1577 		port->rx_conf.rx_thresh = rx_thresh;
1578 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1579 		port->rx_conf.rx_drop_en = rx_drop_en;
1580 		port->tx_conf.tx_thresh = tx_thresh;
1581 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1582 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1583 		port->tx_conf.txq_flags = txq_flags;
1584 
1585 		rte_eth_macaddr_get(pid, &port->eth_addr);
1586 
1587 		map_port_queue_stats_mapping_registers(pid, port);
1588 #ifdef RTE_NIC_BYPASS
1589 		rte_eth_dev_bypass_init(pid);
1590 #endif
1591 	}
1592 }
1593 
1594 const uint16_t vlan_tags[] = {
1595 		0,  1,  2,  3,  4,  5,  6,  7,
1596 		8,  9, 10, 11,  12, 13, 14, 15,
1597 		16, 17, 18, 19, 20, 21, 22, 23,
1598 		24, 25, 26, 27, 28, 29, 30, 31
1599 };
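
/*
 * In get_eth_dcb_conf() below each of these 32 tags is mapped to a VMDq
 * pool via pool_map[i].pools = 1 << (i % nb_queue_pools), spreading the
 * tags round-robin across the 16 or 32 pools.
 */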
1600 
1601 static int
1602 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1603 {
1604 	uint8_t i;
1605 
1606 	/*
1607 	 * Build up the correct configuration for DCB+VT based on the VLAN
1608 	 * tags array given above and the number of traffic classes in use.
1609 	 */
1610 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1611 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1612 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1613 
1614 		/* VMDQ+DCB RX and TX configurations */
1615 		vmdq_rx_conf.enable_default_pool = 0;
1616 		vmdq_rx_conf.default_pool = 0;
1617 		vmdq_rx_conf.nb_queue_pools =
1618 			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1619 		vmdq_tx_conf.nb_queue_pools =
1620 			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1621 
1622 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1623 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1624 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1625 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1626 		}
1627 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1628 			vmdq_rx_conf.dcb_queue[i] = i;
1629 			vmdq_tx_conf.dcb_queue[i] = i;
1630 		}
1631 
1632 		/* Set the RX and TX multi-queue mode to VMDq+DCB. */
1633 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1634 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1635 		if (dcb_conf->pfc_en)
1636 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1637 		else
1638 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1639 
1640 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1641 				  sizeof(struct rte_eth_vmdq_dcb_conf)));
1642 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1643 				  sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1644 	}
1645 	else {
1646 		struct rte_eth_dcb_rx_conf rx_conf;
1647 		struct rte_eth_dcb_tx_conf tx_conf;
1648 
1649 		/* queue mapping configuration of DCB RX and TX */
1650 		if (dcb_conf->num_tcs == ETH_4_TCS)
1651 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1652 		else
1653 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1654 
1655 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1656 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1657 
1658 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1659 			rx_conf.dcb_queue[i] = i;
1660 			tx_conf.dcb_queue[i] = i;
1661 		}
1662 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1663 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1664 		if (dcb_conf->pfc_en)
1665 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1666 		else
1667 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1668 
1669 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1670 				  sizeof(struct rte_eth_dcb_rx_conf)));
1671 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1672 				  sizeof(struct rte_eth_dcb_tx_conf)));
1673 	}
1674 
1675 	return 0;
1676 }
1677 
1678 int
1679 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1680 {
1681 	struct rte_eth_conf port_conf;
1682 	struct rte_port *rte_port;
1683 	int retval;
1684 	uint16_t nb_vlan;
1685 	uint16_t i;
1686 
1687 	/* RX and TX queue configuration in DCB mode */
1688 	nb_rxq = 128;
1689 	nb_txq = 128;
1690 	rx_free_thresh = 64;
1691 
1692 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1693 	/* Enter DCB configuration status */
1694 	dcb_config = 1;
1695 
1696 	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1697 	/* Set the configuration of DCB in VT mode or DCB in non-VT mode. */
1698 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1699 	if (retval < 0)
1700 		return retval;
1701 
1702 	rte_port = &ports[pid];
1703 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1704 
1705 	rte_port->rx_conf.rx_thresh = rx_thresh;
1706 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1707 	rte_port->tx_conf.tx_thresh = tx_thresh;
1708 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1709 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1710 	/* VLAN filter */
1711 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1712 	for (i = 0; i < nb_vlan; i++) {
1713 		rx_vft_set(pid, vlan_tags[i], 1);
1714 	}
1715 
1716 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1717 	map_port_queue_stats_mapping_registers(pid, rte_port);
1718 
1719 	rte_port->dcb_flag = 1;
1720 
1721 	return 0;
1722 }
1723 
1724 #ifdef RTE_EXEC_ENV_BAREMETAL
1725 #define main _main
1726 #endif
1727 
1728 int
1729 main(int argc, char** argv)
1730 {
1731 	int  diag;
1732 	uint8_t port_id;
1733 
1734 	diag = rte_eal_init(argc, argv);
1735 	if (diag < 0)
1736 		rte_panic("Cannot init EAL\n");
1737 
1738 	if (rte_pmd_init_all())
1739 		rte_panic("Cannot init PMD\n");
1740 
1741 	if (rte_eal_pci_probe())
1742 		rte_panic("Cannot probe PCI\n");
1743 
1744 	nb_ports = (portid_t) rte_eth_dev_count();
1745 	if (nb_ports == 0)
1746 		rte_exit(EXIT_FAILURE, "No probed Ethernet devices - check that "
1747 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1748 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1749 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1750 			  "configuration file\n");
1752 
1753 	set_def_fwd_config();
1754 	if (nb_lcores == 0)
1755 		rte_panic("Empty set of forwarding logical cores - check the "
1756 			  "core mask supplied in the command parameters\n");
1757 
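	/*
	 * rte_eal_init() returns the number of arguments it consumed, so
	 * advance past the EAL arguments and hand only the application
	 * options to launch_args_parse().
	 */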
1758 	argc -= diag;
1759 	argv += diag;
1760 	if (argc > 1)
1761 		launch_args_parse(argc, argv);
1762 
1763 	if (nb_rxq > nb_txq)
1764 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1765 		       "but nb_txq=%d will prevent to fully test it.\n",
1766 		       nb_rxq, nb_txq);
1767 
1768 	init_config();
1769 	if (start_port(RTE_PORT_ALL) != 0)
1770 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
1771 
1772 	/* set all ports to promiscuous mode by default */
1773 	for (port_id = 0; port_id < nb_ports; port_id++)
1774 		rte_eth_promiscuous_enable(port_id);
1775 
1776 	if (interactive == 1)
1777 		prompt();
1778 	else {
1779 		char c;
1780 		int rc;
1781 
1782 		printf("No command line given; starting packet forwarding\n");
1783 		start_packet_forwarding(0);
1784 		printf("Press enter to exit\n");
1785 		rc = read(0, &c, 1);
1786 		if (rc < 0)
1787 			return 1;
1788 	}
1789 
1790 	return 0;
1791 }
1792