xref: /dpdk/app/test-pmd/testpmd.c (revision ca7feb2273665ac7544979461f83dbde3ed69c22)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81 
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 
84 /* Use the master lcore for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87 
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
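
/*
 * For illustration: NUMA-aware allocation is requested on the command line
 * (the exact option syntax is handled by launch_args_parse()), e.g.
 *
 *   testpmd -c 0xf -n 4 -- --numa --port-numa-config="(0,0),(1,1)"
 *
 * which places the resources of port 0 on socket 0 and those of port 1 on
 * socket 1.
 */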
96 
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102 
103 /*
104  * Use anonymous mapped memory (which might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107 
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  *   Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116 
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;	       /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124 
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134 
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137 
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140 
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine *fwd_engines[] = {
145 	&io_fwd_engine,
146 	&mac_fwd_engine,
147 	&mac_retry_fwd_engine,
148 	&rx_only_engine,
149 	&tx_only_engine,
150 	&csum_fwd_engine,
151 #ifdef RTE_LIBRTE_IEEE1588
152 	&ieee1588_fwd_engine,
153 #endif
154 	NULL,
155 };
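
/*
 * The active engine is chosen at run time; from the interactive prompt, for
 * example (engine names come from each engine's fwd_mode_name field):
 *
 *   testpmd> set fwd csum
 *   testpmd> start
 */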
156 
157 struct fwd_config cur_fwd_config;
158 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
159 
160 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
161 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
162                                       * specified on command-line. */
163 
164 /*
165  * Configuration of packet segments used by the "txonly" processing engine.
166  */
167 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
168 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
169 	TXONLY_DEF_PACKET_LEN,
170 };
171 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
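
/*
 * For example, "set txpkts 64,128" at the testpmd prompt splits each TXONLY
 * packet into two segments of 64 and 128 bytes, i.e.
 * tx_pkt_seg_lengths = {64, 128}, tx_pkt_nb_segs = 2 and tx_pkt_length = 192.
 */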
172 
173 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
174 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
175 
176 /* Whether the current configuration is in DCB mode; 0 means it is not. */
177 uint8_t dcb_config = 0;
178 
179 /* Whether DCB is in testing status. */
180 uint8_t dcb_test = 0;
181 
182 /* DCB-on, VT-on queue mapping is the default. */
183 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
184 
185 /*
186  * Configurable number of RX/TX queues.
187  */
188 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
189 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
190 
191 /*
192  * Configurable number of RX/TX ring descriptors.
193  */
194 #define RTE_TEST_RX_DESC_DEFAULT 128
195 #define RTE_TEST_TX_DESC_DEFAULT 512
196 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
197 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
198 
199 /*
200  * Configurable values of RX and TX ring threshold registers.
201  */
202 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
203 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
204 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
205 
206 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
207 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
208 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
209 
210 struct rte_eth_thresh rx_thresh = {
211 	.pthresh = RX_PTHRESH,
212 	.hthresh = RX_HTHRESH,
213 	.wthresh = RX_WTHRESH,
214 };
215 
216 struct rte_eth_thresh tx_thresh = {
217 	.pthresh = TX_PTHRESH,
218 	.hthresh = TX_HTHRESH,
219 	.wthresh = TX_WTHRESH,
220 };
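
/*
 * Both sets of defaults can be overridden from the command line through the
 * per-register threshold options parsed in launch_args_parse() (e.g. the
 * rxpt/rxht/rxwt and txpt/txht/txwt options).
 */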
221 
222 /*
223  * Configurable value of RX free threshold.
224  */
225 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
226 
227 /*
228  * Configurable value of RX drop enable.
229  */
230 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
231 
232 /*
233  * Configurable value of TX free threshold.
234  */
235 uint16_t tx_free_thresh = 0; /* Use default values. */
236 
237 /*
238  * Configurable value of TX RS bit threshold.
239  */
240 uint16_t tx_rs_thresh = 0; /* Use default values. */
241 
242 /*
243  * Configurable value of TX queue flags.
244  */
245 uint32_t txq_flags = 0; /* No flags set. */
246 
247 /*
248  * Receive Side Scaling (RSS) configuration.
249  */
250 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
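
/*
 * The hash functions can be changed at run time, e.g. with the
 * "port config all rss" command at the testpmd prompt.
 */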
251 
252 /*
253  * Port topology configuration
254  */
255 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
256 
257 /*
258  * Avoid flushing all the RX streams before starting to forward.
259  */
260 uint8_t no_flush_rx = 0; /* flush by default */
261 
262 /*
263  * NIC bypass mode configuration options.
264  */
265 #ifdef RTE_NIC_BYPASS
266 
267 /* The NIC bypass watchdog timeout. */
268 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
269 
270 #endif
271 
272 /*
273  * Ethernet device configuration.
274  */
275 struct rte_eth_rxmode rx_mode = {
276 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
277 	.split_hdr_size = 0,
278 	.header_split   = 0, /**< Header Split disabled. */
279 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
280 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
281 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
282 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
283 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
284 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
285 };
286 
287 struct rte_fdir_conf fdir_conf = {
288 	.mode = RTE_FDIR_MODE_NONE,
289 	.pballoc = RTE_FDIR_PBALLOC_64K,
290 	.status = RTE_FDIR_REPORT_STATUS,
291 	.flexbytes_offset = 0x6,
292 	.drop_queue = 127,
293 };
294 
295 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
296 
297 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
298 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
299 
300 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
301 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
302 
303 uint16_t nb_tx_queue_stats_mappings = 0;
304 uint16_t nb_rx_queue_stats_mappings = 0;
305 
306 /* Forward function declarations */
307 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
308 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
309 
310 /*
311  * Check if all the ports are started.
312  * If yes, return positive value. If not, return zero.
313  */
314 static int all_ports_started(void);
315 
316 /*
317  * Setup default configuration.
318  */
319 static void
320 set_default_fwd_lcores_config(void)
321 {
322 	unsigned int i;
323 	unsigned int nb_lc;
324 
325 	nb_lc = 0;
326 	for (i = 0; i < RTE_MAX_LCORE; i++) {
327 		if (! rte_lcore_is_enabled(i))
328 			continue;
329 		if (i == rte_get_master_lcore())
330 			continue;
331 		fwd_lcores_cpuids[nb_lc++] = i;
332 	}
333 	nb_lcores = (lcoreid_t) nb_lc;
334 	nb_cfg_lcores = nb_lcores;
335 	nb_fwd_lcores = 1;
336 }
337 
338 static void
339 set_def_peer_eth_addrs(void)
340 {
341 	portid_t i;
342 
343 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
344 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
345 		peer_eth_addrs[i].addr_bytes[5] = i;
346 	}
347 }
348 
349 static void
350 set_default_fwd_ports_config(void)
351 {
352 	portid_t pt_id;
353 
354 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
355 		fwd_ports_ids[pt_id] = pt_id;
356 
357 	nb_cfg_ports = nb_ports;
358 	nb_fwd_ports = nb_ports;
359 }
360 
361 void
362 set_def_fwd_config(void)
363 {
364 	set_default_fwd_lcores_config();
365 	set_def_peer_eth_addrs();
366 	set_default_fwd_ports_config();
367 }
368 
369 /*
370  * Configuration initialisation done once at init time.
371  */
372 struct mbuf_ctor_arg {
373 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
374 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
375 };
376 
377 struct mbuf_pool_ctor_arg {
378 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
379 };
380 
381 static void
382 testpmd_mbuf_ctor(struct rte_mempool *mp,
383 		  void *opaque_arg,
384 		  void *raw_mbuf,
385 		  __attribute__((unused)) unsigned i)
386 {
387 	struct mbuf_ctor_arg *mb_ctor_arg;
388 	struct rte_mbuf    *mb;
389 
390 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
391 	mb = (struct rte_mbuf *) raw_mbuf;
392 
393 	mb->type         = RTE_MBUF_PKT;
394 	mb->pool         = mp;
395 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
396 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
397 			mb_ctor_arg->seg_buf_offset);
398 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
400 	mb->ol_flags     = 0;
401 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
402 	mb->pkt.nb_segs  = 1;
403 	mb->pkt.vlan_macip.data = 0;
404 	mb->pkt.hash.rss = 0;
405 }
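
/*
 * Resulting layout of each raw mempool object built by the constructors
 * above (sizes computed in mbuf_pool_create()):
 *
 *   +--------------------------+----------------------+-----------------+
 *   | struct rte_mbuf, rounded | RTE_PKTMBUF_HEADROOM | mbuf_seg_size   |
 *   | up to a cache line       |                      | bytes of data   |
 *   +--------------------------+----------------------+-----------------+
 *   ^ raw_mbuf                 ^ buf_addr             ^ pkt.data
 */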
406 
407 static void
408 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
409 		       void *opaque_arg)
410 {
411 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
412 	struct rte_pktmbuf_pool_private *mbp_priv;
413 
414 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
415 		printf("%s(%s) private_data_size %d < %d\n",
416 		       __func__, mp->name, (int) mp->private_data_size,
417 		       (int) sizeof(struct rte_pktmbuf_pool_private));
418 		return;
419 	}
420 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
421 	mbp_priv = rte_mempool_get_priv(mp);
422 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
423 }
424 
425 static void
426 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
427 		 unsigned int socket_id)
428 {
429 	char pool_name[RTE_MEMPOOL_NAMESIZE];
430 	struct rte_mempool *rte_mp;
431 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
432 	struct mbuf_ctor_arg mb_ctor_arg;
433 	uint32_t mb_size;
434 
435 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
436 						mbuf_seg_size);
437 	mb_ctor_arg.seg_buf_offset =
438 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
439 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
440 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
441 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
442 
443 #ifdef RTE_LIBRTE_PMD_XENVIRT
444 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
445                                    (unsigned) mb_mempool_cache,
446                                    sizeof(struct rte_pktmbuf_pool_private),
447                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
448                                    testpmd_mbuf_ctor, &mb_ctor_arg,
449                                    socket_id, 0);
450 
453 #else
454 	if (mp_anon != 0)
455 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
456 				    (unsigned) mb_mempool_cache,
457 				    sizeof(struct rte_pktmbuf_pool_private),
458 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
459 				    testpmd_mbuf_ctor, &mb_ctor_arg,
460 				    socket_id, 0);
461 	else
462 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
463 				    (unsigned) mb_mempool_cache,
464 				    sizeof(struct rte_pktmbuf_pool_private),
465 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
466 				    testpmd_mbuf_ctor, &mb_ctor_arg,
467 				    socket_id, 0);
468 
469 #endif
470 
471 	if (rte_mp == NULL) {
472 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
473 						"failed\n", socket_id);
474 	} else if (verbose_level > 0) {
475 		rte_mempool_dump(rte_mp);
476 	}
477 }
478 
479 /*
480  * Check whether the given socket id is valid in NUMA mode;
481  * if valid, return 0, otherwise return -1.
482  */
483 static int
484 check_socket_id(const unsigned int socket_id)
485 {
486 	static int warning_once = 0;
487 
488 	if (socket_id >= MAX_SOCKET) {
489 		if (!warning_once && numa_support)
490 			printf("Warning: NUMA should be configured manually by"
491 			       " using --port-numa-config and"
492 			       " --ring-numa-config parameters along with"
493 			       " --numa.\n");
494 		warning_once = 1;
495 		return -1;
496 	}
497 	return 0;
498 }
499 
500 static void
501 init_config(void)
502 {
503 	portid_t pid;
504 	struct rte_port *port;
505 	struct rte_mempool *mbp;
506 	unsigned int nb_mbuf_per_pool;
507 	lcoreid_t  lc_id;
508 	uint8_t port_per_socket[MAX_SOCKET];
509 
510 	memset(port_per_socket, 0, MAX_SOCKET);
511 	/* Configuration of logical cores. */
512 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
513 				sizeof(struct fwd_lcore *) * nb_lcores,
514 				CACHE_LINE_SIZE);
515 	if (fwd_lcores == NULL) {
516 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
517 							"failed\n", nb_lcores);
518 	}
519 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
520 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
521 					       sizeof(struct fwd_lcore),
522 					       CACHE_LINE_SIZE);
523 		if (fwd_lcores[lc_id] == NULL) {
524 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
525 								"failed\n");
526 		}
527 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
528 	}
529 
530 	/*
531 	 * Create mbuf pools.
532 	 * If NUMA support is disabled, create a single pool of mbufs in
533 	 * socket 0 memory by default.
534 	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
535 	 *
536 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
537 	 * and nb_txd can be re-configured at run time.
538 	 */
539 	if (param_total_num_mbufs)
540 		nb_mbuf_per_pool = param_total_num_mbufs;
541 	else {
542 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
543 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
544 
545 		if (!numa_support)
546 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
547 	}
548 
549 	if (!numa_support) {
550 		if (socket_num == UMA_NO_CONFIG)
551 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
552 		else
553 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
554 						 socket_num);
555 	}
556 
557 	/* Configuration of Ethernet ports. */
558 	ports = rte_zmalloc("testpmd: ports",
559 			    sizeof(struct rte_port) * nb_ports,
560 			    CACHE_LINE_SIZE);
561 	if (ports == NULL) {
562 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
563 							"failed\n", nb_ports);
564 	}
565 
566 	for (pid = 0; pid < nb_ports; pid++) {
567 		port = &ports[pid];
568 		rte_eth_dev_info_get(pid, &port->dev_info);
569 
570 		if (numa_support) {
571 			if (port_numa[pid] != NUMA_NO_CONFIG)
572 				port_per_socket[port_numa[pid]]++;
573 			else {
574 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
575 
576 				/* if socket_id is invalid, set to 0 */
577 				if (check_socket_id(socket_id) < 0)
578 					socket_id = 0;
579 				port_per_socket[socket_id]++;
580 			}
581 		}
582 
583 		/* set flag to initialize port/queue */
584 		port->need_reconfig = 1;
585 		port->need_reconfig_queues = 1;
586 	}
587 
588 	if (numa_support) {
589 		uint8_t i;
590 		unsigned int nb_mbuf;
591 
592 		if (param_total_num_mbufs)
593 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
594 
595 		for (i = 0; i < MAX_SOCKET; i++) {
596 			nb_mbuf = (nb_mbuf_per_pool *
597 						port_per_socket[i]);
598 			if (nb_mbuf)
599 				mbuf_pool_create(mbuf_data_size,
600 						nb_mbuf, i);
601 		}
602 	}
603 	init_port_config();
604 
605 	/*
606 	 * Record which mbuf pool each logical core should use, if needed.
607 	 */
608 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
609 		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
610 		if (mbp == NULL)
611 			mbp = mbuf_pool_find(0);
612 		fwd_lcores[lc_id]->mbp = mbp;
613 	}
614 
615 	/* Configuration of packet forwarding streams. */
616 	if (init_fwd_streams() < 0)
617 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
618 }
619 
620 int
621 init_fwd_streams(void)
622 {
623 	portid_t pid;
624 	struct rte_port *port;
625 	streamid_t sm_id, nb_fwd_streams_new;
626 
627 	/* Set the socket id, depending on whether NUMA is enabled. */
628 	for (pid = 0; pid < nb_ports; pid++) {
629 		port = &ports[pid];
630 		if (nb_rxq > port->dev_info.max_rx_queues) {
631 			printf("Fail: nb_rxq(%d) is greater than "
632 				"max_rx_queues(%d)\n", nb_rxq,
633 				port->dev_info.max_rx_queues);
634 			return -1;
635 		}
636 		if (nb_txq > port->dev_info.max_tx_queues) {
637 			printf("Fail: nb_txq(%d) is greater than "
638 				"max_tx_queues(%d)\n", nb_txq,
639 				port->dev_info.max_tx_queues);
640 			return -1;
641 		}
642 		if (numa_support) {
643 			if (port_numa[pid] != NUMA_NO_CONFIG)
644 				port->socket_id = port_numa[pid];
645 			else {
646 				port->socket_id = rte_eth_dev_socket_id(pid);
647 
648 				/* if socket_id is invalid, set to 0 */
649 				if (check_socket_id(port->socket_id) < 0)
650 					port->socket_id = 0;
651 			}
652 		}
653 		else {
654 			if (socket_num == UMA_NO_CONFIG)
655 				port->socket_id = 0;
656 			else
657 				port->socket_id = socket_num;
658 		}
659 	}
660 
661 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
662 	if (nb_fwd_streams_new == nb_fwd_streams)
663 		return 0;
664 	/* clear the old */
665 	if (fwd_streams != NULL) {
666 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
667 			if (fwd_streams[sm_id] == NULL)
668 				continue;
669 			rte_free(fwd_streams[sm_id]);
670 			fwd_streams[sm_id] = NULL;
671 		}
672 		rte_free(fwd_streams);
673 		fwd_streams = NULL;
674 	}
675 
676 	/* init new */
677 	nb_fwd_streams = nb_fwd_streams_new;
678 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
679 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
680 	if (fwd_streams == NULL)
681 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
682 						"failed\n", nb_fwd_streams);
683 
684 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
685 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
686 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
687 		if (fwd_streams[sm_id] == NULL)
688 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
689 								" failed\n");
690 	}
691 
692 	return 0;
693 }
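
/*
 * Example: with 2 probed ports and nb_rxq = 4, the function above allocates
 * nb_fwd_streams = 2 * 4 = 8 fwd_stream structures, one per (port, RX queue)
 * pair.
 */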
694 
695 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
696 static void
697 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
698 {
699 	unsigned int total_burst;
700 	unsigned int nb_burst;
701 	unsigned int burst_stats[3];
702 	uint16_t pktnb_stats[3];
703 	uint16_t nb_pkt;
704 	int burst_percent[3];
705 
706 	/*
707 	 * First compute the total number of packet bursts and the
708 	 * two highest numbers of bursts of the same number of packets.
709 	 */
710 	total_burst = 0;
711 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
712 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
713 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
714 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
715 		if (nb_burst == 0)
716 			continue;
717 		total_burst += nb_burst;
718 		if (nb_burst > burst_stats[0]) {
719 			burst_stats[1] = burst_stats[0];
720 			pktnb_stats[1] = pktnb_stats[0];
721 			burst_stats[0] = nb_burst;
722 			pktnb_stats[0] = nb_pkt;
723 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
724 	}
725 	if (total_burst == 0)
726 		return;
727 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
728 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
729 	       burst_percent[0], (int) pktnb_stats[0]);
730 	if (burst_stats[0] == total_burst) {
731 		printf("]\n");
732 		return;
733 	}
734 	if (burst_stats[0] + burst_stats[1] == total_burst) {
735 		printf(" + %d%% of %d pkts]\n",
736 		       100 - burst_percent[0], pktnb_stats[1]);
737 		return;
738 	}
739 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
740 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
741 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
742 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
743 		return;
744 	}
745 	printf(" + %d%% of %d pkts + %d%% of others]\n",
746 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
747 }
748 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
749 
750 static void
751 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
752 {
753 	struct rte_port *port;
754 	uint8_t i;
755 
756 	static const char *fwd_stats_border = "----------------------";
757 
758 	port = &ports[port_id];
759 	printf("\n  %s Forward statistics for port %-2d %s\n",
760 	       fwd_stats_border, port_id, fwd_stats_border);
761 
762 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
763 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
764 		       "%-"PRIu64"\n",
765 		       stats->ipackets, stats->ierrors,
766 		       (uint64_t) (stats->ipackets + stats->ierrors));
767 
768 		if (cur_fwd_eng == &csum_fwd_engine)
769 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
770 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
771 
772 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
773 		       "%-"PRIu64"\n",
774 		       stats->opackets, port->tx_dropped,
775 		       (uint64_t) (stats->opackets + port->tx_dropped));
776 
777 		if (stats->rx_nombuf > 0)
778 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
779 
780 	}
781 	else {
782 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
783 		       "%14"PRIu64"\n",
784 		       stats->ipackets, stats->ierrors,
785 		       (uint64_t) (stats->ipackets + stats->ierrors));
786 
787 		if (cur_fwd_eng == &csum_fwd_engine)
788 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
789 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
790 
791 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
792 		       "%14"PRIu64"\n",
793 		       stats->opackets, port->tx_dropped,
794 		       (uint64_t) (stats->opackets + port->tx_dropped));
795 
796 		if (stats->rx_nombuf > 0)
797 			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
798 	}
799 
800 	/* Display statistics of XON/XOFF pause frames, if any. */
801 	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
802 	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
803 		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
804 		       stats->rx_pause_xoff, stats->rx_pause_xon);
805 		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
806 		       stats->tx_pause_xoff, stats->tx_pause_xon);
807 	}
808 
809 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
810 	if (port->rx_stream)
811 		pkt_burst_stats_display("RX",
812 			&port->rx_stream->rx_burst_stats);
813 	if (port->tx_stream)
814 		pkt_burst_stats_display("TX",
815 			&port->tx_stream->tx_burst_stats);
816 #endif
817 	/* stats fdir */
818 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
819 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
820 		       stats->fdirmiss,
821 		       stats->fdirmatch);
822 
823 	if (port->rx_queue_stats_mapping_enabled) {
824 		printf("\n");
825 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
826 			printf("  Stats reg %2d RX-packets:%14"PRIu64
827 			       "     RX-errors:%14"PRIu64
828 			       "    RX-bytes:%14"PRIu64"\n",
829 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
830 		}
831 		printf("\n");
832 	}
833 	if (port->tx_queue_stats_mapping_enabled) {
834 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
835 			printf("  Stats reg %2d TX-packets:%14"PRIu64
836 			       "                                 TX-bytes:%14"PRIu64"\n",
837 			       i, stats->q_opackets[i], stats->q_obytes[i]);
838 		}
839 	}
840 
841 	printf("  %s--------------------------------%s\n",
842 	       fwd_stats_border, fwd_stats_border);
843 }
844 
845 static void
846 fwd_stream_stats_display(streamid_t stream_id)
847 {
848 	struct fwd_stream *fs;
849 	static const char *fwd_top_stats_border = "-------";
850 
851 	fs = fwd_streams[stream_id];
852 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
853 	    (fs->fwd_dropped == 0))
854 		return;
855 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
856 	       "TX Port=%2d/Queue=%2d %s\n",
857 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
858 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
859 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
860 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
861 
862 	/* If in checksum forwarding mode, display checksum error counters. */
863 	if (cur_fwd_eng == &csum_fwd_engine) {
864 		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
865 		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
866 	}
867 
868 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
869 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
870 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
871 #endif
872 }
873 
874 static void
875 flush_fwd_rx_queues(void)
876 {
877 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
878 	portid_t  rxp;
879 	portid_t port_id;
880 	queueid_t rxq;
881 	uint16_t  nb_rx;
882 	uint16_t  i;
883 	uint8_t   j;
884 
885 	for (j = 0; j < 2; j++) {
886 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
887 			for (rxq = 0; rxq < nb_rxq; rxq++) {
888 				port_id = fwd_ports_ids[rxp];
889 				do {
890 					nb_rx = rte_eth_rx_burst(port_id, rxq,
891 						pkts_burst, MAX_PKT_BURST);
892 					for (i = 0; i < nb_rx; i++)
893 						rte_pktmbuf_free(pkts_burst[i]);
894 				} while (nb_rx > 0);
895 			}
896 		}
897 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
898 	}
899 }
900 
901 static void
902 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
903 {
904 	struct fwd_stream **fsm;
905 	streamid_t nb_fs;
906 	streamid_t sm_id;
907 
908 	fsm = &fwd_streams[fc->stream_idx];
909 	nb_fs = fc->stream_nb;
910 	do {
911 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
912 			(*pkt_fwd)(fsm[sm_id]);
913 	} while (! fc->stopped);
914 }
915 
916 static int
917 start_pkt_forward_on_core(void *fwd_arg)
918 {
919 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
920 			     cur_fwd_config.fwd_eng->packet_fwd);
921 	return 0;
922 }
923 
924 /*
925  * Run the TXONLY packet forwarding engine to send a single burst of packets.
926  * Used to start communication flows in network loopback test configurations.
927  */
928 static int
929 run_one_txonly_burst_on_core(void *fwd_arg)
930 {
931 	struct fwd_lcore *fwd_lc;
932 	struct fwd_lcore tmp_lcore;
933 
934 	fwd_lc = (struct fwd_lcore *) fwd_arg;
935 	tmp_lcore = *fwd_lc;
936 	tmp_lcore.stopped = 1;
937 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
938 	return 0;
939 }
940 
941 /*
942  * Launch packet forwarding:
943  *     - Setup per-port forwarding context.
944  *     - Launch logical cores with their forwarding configuration.
945  */
946 static void
947 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
948 {
949 	port_fwd_begin_t port_fwd_begin;
950 	unsigned int i;
951 	unsigned int lc_id;
952 	int diag;
953 
954 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
955 	if (port_fwd_begin != NULL) {
956 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
957 			(*port_fwd_begin)(fwd_ports_ids[i]);
958 	}
959 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
960 		lc_id = fwd_lcores_cpuids[i];
961 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
962 			fwd_lcores[i]->stopped = 0;
963 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
964 						     fwd_lcores[i], lc_id);
965 			if (diag != 0)
966 				printf("launch lcore %u failed - diag=%d\n",
967 				       lc_id, diag);
968 		}
969 	}
970 }
971 
972 /*
973  * Launch packet forwarding configuration.
974  */
975 void
976 start_packet_forwarding(int with_tx_first)
977 {
978 	port_fwd_begin_t port_fwd_begin;
979 	port_fwd_end_t  port_fwd_end;
980 	struct rte_port *port;
981 	unsigned int i;
982 	portid_t   pt_id;
983 	streamid_t sm_id;
984 
985 	if (all_ports_started() == 0) {
986 		printf("Not all ports were started\n");
987 		return;
988 	}
989 	if (test_done == 0) {
990 		printf("Packet forwarding already started\n");
991 		return;
992 	}
993 	if (dcb_test) {
994 		for (i = 0; i < nb_fwd_ports; i++) {
995 			pt_id = fwd_ports_ids[i];
996 			port = &ports[pt_id];
997 			if (!port->dcb_flag) {
998 				printf("In DCB mode, all forwarding ports must "
999 				       "be configured in this mode.\n");
1000 				return;
1001 			}
1002 		}
1003 		if (nb_fwd_lcores == 1) {
1004 			printf("In DCB mode, the number of forwarding cores "
1005 			       "must be larger than 1.\n");
1006 			return;
1007 		}
1008 	}
1009 	test_done = 0;
1010 
1011 	if (!no_flush_rx)
1012 		flush_fwd_rx_queues();
1013 
1014 	fwd_config_setup();
1015 	rxtx_config_display();
1016 
1017 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1018 		pt_id = fwd_ports_ids[i];
1019 		port = &ports[pt_id];
1020 		rte_eth_stats_get(pt_id, &port->stats);
1021 		port->tx_dropped = 0;
1022 
1023 		map_port_queue_stats_mapping_registers(pt_id, port);
1024 	}
1025 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1026 		fwd_streams[sm_id]->rx_packets = 0;
1027 		fwd_streams[sm_id]->tx_packets = 0;
1028 		fwd_streams[sm_id]->fwd_dropped = 0;
1029 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1030 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1031 
1032 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1033 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1034 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1035 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1036 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1037 #endif
1038 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1039 		fwd_streams[sm_id]->core_cycles = 0;
1040 #endif
1041 	}
1042 	if (with_tx_first) {
1043 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1044 		if (port_fwd_begin != NULL) {
1045 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1046 				(*port_fwd_begin)(fwd_ports_ids[i]);
1047 		}
1048 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1049 		rte_eal_mp_wait_lcore();
1050 		port_fwd_end = tx_only_engine.port_fwd_end;
1051 		if (port_fwd_end != NULL) {
1052 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1053 				(*port_fwd_end)(fwd_ports_ids[i]);
1054 		}
1055 	}
1056 	launch_packet_forwarding(start_pkt_forward_on_core);
1057 }
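
/*
 * From the interactive prompt, "start" maps to start_packet_forwarding(0)
 * and "start tx_first" to start_packet_forwarding(1); the latter first sends
 * one TXONLY burst on every forwarding port, which seeds the traffic in
 * network loopback test setups.
 */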
1058 
1059 void
1060 stop_packet_forwarding(void)
1061 {
1062 	struct rte_eth_stats stats;
1063 	struct rte_port *port;
1064 	port_fwd_end_t  port_fwd_end;
1065 	int i;
1066 	portid_t   pt_id;
1067 	streamid_t sm_id;
1068 	lcoreid_t  lc_id;
1069 	uint64_t total_recv;
1070 	uint64_t total_xmit;
1071 	uint64_t total_rx_dropped;
1072 	uint64_t total_tx_dropped;
1073 	uint64_t total_rx_nombuf;
1074 	uint64_t tx_dropped;
1075 	uint64_t rx_bad_ip_csum;
1076 	uint64_t rx_bad_l4_csum;
1077 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1078 	uint64_t fwd_cycles;
1079 #endif
1080 	static const char *acc_stats_border = "+++++++++++++++";
1081 
1082 	if (all_ports_started() == 0) {
1083 		printf("Not all ports were started\n");
1084 		return;
1085 	}
1086 	if (test_done) {
1087 		printf("Packet forwarding not started\n");
1088 		return;
1089 	}
1090 	printf("Telling cores to stop...");
1091 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1092 		fwd_lcores[lc_id]->stopped = 1;
1093 	printf("\nWaiting for lcores to finish...\n");
1094 	rte_eal_mp_wait_lcore();
1095 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1096 	if (port_fwd_end != NULL) {
1097 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1098 			pt_id = fwd_ports_ids[i];
1099 			(*port_fwd_end)(pt_id);
1100 		}
1101 	}
1102 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1103 	fwd_cycles = 0;
1104 #endif
1105 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1106 		if (cur_fwd_config.nb_fwd_streams >
1107 		    cur_fwd_config.nb_fwd_ports) {
1108 			fwd_stream_stats_display(sm_id);
1109 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1110 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1111 		} else {
1112 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1113 				fwd_streams[sm_id];
1114 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1115 				fwd_streams[sm_id];
1116 		}
1117 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1118 		tx_dropped = (uint64_t) (tx_dropped +
1119 					 fwd_streams[sm_id]->fwd_dropped);
1120 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1121 
1122 		rx_bad_ip_csum =
1123 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1124 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1125 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1126 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1127 							rx_bad_ip_csum;
1128 
1129 		rx_bad_l4_csum =
1130 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1131 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1132 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1133 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1134 							rx_bad_l4_csum;
1135 
1136 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1137 		fwd_cycles = (uint64_t) (fwd_cycles +
1138 					 fwd_streams[sm_id]->core_cycles);
1139 #endif
1140 	}
1141 	total_recv = 0;
1142 	total_xmit = 0;
1143 	total_rx_dropped = 0;
1144 	total_tx_dropped = 0;
1145 	total_rx_nombuf  = 0;
1146 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1147 		pt_id = fwd_ports_ids[i];
1148 
1149 		port = &ports[pt_id];
1150 		rte_eth_stats_get(pt_id, &stats);
1151 		stats.ipackets -= port->stats.ipackets;
1152 		port->stats.ipackets = 0;
1153 		stats.opackets -= port->stats.opackets;
1154 		port->stats.opackets = 0;
1155 		stats.ibytes   -= port->stats.ibytes;
1156 		port->stats.ibytes = 0;
1157 		stats.obytes   -= port->stats.obytes;
1158 		port->stats.obytes = 0;
1159 		stats.ierrors  -= port->stats.ierrors;
1160 		port->stats.ierrors = 0;
1161 		stats.oerrors  -= port->stats.oerrors;
1162 		port->stats.oerrors = 0;
1163 		stats.rx_nombuf -= port->stats.rx_nombuf;
1164 		port->stats.rx_nombuf = 0;
1165 		stats.fdirmatch -= port->stats.fdirmatch;
1166 		port->stats.fdirmatch = 0;
1167 		stats.fdirmiss -= port->stats.fdirmiss;
1168 		port->stats.fdirmiss = 0;
1169 
1170 		total_recv += stats.ipackets;
1171 		total_xmit += stats.opackets;
1172 		total_rx_dropped += stats.ierrors;
1173 		total_tx_dropped += port->tx_dropped;
1174 		total_rx_nombuf  += stats.rx_nombuf;
1175 
1176 		fwd_port_stats_display(pt_id, &stats);
1177 	}
1178 	printf("\n  %s Accumulated forward statistics for all ports"
1179 	       "%s\n",
1180 	       acc_stats_border, acc_stats_border);
1181 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1182 	       "%-"PRIu64"\n"
1183 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1184 	       "%-"PRIu64"\n",
1185 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1186 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1187 	if (total_rx_nombuf > 0)
1188 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1189 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1190 	       "%s\n",
1191 	       acc_stats_border, acc_stats_border);
1192 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1193 	if (total_recv > 0)
1194 		printf("\n  CPU cycles/packet=%u (total cycles="
1195 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1196 		       (unsigned int)(fwd_cycles / total_recv),
1197 		       fwd_cycles, total_recv);
1198 #endif
1199 	printf("\nDone.\n");
1200 	test_done = 1;
1201 }
1202 
1203 static int
1204 all_ports_started(void)
1205 {
1206 	portid_t pi;
1207 	struct rte_port *port;
1208 
1209 	for (pi = 0; pi < nb_ports; pi++) {
1210 		port = &ports[pi];
1211 		/* Check if there is a port which is not started */
1212 		if (port->port_status != RTE_PORT_STARTED)
1213 			return 0;
1214 	}
1215 
1216 	/* All ports are started */
1217 	return 1;
1218 }
1219 
1220 int
1221 start_port(portid_t pid)
1222 {
1223 	int diag, need_check_link_status = 0;
1224 	portid_t pi;
1225 	queueid_t qi;
1226 	struct rte_port *port;
1227 
1228 	if (test_done == 0) {
1229 		printf("Please stop forwarding first\n");
1230 		return -1;
1231 	}
1232 
1233 	if (init_fwd_streams() < 0) {
1234 		printf("Fail from init_fwd_streams()\n");
1235 		return -1;
1236 	}
1237 
1238 	if (dcb_config)
1239 		dcb_test = 1;
1240 	for (pi = 0; pi < nb_ports; pi++) {
1241 		if (pid < nb_ports && pid != pi)
1242 			continue;
1243 
1244 		port = &ports[pi];
1245 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1246 						 RTE_PORT_HANDLING) == 0) {
1247 			printf("Port %d is not stopped\n", pi);
1248 			continue;
1249 		}
1250 
1251 		if (port->need_reconfig > 0) {
1252 			port->need_reconfig = 0;
1253 
1254 			printf("Configuring Port %d (socket %u)\n", pi,
1255 					port->socket_id);
1256 			/* configure port */
1257 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1258 						&(port->dev_conf));
1259 			if (diag != 0) {
1260 				if (rte_atomic16_cmpset(&(port->port_status),
1261 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1262 					printf("Port %d can not be set back "
1263 							"to stopped\n", pi);
1264 				printf("Fail to configure port %d\n", pi);
1265 				/* try to reconfigure port next time */
1266 				port->need_reconfig = 1;
1267 				return -1;
1268 			}
1269 		}
1270 		if (port->need_reconfig_queues > 0) {
1271 			port->need_reconfig_queues = 0;
1272 			/* setup tx queues */
1273 			for (qi = 0; qi < nb_txq; qi++) {
1274 				if ((numa_support) &&
1275 					(txring_numa[pi] != NUMA_NO_CONFIG))
1276 					diag = rte_eth_tx_queue_setup(pi, qi,
1277 						nb_txd, txring_numa[pi],
1278 						&(port->tx_conf));
1279 				else
1280 					diag = rte_eth_tx_queue_setup(pi, qi,
1281 						nb_txd, port->socket_id,
1282 						&(port->tx_conf));
1283 
1284 				if (diag == 0)
1285 					continue;
1286 
1287 				/* Fail to setup tx queue, return */
1288 				if (rte_atomic16_cmpset(&(port->port_status),
1289 							RTE_PORT_HANDLING,
1290 							RTE_PORT_STOPPED) == 0)
1291 					printf("Port %d can not be set back "
1292 							"to stopped\n", pi);
1293 				printf("Fail to configure port %d tx queues\n", pi);
1294 				/* try to reconfigure queues next time */
1295 				port->need_reconfig_queues = 1;
1296 				return -1;
1297 			}
1298 			/* setup rx queues */
1299 			for (qi = 0; qi < nb_rxq; qi++) {
1300 				if ((numa_support) &&
1301 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1302 					struct rte_mempool * mp =
1303 						mbuf_pool_find(rxring_numa[pi]);
1304 					if (mp == NULL) {
1305 						printf("Failed to setup RX queue: "
1306 							"no mempool allocated "
1307 							"on socket %d\n",
1308 							rxring_numa[pi]);
1309 						return -1;
1310 					}
1311 
1312 					diag = rte_eth_rx_queue_setup(pi, qi,
1313 					     nb_rxd, rxring_numa[pi],
1314 					     &(port->rx_conf), mp);
1315 				}
1316 				else
1317 					diag = rte_eth_rx_queue_setup(pi, qi,
1318 					     nb_rxd, port->socket_id,
1319 					     &(port->rx_conf),
1320 					     mbuf_pool_find(port->socket_id));
1321 
1322 				if (diag == 0)
1323 					continue;
1324 
1326 				/* Fail to setup rx queue, return */
1327 				if (rte_atomic16_cmpset(&(port->port_status),
1328 							RTE_PORT_HANDLING,
1329 							RTE_PORT_STOPPED) == 0)
1330 					printf("Port %d can not be set back "
1331 							"to stopped\n", pi);
1332 				printf("Fail to configure port %d rx queues\n", pi);
1333 				/* try to reconfigure queues next time */
1334 				port->need_reconfig_queues = 1;
1335 				return -1;
1336 			}
1337 		}
1338 		/* start port */
1339 		if (rte_eth_dev_start(pi) < 0) {
1340 			printf("Fail to start port %d\n", pi);
1341 
1342 			/* Failed to start the port: set it back to stopped */
1343 			if (rte_atomic16_cmpset(&(port->port_status),
1344 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1345 				printf("Port %d can not be set back to "
1346 							"stopped\n", pi);
1347 			continue;
1348 		}
1349 
1350 		if (rte_atomic16_cmpset(&(port->port_status),
1351 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1352 			printf("Port %d cannot be set to started\n", pi);
1353 
1354 		/* at least one port started, need to check link status */
1355 		need_check_link_status = 1;
1356 	}
1357 
1358 	if (need_check_link_status)
1359 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1360 	else
1361 		printf("Please stop the ports first\n");
1362 
1363 	printf("Done\n");
1364 	return 0;
1365 }
1366 
1367 void
1368 stop_port(portid_t pid)
1369 {
1370 	portid_t pi;
1371 	struct rte_port *port;
1372 	int need_check_link_status = 0;
1373 
1374 	if (test_done == 0) {
1375 		printf("Please stop forwarding first\n");
1376 		return;
1377 	}
1378 	if (dcb_test) {
1379 		dcb_test = 0;
1380 		dcb_config = 0;
1381 	}
1382 	printf("Stopping ports...\n");
1383 
1384 	for (pi = 0; pi < nb_ports; pi++) {
1385 		if (pid < nb_ports && pid != pi)
1386 			continue;
1387 
1388 		port = &ports[pi];
1389 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1390 						RTE_PORT_HANDLING) == 0)
1391 			continue;
1392 
1393 		rte_eth_dev_stop(pi);
1394 
1395 		if (rte_atomic16_cmpset(&(port->port_status),
1396 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1397 			printf("Port %d cannot be set to stopped\n", pi);
1398 		need_check_link_status = 1;
1399 	}
1400 	if (need_check_link_status)
1401 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1402 
1403 	printf("Done\n");
1404 }
1405 
1406 void
1407 close_port(portid_t pid)
1408 {
1409 	portid_t pi;
1410 	struct rte_port *port;
1411 
1412 	if (test_done == 0) {
1413 		printf("Please stop forwarding first\n");
1414 		return;
1415 	}
1416 
1417 	printf("Closing ports...\n");
1418 
1419 	for (pi = 0; pi < nb_ports; pi++) {
1420 		if (pid < nb_ports && pid != pi)
1421 			continue;
1422 
1423 		port = &ports[pi];
1424 		if (rte_atomic16_cmpset(&(port->port_status),
1425 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1426 			printf("Port %d is not stopped\n", pi);
1427 			continue;
1428 		}
1429 
1430 		rte_eth_dev_close(pi);
1431 
1432 		if (rte_atomic16_cmpset(&(port->port_status),
1433 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1434 			printf("Port %d cannot be set to closed\n", pi);
1435 	}
1436 
1437 	printf("Done\n");
1438 }
1439 
1440 int
1441 all_ports_stopped(void)
1442 {
1443 	portid_t pi;
1444 	struct rte_port *port;
1445 
1446 	for (pi = 0; pi < nb_ports; pi++) {
1447 		port = &ports[pi];
1448 		if (port->port_status != RTE_PORT_STOPPED)
1449 			return 0;
1450 	}
1451 
1452 	return 1;
1453 }
1454 
1455 void
1456 pmd_test_exit(void)
1457 {
1458 	portid_t pt_id;
1459 
1460 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1461 		printf("Stopping port %d...", pt_id);
1462 		fflush(stdout);
1463 		rte_eth_dev_close(pt_id);
1464 		printf("done\n");
1465 	}
1466 	printf("bye...\n");
1467 }
1468 
1469 typedef void (*cmd_func_t)(void);
1470 struct pmd_test_command {
1471 	const char *cmd_name;
1472 	cmd_func_t cmd_func;
1473 };
1474 
1475 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1476 
1477 /* Check the link status of all ports in up to 9 seconds, and print the status at the end */
1478 static void
1479 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1480 {
1481 #define CHECK_INTERVAL 100 /* 100ms */
1482 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1483 	uint8_t portid, count, all_ports_up, print_flag = 0;
1484 	struct rte_eth_link link;
1485 
1486 	printf("Checking link statuses...\n");
1487 	fflush(stdout);
1488 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1489 		all_ports_up = 1;
1490 		for (portid = 0; portid < port_num; portid++) {
1491 			if ((port_mask & (1 << portid)) == 0)
1492 				continue;
1493 			memset(&link, 0, sizeof(link));
1494 			rte_eth_link_get_nowait(portid, &link);
1495 			/* print link status if flag set */
1496 			if (print_flag == 1) {
1497 				if (link.link_status)
1498 					printf("Port %d Link Up - speed %u "
1499 						"Mbps - %s\n", (uint8_t)portid,
1500 						(unsigned)link.link_speed,
1501 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1502 					("full-duplex") : ("half-duplex"));
1503 				else
1504 					printf("Port %d Link Down\n",
1505 						(uint8_t)portid);
1506 				continue;
1507 			}
1508 			/* clear all_ports_up flag if any link down */
1509 			if (link.link_status == 0) {
1510 				all_ports_up = 0;
1511 				break;
1512 			}
1513 		}
1514 		/* after finally printing all link status, get out */
1515 		if (print_flag == 1)
1516 			break;
1517 
1518 		if (all_ports_up == 0) {
1519 			fflush(stdout);
1520 			rte_delay_ms(CHECK_INTERVAL);
1521 		}
1522 
1523 		/* set the print_flag if all ports up or timeout */
1524 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1525 			print_flag = 1;
1526 		}
1527 	}
1528 }
1529 
1530 static int
1531 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1532 {
1533 	uint16_t i;
1534 	int diag;
1535 	uint8_t mapping_found = 0;
1536 
1537 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1538 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1539 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1540 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1541 					tx_queue_stats_mappings[i].queue_id,
1542 					tx_queue_stats_mappings[i].stats_counter_id);
1543 			if (diag != 0)
1544 				return diag;
1545 			mapping_found = 1;
1546 		}
1547 	}
1548 	if (mapping_found)
1549 		port->tx_queue_stats_mapping_enabled = 1;
1550 	return 0;
1551 }
1552 
1553 static int
1554 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1555 {
1556 	uint16_t i;
1557 	int diag;
1558 	uint8_t mapping_found = 0;
1559 
1560 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1561 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1562 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1563 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1564 					rx_queue_stats_mappings[i].queue_id,
1565 					rx_queue_stats_mappings[i].stats_counter_id);
1566 			if (diag != 0)
1567 				return diag;
1568 			mapping_found = 1;
1569 		}
1570 	}
1571 	if (mapping_found)
1572 		port->rx_queue_stats_mapping_enabled = 1;
1573 	return 0;
1574 }
1575 
1576 static void
1577 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1578 {
1579 	int diag = 0;
1580 
1581 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1582 	if (diag != 0) {
1583 		if (diag == -ENOTSUP) {
1584 			port->tx_queue_stats_mapping_enabled = 0;
1585 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1586 		}
1587 		else
1588 			rte_exit(EXIT_FAILURE,
1589 					"set_tx_queue_stats_mapping_registers "
1590 					"failed for port id=%d diag=%d\n",
1591 					pi, diag);
1592 	}
1593 
1594 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1595 	if (diag != 0) {
1596 		if (diag == -ENOTSUP) {
1597 			port->rx_queue_stats_mapping_enabled = 0;
1598 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1599 		}
1600 		else
1601 			rte_exit(EXIT_FAILURE,
1602 					"set_rx_queue_stats_mapping_registers "
1603 					"failed for port id=%d diag=%d\n",
1604 					pi, diag);
1605 	}
1606 }
1607 
1608 void
1609 init_port_config(void)
1610 {
1611 	portid_t pid;
1612 	struct rte_port *port;
1613 
1614 	for (pid = 0; pid < nb_ports; pid++) {
1615 		port = &ports[pid];
1616 		port->dev_conf.rxmode = rx_mode;
1617 		port->dev_conf.fdir_conf = fdir_conf;
1618 		if (nb_rxq > 1) {
1619 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1620 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1621 		} else {
1622 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1623 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1624 		}
1625 
1626 		/* In SR-IOV mode, RSS mode is not available */
1627 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1628 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1629 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1630 			else
1631 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1632 		}
1633 
1634 		port->rx_conf.rx_thresh = rx_thresh;
1635 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1636 		port->rx_conf.rx_drop_en = rx_drop_en;
1637 		port->tx_conf.tx_thresh = tx_thresh;
1638 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1639 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1640 		port->tx_conf.txq_flags = txq_flags;
1641 
1642 		rte_eth_macaddr_get(pid, &port->eth_addr);
1643 
1644 		map_port_queue_stats_mapping_registers(pid, port);
1645 #ifdef RTE_NIC_BYPASS
1646 		rte_eth_dev_bypass_init(pid);
1647 #endif
1648 	}
1649 }
1650 
1651 const uint16_t vlan_tags[] = {
1652 		0,  1,  2,  3,  4,  5,  6,  7,
1653 		8,  9, 10, 11,  12, 13, 14, 15,
1654 		16, 17, 18, 19, 20, 21, 22, 23,
1655 		24, 25, 26, 27, 28, 29, 30, 31
1656 };
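
/*
 * In get_eth_dcb_conf() below, VLAN tag i is mapped to pool i modulo the
 * number of queue pools; with 16 pools, for instance, tags 0 and 16 both
 * land in pool 0.
 */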
1657 
1658 static int
1659 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1660 {
1661 	uint8_t i;
1662 
1663 	/*
1664 	 * Build up the correct configuration for DCB+VT based on the vlan tags
1665 	 * array given above, and the number of traffic classes available for use.
1666 	 */
1667 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1668 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1669 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1670 
1671 		/* VMDQ+DCB RX and TX configurations */
1672 		vmdq_rx_conf.enable_default_pool = 0;
1673 		vmdq_rx_conf.default_pool = 0;
1674 		vmdq_rx_conf.nb_queue_pools =
1675 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1676 		vmdq_tx_conf.nb_queue_pools =
1677 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1678 
1679 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1680 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1681 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1682 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1683 		}
1684 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1685 			vmdq_rx_conf.dcb_queue[i] = i;
1686 			vmdq_tx_conf.dcb_queue[i] = i;
1687 		}
1688 
1689 		/* Set the RX and TX multi-queue modes to DCB. */
1690 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1691 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1692 		if (dcb_conf->pfc_en)
1693 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1694 		else
1695 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1696 
1697 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1698                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1699 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1700                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1701 	}
1702 	else {
1703 		struct rte_eth_dcb_rx_conf rx_conf;
1704 		struct rte_eth_dcb_tx_conf tx_conf;
1705 
1706 		/* queue mapping configuration of DCB RX and TX */
1707 		if (dcb_conf->num_tcs == ETH_4_TCS)
1708 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1709 		else
1710 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1711 
1712 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1713 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1714 
1715 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1716 			rx_conf.dcb_queue[i] = i;
1717 			tx_conf.dcb_queue[i] = i;
1718 		}
1719 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1720 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1721 		if (dcb_conf->pfc_en)
1722 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1723 		else
1724 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1725 
1726 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1727                                 sizeof(struct rte_eth_dcb_rx_conf)));
1728 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1729                                 sizeof(struct rte_eth_dcb_tx_conf)));
1730 	}
1731 
1732 	return 0;
1733 }
1734 
1735 int
1736 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1737 {
1738 	struct rte_eth_conf port_conf;
1739 	struct rte_port *rte_port;
1740 	int retval;
1741 	uint16_t nb_vlan;
1742 	uint16_t i;
1743 
1744 	/* rxq and txq configuration in dcb mode */
1745 	nb_rxq = 128;
1746 	nb_txq = 128;
1747 	rx_free_thresh = 64;
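
	/*
	 * 128 queues cover every (pool, traffic class) combination in both
	 * DCB+VT layouts: 16 pools * 8 TCs = 32 pools * 4 TCs = 128.
	 */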
1748 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1749 	memset(&port_conf,0,sizeof(struct rte_eth_conf));
1750 	/* Enter DCB configuration status */
1751 	dcb_config = 1;
1752 
1753 	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1754 	/* Set the configuration of DCB in VT mode and DCB in non-VT mode. */
1755 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1756 	if (retval < 0)
1757 		return retval;
1758 
1759 	rte_port = &ports[pid];
1760 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1761 
1762 	rte_port->rx_conf.rx_thresh = rx_thresh;
1763 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1764 	rte_port->tx_conf.tx_thresh = tx_thresh;
1765 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1766 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1767 	/* VLAN filter */
1768 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1769 	for (i = 0; i < nb_vlan; i++) {
1770 		rx_vft_set(pid, vlan_tags[i], 1);
1771 	}
1772 
1773 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1774 	map_port_queue_stats_mapping_registers(pid, rte_port);
1775 
1776 	rte_port->dcb_flag = 1;
1777 
1778 	return 0;
1779 }
1780 
1781 #ifdef RTE_EXEC_ENV_BAREMETAL
1782 #define main _main
1783 #endif
1784 
1785 int
1786 main(int argc, char** argv)
1787 {
1788 	int  diag;
1789 	uint8_t port_id;
1790 
1791 	diag = rte_eal_init(argc, argv);
1792 	if (diag < 0)
1793 		rte_panic("Cannot init EAL\n");
1794 
1795 	if (rte_pmd_init_all())
1796 		rte_panic("Cannot init PMD\n");
1797 
1798 	if (rte_eal_pci_probe())
1799 		rte_panic("Cannot probe PCI\n");
1800 
1801 	nb_ports = (portid_t) rte_eth_dev_count();
1802 	if (nb_ports == 0)
1803 		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1804 							"check that "
1805 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1806 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1807 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1808 			  "configuration file\n");
1809 
1810 	set_def_fwd_config();
1811 	if (nb_lcores == 0)
1812 		rte_panic("Empty set of forwarding logical cores - check the "
1813 			  "core mask supplied in the command parameters\n");
1814 
1815 	argc -= diag;
1816 	argv += diag;
1817 	if (argc > 1)
1818 		launch_args_parse(argc, argv);
1819 
1820 	if (nb_rxq > nb_txq)
1821 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1822 		       "but nb_txq=%d will prevent it from being fully tested.\n",
1823 		       nb_rxq, nb_txq);
1824 
1825 	init_config();
1826 	if (start_port(RTE_PORT_ALL) != 0)
1827 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
1828 
1829 	/* set all ports to promiscuous mode by default */
1830 	for (port_id = 0; port_id < nb_ports; port_id++)
1831 		rte_eth_promiscuous_enable(port_id);
1832 
1833 #ifdef RTE_LIBRTE_CMDLINE
1834 	if (interactive == 1) {
1835 		if (auto_start) {
1836 			printf("Start automatic packet forwarding\n");
1837 			start_packet_forwarding(0);
1838 		}
1839 		prompt();
1840 	} else
1841 #endif
1842 	{
1843 		char c;
1844 		int rc;
1845 
1846 		printf("No command line requested, starting packet forwarding\n");
1847 		start_packet_forwarding(0);
1848 		printf("Press enter to exit\n");
1849 		rc = read(0, &c, 1);
1850 		if (rc < 0)
1851 			return 1;
1852 	}
1853 
1854 	return 0;
1855 }
1856