/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"
#include "mempool_osdep.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, NUMA support dispatches the allocation of the RX and TX
 * memory rings and of the DMA memory buffers (mbufs) of the probed
 * ports among CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (which might not be physically contiguous)
 * for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/* DCB-on, VT-on queue mapping is the default. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
	.drop_queue = 127,
};

static volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};

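/*
 * Per-mbuf constructor, invoked once for each mbuf when the pool is
 * populated: it links the mbuf to its pool and data buffer and resets
 * the packet metadata to a known initial state.
 */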
static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf    *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->type         = RTE_MBUF_PKT;
	mb->pool         = mp;
	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len      = mb_ctor_arg->seg_buf_size;
	mb->ol_flags     = 0;
	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.nb_segs  = 1;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
}

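/*
 * Pool constructor, invoked once at pool creation time: it records the
 * data room size of the mbufs in the pool private area.
 */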
static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}

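/*
 * Create an mbuf pool of nb_mbuf mbufs, each with a data segment of
 * mbuf_seg_size bytes, in the memory of the given socket. Exits on
 * failure; dumps the pool when verbose mode is enabled.
 */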
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
#else
	if (mp_anon != 0)
		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	else
		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
#endif

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= MAX_SOCKET) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

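/*
 * Global initialization done once at startup: allocate the per-lcore
 * forwarding contexts, create the mbuf pools, initialize the port
 * configurations and set up the forwarding streams.
 */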
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket, 0, MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
							"failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool *
						port_per_socket[i]);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}

int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* Set the socket id of each port according to NUMA mode or not. */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* Also track the second-highest burst count. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

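/*
 * Display the aggregated forwarding statistics of one port: RX/TX packet
 * and drop counts, checksum errors in csum mode, pause-frame counters
 * and, when enabled, the per-queue statistics register counters.
 */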
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
			       "RX-error: %-"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
			       "    RX-error:%"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* stats fdir */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

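/*
 * Drain any packets still pending in the RX queues of the forwarding
 * ports and free them, so that a new forwarding run starts from empty
 * queues. Two passes are made, with a small delay after each, to catch
 * packets that were in flight during the first pass.
 */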
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

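/*
 * Main forwarding loop of one logical core: repeatedly run the packet
 * forwarding function of the current engine on each stream assigned to
 * this lcore until the lcore is told to stop.
 */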
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

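/*
 * Stop packet forwarding: tell all forwarding lcores to stop, wait for
 * them to finish, then collect and display per-stream and per-port
 * statistics, accumulated over all ports at the end.
 */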
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* All ports are started. */
	return 1;
}

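/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL: configure
 * the device and its RX/TX queues if needed, start it, and finally check
 * the link status of the started ports.
 */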
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	uint8_t *mac_addr;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if (dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Failed to set up tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocation "
							"on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;

				/* Failed to set up rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);

		mac_addr = port->eth_addr.addr_bytes;
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf("done\n");
	}
	printf("bye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports for up to 9 s, and print the final status. */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

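/*
 * Program the TX (respectively RX) queue statistics mapping registers of
 * a port from the global mapping arrays, and mark the port as having
 * queue stats mappings enabled when at least one mapping applies.
 */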
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

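/*
 * Apply both the TX and RX queue statistics mappings to a port. A
 * -ENOTSUP return from the driver only disables the mapping; any other
 * error is fatal.
 */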
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}

void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* In SR-IOV mode, RSS mode is not available */
		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		port->rx_conf.rx_thresh = rx_thresh;
		port->rx_conf.rx_free_thresh = rx_free_thresh;
		port->rx_conf.rx_drop_en = rx_drop_en;
		port->tx_conf.tx_thresh = tx_thresh;
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
		port->tx_conf.tx_free_thresh = tx_free_thresh;
		port->tx_conf.txq_flags = txq_flags;

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}

const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};

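/*
 * Build the Ethernet device configuration for DCB, either combined with
 * virtualization (VMDQ+DCB) or standalone, based on the vlan_tags array
 * above and the number of traffic classes to use.
 */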
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	} else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}

int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rte_port->rx_conf.rx_thresh = rx_thresh;
	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
	rte_port->tx_conf.tx_thresh = tx_thresh;
	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++) {
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}

#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
							"check that "
			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
			  "configuration file\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No command-line core given, starting packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}