xref: /dpdk/app/test-pmd/testpmd.c (revision 8fd8bebc051704d7caecfed8d8a065a79c022329)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81 
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87 
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No NUMA support by default. */
96 
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102 
103 /*
104  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107 
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116 
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;	       /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124 
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
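/*
 * Worked example (a sketch; the exact numbers depend on the EAL core mask
 * and on the probed devices): with core mask 0xf, one lcore is reserved as
 * the master for the command line, so nb_lcores = nb_cfg_lcores = 3 while
 * nb_fwd_lcores starts at 1; the runtime "set nb-cores" command can raise
 * it, but never above nb_cfg_lcores.
 */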
134 
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137 
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140 
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine *fwd_engines[] = {
145 	&io_fwd_engine,
146 	&mac_fwd_engine,
147 	&mac_retry_fwd_engine,
148 	&mac_swap_engine,
149 	&flow_gen_engine,
150 	&rx_only_engine,
151 	&tx_only_engine,
152 	&csum_fwd_engine,
153 	&icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 	&ieee1588_fwd_engine,
156 #endif
157 	NULL,
158 };
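/*
 * The array above is NULL-terminated, so an engine can be looked up by name
 * with a simple scan. A minimal sketch (hedged: the real selection logic
 * lives in the command-line handlers, not in this file; fwd_mode_name is
 * declared in testpmd.h):
 *
 *	static struct fwd_engine *
 *	lookup_fwd_engine(const char *name)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; fwd_engines[i] != NULL; i++)
 *			if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
 *				return fwd_engines[i];
 *		return NULL;
 *	}
 */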
159 
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162 
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
165                                       * specified on command-line. */
166 
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 	TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175 
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178 
179 /* Whether the current configuration is in DCB mode; 0 means it is not. */
180 uint8_t dcb_config = 0;
181 
182 /* Whether DCB is in testing status */
183 uint8_t dcb_test = 0;
184 
185 /* DCB on with VT on is the default mapping */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187 
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193 
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201 
202 /*
203  * Configurable values of RX and TX ring threshold registers.
204  */
205 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
206 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
207 #define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */
208 
209 #define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */
210 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
211 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
212 
213 struct rte_eth_thresh rx_thresh = {
214 	.pthresh = RX_PTHRESH,
215 	.hthresh = RX_HTHRESH,
216 	.wthresh = RX_WTHRESH,
217 };
218 
219 struct rte_eth_thresh tx_thresh = {
220 	.pthresh = TX_PTHRESH,
221 	.hthresh = TX_HTHRESH,
222 	.wthresh = TX_WTHRESH,
223 };
224 
225 /*
226  * Configurable value of RX free threshold.
227  */
228 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
229 
230 /*
231  * Configurable value of RX drop enable.
232  */
233 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
234 
235 /*
236  * Configurable value of TX free threshold.
237  */
238 uint16_t tx_free_thresh = 0; /* Use default values. */
239 
240 /*
241  * Configurable value of TX RS bit threshold.
242  */
243 uint16_t tx_rs_thresh = 0; /* Use default values. */
244 
245 /*
246  * Configurable value of TX queue flags.
247  */
248 uint32_t txq_flags = 0; /* No flags set. */
249 
250 /*
251  * Receive Side Scaling (RSS) configuration.
252  */
253 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
254 
255 /*
256  * Port topology configuration
257  */
258 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
259 
260 /*
261  * Avoid flushing all the RX streams before starting forwarding.
262  */
263 uint8_t no_flush_rx = 0; /* flush by default */
264 
265 /*
266  * Avoid checking the link status when starting/stopping a port.
267  */
268 uint8_t no_link_check = 0; /* check by default */
269 
270 /*
271  * NIC bypass mode configuration options.
272  */
273 #ifdef RTE_NIC_BYPASS
274 
275 /* The NIC bypass watchdog timeout. */
276 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
277 
278 #endif
279 
280 /*
281  * Ethernet device configuration.
282  */
283 struct rte_eth_rxmode rx_mode = {
284 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
285 	.split_hdr_size = 0,
286 	.header_split   = 0, /**< Header Split disabled. */
287 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
288 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
289 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
290 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
291 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
292 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
293 };
294 
295 struct rte_fdir_conf fdir_conf = {
296 	.mode = RTE_FDIR_MODE_NONE,
297 	.pballoc = RTE_FDIR_PBALLOC_64K,
298 	.status = RTE_FDIR_REPORT_STATUS,
299 	.flexbytes_offset = 0x6,
300 	.drop_queue = 127,
301 };
302 
303 volatile int test_done = 1; /* nonzero when packet forwarding is stopped */
304 
305 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
306 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
307 
308 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
309 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
310 
311 uint16_t nb_tx_queue_stats_mappings = 0;
312 uint16_t nb_rx_queue_stats_mappings = 0;
313 
314 /* Forward function declarations */
315 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
316 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
317 
318 /*
319  * Check if all the ports are started.
320  * If yes, return positive value. If not, return zero.
321  */
322 static int all_ports_started(void);
323 
324 /*
325  * Setup default configuration.
326  */
327 static void
328 set_default_fwd_lcores_config(void)
329 {
330 	unsigned int i;
331 	unsigned int nb_lc;
332 
333 	nb_lc = 0;
334 	for (i = 0; i < RTE_MAX_LCORE; i++) {
335 		if (! rte_lcore_is_enabled(i))
336 			continue;
337 		if (i == rte_get_master_lcore())
338 			continue;
339 		fwd_lcores_cpuids[nb_lc++] = i;
340 	}
341 	nb_lcores = (lcoreid_t) nb_lc;
342 	nb_cfg_lcores = nb_lcores;
343 	nb_fwd_lcores = 1;
344 }
345 
346 static void
347 set_def_peer_eth_addrs(void)
348 {
349 	portid_t i;
350 
351 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
352 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
353 		peer_eth_addrs[i].addr_bytes[5] = i;
354 	}
355 }
356 
357 static void
358 set_default_fwd_ports_config(void)
359 {
360 	portid_t pt_id;
361 
362 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
363 		fwd_ports_ids[pt_id] = pt_id;
364 
365 	nb_cfg_ports = nb_ports;
366 	nb_fwd_ports = nb_ports;
367 }
368 
369 void
370 set_def_fwd_config(void)
371 {
372 	set_default_fwd_lcores_config();
373 	set_def_peer_eth_addrs();
374 	set_default_fwd_ports_config();
375 }
376 
377 /*
378  * Configuration initialisation done once at init time.
379  */
380 struct mbuf_ctor_arg {
381 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
382 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
383 };
384 
385 struct mbuf_pool_ctor_arg {
386 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
387 };
388 
389 static void
390 testpmd_mbuf_ctor(struct rte_mempool *mp,
391 		  void *opaque_arg,
392 		  void *raw_mbuf,
393 		  __attribute__((unused)) unsigned i)
394 {
395 	struct mbuf_ctor_arg *mb_ctor_arg;
396 	struct rte_mbuf    *mb;
397 
398 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
399 	mb = (struct rte_mbuf *) raw_mbuf;
400 
401 	mb->type         = RTE_MBUF_PKT;
402 	mb->pool         = mp;
403 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
404 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
405 			mb_ctor_arg->seg_buf_offset);
406 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
408 	mb->ol_flags     = 0;
409 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
410 	mb->pkt.nb_segs  = 1;
411 	mb->pkt.vlan_macip.data = 0;
412 	mb->pkt.hash.rss = 0;
413 }
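/*
 * Resulting per-mbuf memory layout (a sketch derived from the constructor
 * above; the exact offset depends on the cache-line rounding done in
 * mbuf_pool_create()):
 *
 *   raw_mbuf -> +-----------------------+
 *               | struct rte_mbuf       |  metadata, cache-line rounded
 *   buf_addr -> +-----------------------+  raw_mbuf + seg_buf_offset
 *               | RTE_PKTMBUF_HEADROOM  |
 *   pkt.data -> +-----------------------+
 *               | packet data           |
 *               +-----------------------+  buf_addr + buf_len
 */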
414 
415 static void
416 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
417 		       void *opaque_arg)
418 {
419 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
420 	struct rte_pktmbuf_pool_private *mbp_priv;
421 
422 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
423 		printf("%s(%s) private_data_size %d < %d\n",
424 		       __func__, mp->name, (int) mp->private_data_size,
425 		       (int) sizeof(struct rte_pktmbuf_pool_private));
426 		return;
427 	}
428 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
429 	mbp_priv = rte_mempool_get_priv(mp);
430 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
431 }
432 
433 static void
434 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
435 		 unsigned int socket_id)
436 {
437 	char pool_name[RTE_MEMPOOL_NAMESIZE];
438 	struct rte_mempool *rte_mp;
439 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
440 	struct mbuf_ctor_arg mb_ctor_arg;
441 	uint32_t mb_size;
442 
443 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
444 						mbuf_seg_size);
445 	mb_ctor_arg.seg_buf_offset =
446 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
447 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
448 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
449 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
450 
451 #ifdef RTE_LIBRTE_PMD_XENVIRT
452 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
453                                    (unsigned) mb_mempool_cache,
454                                    sizeof(struct rte_pktmbuf_pool_private),
455                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
456                                    testpmd_mbuf_ctor, &mb_ctor_arg,
457                                    socket_id, 0);
458 
461 #else
462 	if (mp_anon != 0)
463 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
464 				    (unsigned) mb_mempool_cache,
465 				    sizeof(struct rte_pktmbuf_pool_private),
466 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
467 				    testpmd_mbuf_ctor, &mb_ctor_arg,
468 				    socket_id, 0);
469 	else
470 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
471 				    (unsigned) mb_mempool_cache,
472 				    sizeof(struct rte_pktmbuf_pool_private),
473 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
474 				    testpmd_mbuf_ctor, &mb_ctor_arg,
475 				    socket_id, 0);
476 
477 #endif
478 
479 	if (rte_mp == NULL) {
480 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
481 						"failed\n", socket_id);
482 	} else if (verbose_level > 0) {
483 		rte_mempool_dump(stdout, rte_mp);
484 	}
485 }
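/*
 * Usage note (mirrors init_config() below): one pool is created per CPU
 * socket in NUMA mode, or a single socket-0 pool otherwise. The pool is
 * later retrieved by mbuf_pool_find() (declared in testpmd.h) through the
 * same socket-derived name that mbuf_poolname_build() produced here.
 */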
486 
487 /*
488  * Check whether the given socket ID is valid in NUMA mode.
489  * If valid, return 0; otherwise return -1.
490  */
491 static int
492 check_socket_id(const unsigned int socket_id)
493 {
494 	static int warning_once = 0;
495 
496 	if (socket_id >= MAX_SOCKET) {
497 		if (!warning_once && numa_support)
498 			printf("Warning: NUMA should be configured manually by"
499 			       " using --port-numa-config and"
500 			       " --ring-numa-config parameters along with"
501 			       " --numa.\n");
502 		warning_once = 1;
503 		return -1;
504 	}
505 	return 0;
506 }
507 
508 static void
509 init_config(void)
510 {
511 	portid_t pid;
512 	struct rte_port *port;
513 	struct rte_mempool *mbp;
514 	unsigned int nb_mbuf_per_pool;
515 	lcoreid_t  lc_id;
516 	uint8_t port_per_socket[MAX_SOCKET];
517 
518 	memset(port_per_socket, 0, MAX_SOCKET);
519 	/* Configuration of logical cores. */
520 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
521 				sizeof(struct fwd_lcore *) * nb_lcores,
522 				CACHE_LINE_SIZE);
523 	if (fwd_lcores == NULL) {
524 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
525 							"failed\n", nb_lcores);
526 	}
527 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
528 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
529 					       sizeof(struct fwd_lcore),
530 					       CACHE_LINE_SIZE);
531 		if (fwd_lcores[lc_id] == NULL) {
532 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
533 								"failed\n");
534 		}
535 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
536 	}
537 
538 	/*
539 	 * Create mbuf pools.
540 	 * If NUMA support is disabled, create a single mbuf pool in
541 	 * socket 0 memory by default.
542 	 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
543 	 *
544 	 * Size the pools from the maximum values of nb_rxd and nb_txd here,
545 	 * so that nb_rxd and nb_txd can be reconfigured at run time.
546 	 */
547 	if (param_total_num_mbufs)
548 		nb_mbuf_per_pool = param_total_num_mbufs;
549 	else {
550 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
551 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
552 
553 		if (!numa_support)
554 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
555 	}
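	/*
	 * Worked sizing example (a sketch: the constants live in testpmd.h
	 * and may differ in your tree). Assuming RTE_TEST_RX_DESC_MAX = 2048,
	 * RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512 and
	 * mb_mempool_cache = 250, a run with 4 lcores sizes each pool at
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, multiplied by nb_ports
	 * when NUMA support is disabled.
	 */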
556 
557 	if (!numa_support) {
558 		if (socket_num == UMA_NO_CONFIG)
559 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
560 		else
561 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
562 						 socket_num);
563 	}
564 
565 	/* Configuration of Ethernet ports. */
566 	ports = rte_zmalloc("testpmd: ports",
567 			    sizeof(struct rte_port) * nb_ports,
568 			    CACHE_LINE_SIZE);
569 	if (ports == NULL) {
570 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
571 							"failed\n", nb_ports);
572 	}
573 
574 	for (pid = 0; pid < nb_ports; pid++) {
575 		port = &ports[pid];
576 		rte_eth_dev_info_get(pid, &port->dev_info);
577 
578 		if (numa_support) {
579 			if (port_numa[pid] != NUMA_NO_CONFIG)
580 				port_per_socket[port_numa[pid]]++;
581 			else {
582 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
583 
584 				/* if socket_id is invalid, set to 0 */
585 				if (check_socket_id(socket_id) < 0)
586 					socket_id = 0;
587 				port_per_socket[socket_id]++;
588 			}
589 		}
590 
591 		/* set flag to initialize port/queue */
592 		port->need_reconfig = 1;
593 		port->need_reconfig_queues = 1;
594 	}
595 
596 	if (numa_support) {
597 		uint8_t i;
598 		unsigned int nb_mbuf;
599 
600 		if (param_total_num_mbufs)
601 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
602 
603 		for (i = 0; i < MAX_SOCKET; i++) {
604 			nb_mbuf = (nb_mbuf_per_pool *
605 						port_per_socket[i]);
606 			if (nb_mbuf)
607 				mbuf_pool_create(mbuf_data_size,
608 						nb_mbuf,i);
609 		}
610 	}
611 	init_port_config();
612 
613 	/*
614 	 * Record which mbuf pool each logical core should use, if needed.
615 	 */
616 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
617 		mbp = mbuf_pool_find(
618 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
619 
620 		if (mbp == NULL)
621 			mbp = mbuf_pool_find(0);
622 		fwd_lcores[lc_id]->mbp = mbp;
623 	}
624 
625 	/* Configuration of packet forwarding streams. */
626 	if (init_fwd_streams() < 0)
627 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
628 }
629 
630 
631 void
632 reconfig(portid_t new_port_id)
633 {
634 	struct rte_port *port;
635 
636 	/* Reconfiguration of Ethernet ports. */
637 	ports = rte_realloc(ports,
638 			    sizeof(struct rte_port) * nb_ports,
639 			    CACHE_LINE_SIZE);
640 	if (ports == NULL) {
641 		rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
642 				nb_ports);
643 	}
644 
645 	port = &ports[new_port_id];
646 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
647 
648 	/* set flag to initialize port/queue */
649 	port->need_reconfig = 1;
650 	port->need_reconfig_queues = 1;
651 
652 	init_port_config();
653 }
654 
655 
656 int
657 init_fwd_streams(void)
658 {
659 	portid_t pid;
660 	struct rte_port *port;
661 	streamid_t sm_id, nb_fwd_streams_new;
662 
663 	/* Set the socket ID of each port according to NUMA mode. */
664 	for (pid = 0; pid < nb_ports; pid++) {
665 		port = &ports[pid];
666 		if (nb_rxq > port->dev_info.max_rx_queues) {
667 			printf("Fail: nb_rxq(%d) is greater than "
668 				"max_rx_queues(%d)\n", nb_rxq,
669 				port->dev_info.max_rx_queues);
670 			return -1;
671 		}
672 		if (nb_txq > port->dev_info.max_tx_queues) {
673 			printf("Fail: nb_txq(%d) is greater than "
674 				"max_tx_queues(%d)\n", nb_txq,
675 				port->dev_info.max_tx_queues);
676 			return -1;
677 		}
678 		if (numa_support) {
679 			if (port_numa[pid] != NUMA_NO_CONFIG)
680 				port->socket_id = port_numa[pid];
681 			else {
682 				port->socket_id = rte_eth_dev_socket_id(pid);
683 
684 				/* if socket_id is invalid, set to 0 */
685 				if (check_socket_id(port->socket_id) < 0)
686 					port->socket_id = 0;
687 			}
688 		}
689 		else {
690 			if (socket_num == UMA_NO_CONFIG)
691 				port->socket_id = 0;
692 			else
693 				port->socket_id = socket_num;
694 		}
695 	}
696 
697 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
698 	if (nb_fwd_streams_new == nb_fwd_streams)
699 		return 0;
700 	/* clear the old */
701 	if (fwd_streams != NULL) {
702 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
703 			if (fwd_streams[sm_id] == NULL)
704 				continue;
705 			rte_free(fwd_streams[sm_id]);
706 			fwd_streams[sm_id] = NULL;
707 		}
708 		rte_free(fwd_streams);
709 		fwd_streams = NULL;
710 	}
711 
712 	/* init new */
713 	nb_fwd_streams = nb_fwd_streams_new;
714 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
715 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
716 	if (fwd_streams == NULL)
717 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
718 						"failed\n", nb_fwd_streams);
719 
720 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
721 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
722 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
723 		if (fwd_streams[sm_id] == NULL)
724 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
725 								" failed\n");
726 	}
727 
728 	return 0;
729 }
730 
731 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
732 static void
733 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
734 {
735 	unsigned int total_burst;
736 	unsigned int nb_burst;
737 	unsigned int burst_stats[3];
738 	uint16_t pktnb_stats[3];
739 	uint16_t nb_pkt;
740 	int burst_percent[3];
741 
742 	/*
743 	 * First compute the total number of packet bursts and the
744 	 * two highest numbers of bursts of the same number of packets.
745 	 */
746 	total_burst = 0;
747 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
748 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
749 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
750 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
751 		if (nb_burst == 0)
752 			continue;
753 		total_burst += nb_burst;
754 		if (nb_burst > burst_stats[0]) {
755 			burst_stats[1] = burst_stats[0];
756 			pktnb_stats[1] = pktnb_stats[0];
757 			burst_stats[0] = nb_burst;
758 			pktnb_stats[0] = nb_pkt;
759 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
760 	}
761 	if (total_burst == 0)
762 		return;
763 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
764 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
765 	       burst_percent[0], (int) pktnb_stats[0]);
766 	if (burst_stats[0] == total_burst) {
767 		printf("]\n");
768 		return;
769 	}
770 	if (burst_stats[0] + burst_stats[1] == total_burst) {
771 		printf(" + %d%% of %d pkts]\n",
772 		       100 - burst_percent[0], pktnb_stats[1]);
773 		return;
774 	}
775 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
776 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
777 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
778 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
779 		return;
780 	}
781 	printf(" + %d%% of %d pkts + %d%% of others]\n",
782 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
783 }
784 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
785 
786 static void
787 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
788 {
789 	struct rte_port *port;
790 	uint8_t i;
791 
792 	static const char *fwd_stats_border = "----------------------";
793 
794 	port = &ports[port_id];
795 	printf("\n  %s Forward statistics for port %-2d %s\n",
796 	       fwd_stats_border, port_id, fwd_stats_border);
797 
798 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
799 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
800 		       "%-"PRIu64"\n",
801 		       stats->ipackets, stats->imissed,
802 		       (uint64_t) (stats->ipackets + stats->imissed));
803 
804 		if (cur_fwd_eng == &csum_fwd_engine)
805 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
806 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
807 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
808 			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
809 			       "RX-error: %-"PRIu64"\n",
810 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
811 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
812 		}
813 
814 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
815 		       "%-"PRIu64"\n",
816 		       stats->opackets, port->tx_dropped,
817 		       (uint64_t) (stats->opackets + port->tx_dropped));
818 	}
819 	else {
820 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
821 		       "%14"PRIu64"\n",
822 		       stats->ipackets, stats->imissed,
823 		       (uint64_t) (stats->ipackets + stats->imissed));
824 
825 		if (cur_fwd_eng == &csum_fwd_engine)
826 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
827 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
828 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
829 			printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
830 			       "    RX-error:%"PRIu64"\n",
831 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
832 			printf("  RX-nombufs:             %14"PRIu64"\n",
833 			       stats->rx_nombuf);
834 		}
835 
836 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
837 		       "%14"PRIu64"\n",
838 		       stats->opackets, port->tx_dropped,
839 		       (uint64_t) (stats->opackets + port->tx_dropped));
840 	}
841 
842 	/* Display statistics of XON/XOFF pause frames, if any. */
843 	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
844 	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
845 		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
846 		       stats->rx_pause_xoff, stats->rx_pause_xon);
847 		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
848 		       stats->tx_pause_xoff, stats->tx_pause_xon);
849 	}
850 
851 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
852 	if (port->rx_stream)
853 		pkt_burst_stats_display("RX",
854 			&port->rx_stream->rx_burst_stats);
855 	if (port->tx_stream)
856 		pkt_burst_stats_display("TX",
857 			&port->tx_stream->tx_burst_stats);
858 #endif
859 	/* stats fdir */
860 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
861 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
862 		       stats->fdirmiss,
863 		       stats->fdirmatch);
864 
865 	if (port->rx_queue_stats_mapping_enabled) {
866 		printf("\n");
867 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
868 			printf("  Stats reg %2d RX-packets:%14"PRIu64
869 			       "     RX-errors:%14"PRIu64
870 			       "    RX-bytes:%14"PRIu64"\n",
871 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
872 		}
873 		printf("\n");
874 	}
875 	if (port->tx_queue_stats_mapping_enabled) {
876 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
877 			printf("  Stats reg %2d TX-packets:%14"PRIu64
878 			       "                                 TX-bytes:%14"PRIu64"\n",
879 			       i, stats->q_opackets[i], stats->q_obytes[i]);
880 		}
881 	}
882 
883 	printf("  %s--------------------------------%s\n",
884 	       fwd_stats_border, fwd_stats_border);
885 }
886 
887 static void
888 fwd_stream_stats_display(streamid_t stream_id)
889 {
890 	struct fwd_stream *fs;
891 	static const char *fwd_top_stats_border = "-------";
892 
893 	fs = fwd_streams[stream_id];
894 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
895 	    (fs->fwd_dropped == 0))
896 		return;
897 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
898 	       "TX Port=%2d/Queue=%2d %s\n",
899 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
900 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
901 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
902 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
903 
904 	/* if checksum mode */
905 	if (cur_fwd_eng == &csum_fwd_engine) {
906 		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
907 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
908 	}
909 
910 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
911 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
912 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
913 #endif
914 }
915 
916 static void
917 flush_fwd_rx_queues(void)
918 {
919 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
920 	portid_t  rxp;
921 	portid_t port_id;
922 	queueid_t rxq;
923 	uint16_t  nb_rx;
924 	uint16_t  i;
925 	uint8_t   j;
926 
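	/*
	 * Make two passes over every RX queue, with a short delay in
	 * between, so that packets still in flight during the first pass
	 * are presumably caught by the second one.
	 */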
927 	for (j = 0; j < 2; j++) {
928 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
929 			for (rxq = 0; rxq < nb_rxq; rxq++) {
930 				port_id = fwd_ports_ids[rxp];
931 				do {
932 					nb_rx = rte_eth_rx_burst(port_id, rxq,
933 						pkts_burst, MAX_PKT_BURST);
934 					for (i = 0; i < nb_rx; i++)
935 						rte_pktmbuf_free(pkts_burst[i]);
936 				} while (nb_rx > 0);
937 			}
938 		}
939 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
940 	}
941 }
942 
943 static void
944 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
945 {
946 	struct fwd_stream **fsm;
947 	streamid_t nb_fs;
948 	streamid_t sm_id;
949 
950 	fsm = &fwd_streams[fc->stream_idx];
951 	nb_fs = fc->stream_nb;
952 	do {
953 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
954 			(*pkt_fwd)(fsm[sm_id]);
955 	} while (! fc->stopped);
956 }
957 
958 static int
959 start_pkt_forward_on_core(void *fwd_arg)
960 {
961 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
962 			     cur_fwd_config.fwd_eng->packet_fwd);
963 	return 0;
964 }
965 
966 /*
967  * Run the TXONLY packet forwarding engine to send a single burst of packets.
968  * Used to start communication flows in network loopback test configurations.
969  */
970 static int
971 run_one_txonly_burst_on_core(void *fwd_arg)
972 {
973 	struct fwd_lcore *fwd_lc;
974 	struct fwd_lcore tmp_lcore;
975 
976 	fwd_lc = (struct fwd_lcore *) fwd_arg;
977 	tmp_lcore = *fwd_lc;
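	/*
	 * Mark the copied context as stopped so that the do/while loop in
	 * run_pkt_fwd_on_lcore() executes exactly one pass over the streams.
	 */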
978 	tmp_lcore.stopped = 1;
979 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
980 	return 0;
981 }
982 
983 /*
984  * Launch packet forwarding:
985  *     - Setup per-port forwarding context.
986  *     - launch logical cores with their forwarding configuration.
987  */
988 static void
989 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
990 {
991 	port_fwd_begin_t port_fwd_begin;
992 	unsigned int i;
993 	unsigned int lc_id;
994 	int diag;
995 
996 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
997 	if (port_fwd_begin != NULL) {
998 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
999 			(*port_fwd_begin)(fwd_ports_ids[i]);
1000 	}
1001 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1002 		lc_id = fwd_lcores_cpuids[i];
1003 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1004 			fwd_lcores[i]->stopped = 0;
1005 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1006 						     fwd_lcores[i], lc_id);
1007 			if (diag != 0)
1008 				printf("launch lcore %u failed - diag=%d\n",
1009 				       lc_id, diag);
1010 		}
1011 	}
1012 }
1013 
1014 /*
1015  * Launch packet forwarding configuration.
1016  */
1017 void
1018 start_packet_forwarding(int with_tx_first)
1019 {
1020 	port_fwd_begin_t port_fwd_begin;
1021 	port_fwd_end_t  port_fwd_end;
1022 	struct rte_port *port;
1023 	unsigned int i;
1024 	portid_t   pt_id;
1025 	streamid_t sm_id;
1026 
1027 	if (all_ports_started() == 0) {
1028 		printf("Not all ports were started\n");
1029 		return;
1030 	}
1031 	if (test_done == 0) {
1032 		printf("Packet forwarding already started\n");
1033 		return;
1034 	}
1035 	if (dcb_test) {
1036 		for (i = 0; i < nb_fwd_ports; i++) {
1037 			pt_id = fwd_ports_ids[i];
1038 			port = &ports[pt_id];
1039 			if (!port->dcb_flag) {
1040 				printf("In DCB mode, all forwarding ports must "
1041 				"be configured in this mode.\n");
1042 				return;
1043 			}
1044 		}
1045 		if (nb_fwd_lcores == 1) {
1046 			printf("In DCB mode, the number of forwarding cores "
1047 				"should be larger than 1.\n");
1048 			return;
1049 		}
1050 	}
1051 	test_done = 0;
1052 
1053 	if (!no_flush_rx)
1054 		flush_fwd_rx_queues();
1055 
1056 	fwd_config_setup();
1057 	rxtx_config_display();
1058 
1059 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1060 		pt_id = fwd_ports_ids[i];
1061 		port = &ports[pt_id];
1062 		rte_eth_stats_get(pt_id, &port->stats);
1063 		port->tx_dropped = 0;
1064 
1065 		map_port_queue_stats_mapping_registers(pt_id, port);
1066 	}
1067 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1068 		fwd_streams[sm_id]->rx_packets = 0;
1069 		fwd_streams[sm_id]->tx_packets = 0;
1070 		fwd_streams[sm_id]->fwd_dropped = 0;
1071 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1072 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1073 
1074 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1075 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1076 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1077 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1078 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1079 #endif
1080 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1081 		fwd_streams[sm_id]->core_cycles = 0;
1082 #endif
1083 	}
1084 	if (with_tx_first) {
1085 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1086 		if (port_fwd_begin != NULL) {
1087 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1088 				(*port_fwd_begin)(fwd_ports_ids[i]);
1089 		}
1090 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1091 		rte_eal_mp_wait_lcore();
1092 		port_fwd_end = tx_only_engine.port_fwd_end;
1093 		if (port_fwd_end != NULL) {
1094 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1095 				(*port_fwd_end)(fwd_ports_ids[i]);
1096 		}
1097 	}
1098 	launch_packet_forwarding(start_pkt_forward_on_core);
1099 }
1100 
1101 void
1102 stop_packet_forwarding(void)
1103 {
1104 	struct rte_eth_stats stats;
1105 	struct rte_port *port;
1106 	port_fwd_end_t  port_fwd_end;
1107 	int i;
1108 	portid_t   pt_id;
1109 	streamid_t sm_id;
1110 	lcoreid_t  lc_id;
1111 	uint64_t total_recv;
1112 	uint64_t total_xmit;
1113 	uint64_t total_rx_dropped;
1114 	uint64_t total_tx_dropped;
1115 	uint64_t total_rx_nombuf;
1116 	uint64_t tx_dropped;
1117 	uint64_t rx_bad_ip_csum;
1118 	uint64_t rx_bad_l4_csum;
1119 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1120 	uint64_t fwd_cycles;
1121 #endif
1122 	static const char *acc_stats_border = "+++++++++++++++";
1123 
1124 	if (all_ports_started() == 0) {
1125 		printf("Not all ports were started\n");
1126 		return;
1127 	}
1128 	if (test_done) {
1129 		printf("Packet forwarding not started\n");
1130 		return;
1131 	}
1132 	printf("Telling cores to stop...");
1133 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1134 		fwd_lcores[lc_id]->stopped = 1;
1135 	printf("\nWaiting for lcores to finish...\n");
1136 	rte_eal_mp_wait_lcore();
1137 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1138 	if (port_fwd_end != NULL) {
1139 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1140 			pt_id = fwd_ports_ids[i];
1141 			(*port_fwd_end)(pt_id);
1142 		}
1143 	}
1144 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1145 	fwd_cycles = 0;
1146 #endif
1147 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1148 		if (cur_fwd_config.nb_fwd_streams >
1149 		    cur_fwd_config.nb_fwd_ports) {
1150 			fwd_stream_stats_display(sm_id);
1151 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1152 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1153 		} else {
1154 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1155 				fwd_streams[sm_id];
1156 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1157 				fwd_streams[sm_id];
1158 		}
1159 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1160 		tx_dropped = (uint64_t) (tx_dropped +
1161 					 fwd_streams[sm_id]->fwd_dropped);
1162 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1163 
1164 		rx_bad_ip_csum =
1165 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1166 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1167 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1168 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1169 							rx_bad_ip_csum;
1170 
1171 		rx_bad_l4_csum =
1172 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1173 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1174 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1175 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1176 							rx_bad_l4_csum;
1177 
1178 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1179 		fwd_cycles = (uint64_t) (fwd_cycles +
1180 					 fwd_streams[sm_id]->core_cycles);
1181 #endif
1182 	}
1183 	total_recv = 0;
1184 	total_xmit = 0;
1185 	total_rx_dropped = 0;
1186 	total_tx_dropped = 0;
1187 	total_rx_nombuf  = 0;
1188 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1189 		pt_id = fwd_ports_ids[i];
1190 
1191 		port = &ports[pt_id];
1192 		rte_eth_stats_get(pt_id, &stats);
1193 		stats.ipackets -= port->stats.ipackets;
1194 		port->stats.ipackets = 0;
1195 		stats.opackets -= port->stats.opackets;
1196 		port->stats.opackets = 0;
1197 		stats.ibytes   -= port->stats.ibytes;
1198 		port->stats.ibytes = 0;
1199 		stats.obytes   -= port->stats.obytes;
1200 		port->stats.obytes = 0;
1201 		stats.imissed  -= port->stats.imissed;
1202 		port->stats.imissed = 0;
1203 		stats.oerrors  -= port->stats.oerrors;
1204 		port->stats.oerrors = 0;
1205 		stats.rx_nombuf -= port->stats.rx_nombuf;
1206 		port->stats.rx_nombuf = 0;
1207 		stats.fdirmatch -= port->stats.fdirmatch;
1208 		port->stats.fdirmatch = 0;
1209 		stats.fdirmiss -= port->stats.fdirmiss;
1210 		port->stats.fdirmiss = 0;
1211 
1212 		total_recv += stats.ipackets;
1213 		total_xmit += stats.opackets;
1214 		total_rx_dropped += stats.imissed;
1215 		total_tx_dropped += port->tx_dropped;
1216 		total_rx_nombuf  += stats.rx_nombuf;
1217 
1218 		fwd_port_stats_display(pt_id, &stats);
1219 	}
1220 	printf("\n  %s Accumulated forward statistics for all ports"
1221 	       "%s\n",
1222 	       acc_stats_border, acc_stats_border);
1223 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1224 	       "%-"PRIu64"\n"
1225 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1226 	       "%-"PRIu64"\n",
1227 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1228 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1229 	if (total_rx_nombuf > 0)
1230 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1231 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1232 	       "%s\n",
1233 	       acc_stats_border, acc_stats_border);
1234 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1235 	if (total_recv > 0)
1236 		printf("\n  CPU cycles/packet=%u (total cycles="
1237 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1238 		       (unsigned int)(fwd_cycles / total_recv),
1239 		       fwd_cycles, total_recv);
1240 #endif
1241 	printf("\nDone.\n");
1242 	test_done = 1;
1243 }
1244 
1245 void
1246 dev_set_link_up(portid_t pid)
1247 {
1248 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1249 		printf("\nSet link up fail.\n");
1250 }
1251 
1252 void
1253 dev_set_link_down(portid_t pid)
1254 {
1255 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1256 		printf("\nSet link down fail.\n");
1257 }
1258 
1259 static int
1260 all_ports_started(void)
1261 {
1262 	portid_t pi;
1263 	struct rte_port *port;
1264 
1265 	for (pi = 0; pi < nb_ports; pi++) {
1266 		port = &ports[pi];
1267 		/* Check if there is a port which is not started */
1268 		if (port->port_status != RTE_PORT_STARTED)
1269 			return 0;
1270 	}
1271 
1272 	/* All ports are started. */
1273 	return 1;
1274 }
1275 
1276 int
1277 start_port(portid_t pid)
1278 {
1279 	int diag, need_check_link_status = 0;
1280 	portid_t pi;
1281 	queueid_t qi;
1282 	struct rte_port *port;
1283 	struct ether_addr mac_addr;
1284 
1285 	if (test_done == 0) {
1286 		printf("Please stop forwarding first\n");
1287 		return -1;
1288 	}
1289 
1290 	if (init_fwd_streams() < 0) {
1291 		printf("Fail from init_fwd_streams()\n");
1292 		return -1;
1293 	}
1294 
1295 	if (dcb_config)
1296 		dcb_test = 1;
1297 	for (pi = 0; pi < nb_ports; pi++) {
1298 		if (pid < nb_ports && pid != pi)
1299 			continue;
1300 
1301 		port = &ports[pi];
1302 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1303 						 RTE_PORT_HANDLING) == 0) {
1304 			printf("Port %d is not stopped\n", pi);
1305 			continue;
1306 		}
1307 
1308 		if (port->need_reconfig > 0) {
1309 			port->need_reconfig = 0;
1310 
1311 			printf("Configuring Port %d (socket %u)\n", pi,
1312 					port->socket_id);
1313 			/* configure port */
1314 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1315 						&(port->dev_conf));
1316 			if (diag != 0) {
1317 				if (rte_atomic16_cmpset(&(port->port_status),
1318 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1319 					printf("Port %d cannot be set back "
1320 							"to stopped\n", pi);
1321 				printf("Failed to configure port %d\n", pi);
1322 				/* try to reconfigure port next time */
1323 				port->need_reconfig = 1;
1324 				return -1;
1325 			}
1326 		}
1327 		if (port->need_reconfig_queues > 0) {
1328 			port->need_reconfig_queues = 0;
1329 			/* setup tx queues */
1330 			for (qi = 0; qi < nb_txq; qi++) {
1331 				if ((numa_support) &&
1332 					(txring_numa[pi] != NUMA_NO_CONFIG))
1333 					diag = rte_eth_tx_queue_setup(pi, qi,
1334 						nb_txd,txring_numa[pi],
1335 						&(port->tx_conf));
1336 				else
1337 					diag = rte_eth_tx_queue_setup(pi, qi,
1338 						nb_txd,port->socket_id,
1339 						&(port->tx_conf));
1340 
1341 				if (diag == 0)
1342 					continue;
1343 
1344 				/* Failed to set up a TX queue; return. */
1345 				if (rte_atomic16_cmpset(&(port->port_status),
1346 							RTE_PORT_HANDLING,
1347 							RTE_PORT_STOPPED) == 0)
1348 					printf("Port %d cannot be set back "
1349 							"to stopped\n", pi);
1350 				printf("Failed to configure port %d TX queues\n", pi);
1351 				/* try to reconfigure queues next time */
1352 				port->need_reconfig_queues = 1;
1353 				return -1;
1354 			}
1355 			/* setup rx queues */
1356 			for (qi = 0; qi < nb_rxq; qi++) {
1357 				if ((numa_support) &&
1358 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1359 					struct rte_mempool * mp =
1360 						mbuf_pool_find(rxring_numa[pi]);
1361 					if (mp == NULL) {
1362 						printf("Failed to setup RX queue: "
1363 							"no mempool allocated "
1364 							"on socket %d\n",
1365 							rxring_numa[pi]);
1366 						return -1;
1367 					}
1368 
1369 					diag = rte_eth_rx_queue_setup(pi, qi,
1370 					     nb_rxd,rxring_numa[pi],
1371 					     &(port->rx_conf),mp);
1372 				}
1373 				else
1374 					diag = rte_eth_rx_queue_setup(pi, qi,
1375 					     nb_rxd,port->socket_id,
1376 					     &(port->rx_conf),
1377 				             mbuf_pool_find(port->socket_id));
1378 
1379 				if (diag == 0)
1380 					continue;
1382 
1383 				/* Failed to set up an RX queue; return. */
1384 				if (rte_atomic16_cmpset(&(port->port_status),
1385 							RTE_PORT_HANDLING,
1386 							RTE_PORT_STOPPED) == 0)
1387 					printf("Port %d cannot be set back "
1388 							"to stopped\n", pi);
1389 				printf("Failed to configure port %d RX queues\n", pi);
1390 				/* try to reconfigure queues next time */
1391 				port->need_reconfig_queues = 1;
1392 				return -1;
1393 			}
1394 		}
1395 		/* start port */
1396 		if (rte_eth_dev_start(pi) < 0) {
1397 			printf("Failed to start port %d\n", pi);
1398 
1399 			/* Roll the port status back on start failure. */
1400 			if (rte_atomic16_cmpset(&(port->port_status),
1401 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1402 				printf("Port %d cannot be set back to "
1403 							"stopped\n", pi);
1404 			continue;
1405 		}
1406 
1407 		if (rte_atomic16_cmpset(&(port->port_status),
1408 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1409 			printf("Port %d cannot be set to started\n", pi);
1410 
1411 		rte_eth_macaddr_get(pi, &mac_addr);
1412 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1413 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1414 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1415 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1416 
1417 		/* At least one port started; need to check link status. */
1418 		need_check_link_status = 1;
1419 	}
1420 
1421 	if (need_check_link_status && !no_link_check)
1422 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1423 	else
1424 		printf("Please stop the ports first\n");
1425 
1426 	printf("Done\n");
1427 	return 0;
1428 }
1429 
1430 void
1431 stop_port(portid_t pid)
1432 {
1433 	portid_t pi;
1434 	struct rte_port *port;
1435 	int need_check_link_status = 0;
1436 
1437 	if (test_done == 0) {
1438 		printf("Please stop forwarding first\n");
1439 		return;
1440 	}
1441 	if (dcb_test) {
1442 		dcb_test = 0;
1443 		dcb_config = 0;
1444 	}
1445 	printf("Stopping ports...\n");
1446 
1447 	for (pi = 0; pi < nb_ports; pi++) {
1448 		if (pid < nb_ports && pid != pi)
1449 			continue;
1450 
1451 		port = &ports[pi];
1452 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1453 						RTE_PORT_HANDLING) == 0)
1454 			continue;
1455 
1456 		rte_eth_dev_stop(pi);
1457 
1458 		if (rte_atomic16_cmpset(&(port->port_status),
1459 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1460 			printf("Port %d cannot be set to stopped\n", pi);
1461 		need_check_link_status = 1;
1462 	}
1463 	if (need_check_link_status && !no_link_check)
1464 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1465 
1466 	printf("Done\n");
1467 }
1468 
1469 void
1470 close_port(portid_t pid)
1471 {
1472 	portid_t pi;
1473 	struct rte_port *port;
1474 
1475 	if (test_done == 0) {
1476 		printf("Please stop forwarding first\n");
1477 		return;
1478 	}
1479 
1480 	printf("Closing ports...\n");
1481 
1482 	for (pi = 0; pi < nb_ports; pi++) {
1483 		if (pid < nb_ports && pid != pi)
1484 			continue;
1485 
1486 		port = &ports[pi];
1487 		if (rte_atomic16_cmpset(&(port->port_status),
1488 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1489 			printf("Port %d is not stopped\n", pi);
1490 			continue;
1491 		}
1492 
1493 		rte_eth_dev_close(pi);
1494 
1495 		if (rte_atomic16_cmpset(&(port->port_status),
1496 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1497 			printf("Port %d cannot be set to closed\n", pi);
1498 	}
1499 
1500 	printf("Done\n");
1501 }
1502 
1503 int
1504 all_ports_stopped(void)
1505 {
1506 	portid_t pi;
1507 	struct rte_port *port;
1508 
1509 	for (pi = 0; pi < nb_ports; pi++) {
1510 		port = &ports[pi];
1511 		if (port->port_status != RTE_PORT_STOPPED)
1512 			return 0;
1513 	}
1514 
1515 	return 1;
1516 }
1517 
1518 int
1519 port_is_started(portid_t port_id)
1520 {
1521 	if (port_id_is_invalid(port_id))
1522 		return -1;
1523 
1524 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1525 		return 0;
1526 
1527 	return 1;
1528 }
1529 
1530 void
1531 pmd_test_exit(void)
1532 {
1533 	portid_t pt_id;
1534 
1535 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1536 		printf("Stopping port %d...", pt_id);
1537 		fflush(stdout);
1538 		rte_eth_dev_close(pt_id);
1539 		printf("done\n");
1540 	}
1541 	printf("bye...\n");
1542 }
1543 
1544 typedef void (*cmd_func_t)(void);
1545 struct pmd_test_command {
1546 	const char *cmd_name;
1547 	cmd_func_t cmd_func;
1548 };
1549 
1550 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1551 
1552 /* Check the link status of all ports for up to 9 seconds, then print it. */
1553 static void
1554 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1555 {
1556 #define CHECK_INTERVAL 100 /* 100ms */
1557 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1558 	uint8_t portid, count, all_ports_up, print_flag = 0;
1559 	struct rte_eth_link link;
1560 
1561 	printf("Checking link statuses...\n");
1562 	fflush(stdout);
1563 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1564 		all_ports_up = 1;
1565 		for (portid = 0; portid < port_num; portid++) {
1566 			if ((port_mask & (1 << portid)) == 0)
1567 				continue;
1568 			memset(&link, 0, sizeof(link));
1569 			rte_eth_link_get_nowait(portid, &link);
1570 			/* print link status if flag set */
1571 			if (print_flag == 1) {
1572 				if (link.link_status)
1573 					printf("Port %d Link Up - speed %u "
1574 						"Mbps - %s\n", (uint8_t)portid,
1575 						(unsigned)link.link_speed,
1576 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1577 					("full-duplex") : ("half-duplex"));
1578 				else
1579 					printf("Port %d Link Down\n",
1580 						(uint8_t)portid);
1581 				continue;
1582 			}
1583 			/* clear all_ports_up flag if any link down */
1584 			if (link.link_status == 0) {
1585 				all_ports_up = 0;
1586 				break;
1587 			}
1588 		}
1589 		/* after finally printing all link status, get out */
1590 		if (print_flag == 1)
1591 			break;
1592 
1593 		if (all_ports_up == 0) {
1594 			fflush(stdout);
1595 			rte_delay_ms(CHECK_INTERVAL);
1596 		}
1597 
1598 		/* set the print_flag if all ports up or timeout */
1599 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1600 			print_flag = 1;
1601 		}
1602 	}
1603 }
1604 
1605 static int
1606 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1607 {
1608 	uint16_t i;
1609 	int diag;
1610 	uint8_t mapping_found = 0;
1611 
1612 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1613 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1614 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1615 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1616 					tx_queue_stats_mappings[i].queue_id,
1617 					tx_queue_stats_mappings[i].stats_counter_id);
1618 			if (diag != 0)
1619 				return diag;
1620 			mapping_found = 1;
1621 		}
1622 	}
1623 	if (mapping_found)
1624 		port->tx_queue_stats_mapping_enabled = 1;
1625 	return 0;
1626 }
1627 
1628 static int
1629 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1630 {
1631 	uint16_t i;
1632 	int diag;
1633 	uint8_t mapping_found = 0;
1634 
1635 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1636 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1637 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1638 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1639 					rx_queue_stats_mappings[i].queue_id,
1640 					rx_queue_stats_mappings[i].stats_counter_id);
1641 			if (diag != 0)
1642 				return diag;
1643 			mapping_found = 1;
1644 		}
1645 	}
1646 	if (mapping_found)
1647 		port->rx_queue_stats_mapping_enabled = 1;
1648 	return 0;
1649 }
1650 
1651 static void
1652 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1653 {
1654 	int diag = 0;
1655 
1656 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1657 	if (diag != 0) {
1658 		if (diag == -ENOTSUP) {
1659 			port->tx_queue_stats_mapping_enabled = 0;
1660 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1661 		}
1662 		else
1663 			rte_exit(EXIT_FAILURE,
1664 					"set_tx_queue_stats_mapping_registers "
1665 					"failed for port id=%d diag=%d\n",
1666 					pi, diag);
1667 	}
1668 
1669 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1670 	if (diag != 0) {
1671 		if (diag == -ENOTSUP) {
1672 			port->rx_queue_stats_mapping_enabled = 0;
1673 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1674 		}
1675 		else
1676 			rte_exit(EXIT_FAILURE,
1677 					"set_rx_queue_stats_mapping_registers "
1678 					"failed for port id=%d diag=%d\n",
1679 					pi, diag);
1680 	}
1681 }
1682 
1683 void
1684 init_port_config(void)
1685 {
1686 	portid_t pid;
1687 	struct rte_port *port;
1688 
1689 	for (pid = 0; pid < nb_ports; pid++) {
1690 		port = &ports[pid];
1691 		port->dev_conf.rxmode = rx_mode;
1692 		port->dev_conf.fdir_conf = fdir_conf;
1693 		if (nb_rxq > 1) {
1694 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1695 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1696 		} else {
1697 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1698 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1699 		}
1700 
1701 		/* In SR-IOV mode, RSS mode is not available */
1702 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1703 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1704 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1705 			else
1706 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1707 		}
1708 
1709 		port->rx_conf.rx_thresh = rx_thresh;
1710 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1711 		port->rx_conf.rx_drop_en = rx_drop_en;
1712 		port->tx_conf.tx_thresh = tx_thresh;
1713 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1714 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1715 		port->tx_conf.txq_flags = txq_flags;
1716 
1717 		rte_eth_macaddr_get(pid, &port->eth_addr);
1718 
1719 		map_port_queue_stats_mapping_registers(pid, port);
1720 #ifdef RTE_NIC_BYPASS
1721 		rte_eth_dev_bypass_init(pid);
1722 #endif
1723 	}
1724 }
1725 
1726 const uint16_t vlan_tags[] = {
1727 		0,  1,  2,  3,  4,  5,  6,  7,
1728 		8,  9, 10, 11,  12, 13, 14, 15,
1729 		16, 17, 18, 19, 20, 21, 22, 23,
1730 		24, 25, 26, 27, 28, 29, 30, 31
1731 };
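/*
 * Worked example for the DCB+VT pool mapping built below: with ETH_4_TCS
 * the device is split into ETH_32_POOLS, so vlan_tags[i] lands in pool
 * (i % 32); with ETH_8_TCS and ETH_16_POOLS it lands in pool (i % 16).
 */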
1732 
1733 static int
1734 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1735 {
1736 	uint8_t i;
1737 
1738 	/*
1739 	 * Build up the correct configuration for DCB+VT based on the VLAN
1740 	 * tags array given above and the number of traffic classes available.
1741 	 */
1742 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1743 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1744 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1745 
1746 		/* VMDQ+DCB RX and TX configurations */
1747 		vmdq_rx_conf.enable_default_pool = 0;
1748 		vmdq_rx_conf.default_pool = 0;
1749 		vmdq_rx_conf.nb_queue_pools =
1750 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1751 		vmdq_tx_conf.nb_queue_pools =
1752 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1753 
1754 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1755 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1756 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1757 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1758 		}
1759 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1760 			vmdq_rx_conf.dcb_queue[i] = i;
1761 			vmdq_tx_conf.dcb_queue[i] = i;
1762 		}
1763 
1764 		/* Set the multi-queue DCB mode for RX and TX. */
1765 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1766 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1767 		if (dcb_conf->pfc_en)
1768 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1769 		else
1770 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1771 
1772 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1773                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1774 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1775                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1776 	}
1777 	else {
1778 		struct rte_eth_dcb_rx_conf rx_conf;
1779 		struct rte_eth_dcb_tx_conf tx_conf;
1780 
1781 		/* queue mapping configuration of DCB RX and TX */
1782 		if (dcb_conf->num_tcs == ETH_4_TCS)
1783 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1784 		else
1785 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1786 
1787 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1788 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1789 
1790 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1791 			rx_conf.dcb_queue[i] = i;
1792 			tx_conf.dcb_queue[i] = i;
1793 		}
1794 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1795 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1796 		if (dcb_conf->pfc_en)
1797 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1798 		else
1799 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1800 
1801 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1802                                 sizeof(struct rte_eth_dcb_rx_conf)));
1803 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1804                                 sizeof(struct rte_eth_dcb_tx_conf)));
1805 	}
1806 
1807 	return 0;
1808 }
1809 
1810 int
1811 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1812 {
1813 	struct rte_eth_conf port_conf;
1814 	struct rte_port *rte_port;
1815 	int retval;
1816 	uint16_t nb_vlan;
1817 	uint16_t i;
1818 
1819 	/* rxq and txq configuration in dcb mode */
1820 	nb_rxq = 128;
1821 	nb_txq = 128;
1822 	rx_free_thresh = 64;
1823 
1824 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1825 	/* Enter DCB configuration status */
1826 	dcb_config = 1;
1827 
1828 	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1829 	/* Set the DCB configuration for VT mode and non-VT mode. */
1830 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1831 	if (retval < 0)
1832 		return retval;
1833 
1834 	rte_port = &ports[pid];
1835 	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1836 
1837 	rte_port->rx_conf.rx_thresh = rx_thresh;
1838 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1839 	rte_port->tx_conf.tx_thresh = tx_thresh;
1840 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1841 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1842 	/* VLAN filter */
1843 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1844 	for (i = 0; i < nb_vlan; i++) {
1845 		rx_vft_set(pid, vlan_tags[i], 1);
1846 	}
1847 
1848 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1849 	map_port_queue_stats_mapping_registers(pid, rte_port);
1850 
1851 	rte_port->dcb_flag = 1;
1852 
1853 	return 0;
1854 }
1855 
1856 #ifdef RTE_EXEC_ENV_BAREMETAL
1857 #define main _main
1858 #endif
1859 
1860 int
1861 main(int argc, char** argv)
1862 {
1863 	int  diag;
1864 	uint8_t port_id;
1865 
1866 	diag = rte_eal_init(argc, argv);
1867 	if (diag < 0)
1868 		rte_panic("Cannot init EAL\n");
1869 
1870 	nb_ports = (portid_t) rte_eth_dev_count();
1871 	if (nb_ports == 0)
1872 		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1873 							"check that "
1874 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1875 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1876 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1877 			  "configuration file\n");
1878 
1879 	set_def_fwd_config();
1880 	if (nb_lcores == 0)
1881 		rte_panic("Empty set of forwarding logical cores - check the "
1882 			  "core mask supplied in the command parameters\n");
1883 
1884 	argc -= diag;
1885 	argv += diag;
1886 	if (argc > 1)
1887 		launch_args_parse(argc, argv);
1888 
1889 	if (nb_rxq > nb_txq)
1890 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1891 		       "but nb_txq=%d will prevent it from being fully tested.\n",
1892 		       nb_rxq, nb_txq);
1893 
1894 	init_config();
1895 	if (start_port(RTE_PORT_ALL) != 0)
1896 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
1897 
1898 	/* set all ports to promiscuous mode by default */
1899 	for (port_id = 0; port_id < nb_ports; port_id++)
1900 		rte_eth_promiscuous_enable(port_id);
1901 
1902 #ifdef RTE_LIBRTE_CMDLINE
1903 	if (interactive == 1) {
1904 		if (auto_start) {
1905 			printf("Start automatic packet forwarding\n");
1906 			start_packet_forwarding(0);
1907 		}
1908 		prompt();
1909 	} else
1910 #endif
1911 	{
1912 		char c;
1913 		int rc;
1914 
1915 		printf("No interactive command line; starting packet forwarding\n");
1916 		start_packet_forwarding(0);
1917 		printf("Press enter to exit\n");
1918 		rc = read(0, &c, 1);
1919 		if (rc < 0)
1920 			return 1;
1921 	}
1922 
1923 	return 0;
1924 }
1925