xref: /dpdk/app/test-pmd/testpmd.c (revision 299191e0c9ba560a9d0f92cf75f7e9660ac8f1e2)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81 
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87 
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96 
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102 
103 /*
104  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107 
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116 
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;	       /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124 
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134 
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137 
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
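/*
 * Worked example (not in the original source): with 2 probed ports and
 * nb_rxq = 2, init_fwd_streams() allocates 2 * 2 = 4 streams, one per
 * (port, RX queue) pair; the TX port/queue of each stream is filled in by
 * the forwarding configuration according to the port topology.
 */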
140 
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145 	&io_fwd_engine,
146 	&mac_fwd_engine,
147 	&mac_retry_fwd_engine,
148 	&mac_swap_engine,
149 	&flow_gen_engine,
150 	&rx_only_engine,
151 	&tx_only_engine,
152 	&csum_fwd_engine,
153 	&icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 	&ieee1588_fwd_engine,
156 #endif
157 	NULL,
158 };
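/*
 * The table above is NULL-terminated so engines can be looked up by name.
 * A minimal lookup sketch (assuming the fwd_mode_name member declared in
 * testpmd.h):
 *
 *   for (i = 0; fwd_engines[i] != NULL; i++)
 *           if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
 *                   cur_fwd_eng = fwd_engines[i];
 */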
159 
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162 
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools, if
165                                       * specified on the command line. */
166 
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 	TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175 
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
178 
179 /* Whether the current configuration is in DCB mode; 0 means it is not. */
180 uint8_t dcb_config = 0;
181 
182 /* Whether DCB is in testing status */
183 uint8_t dcb_test = 0;
184 
185 /* DCB-on and VT-on mapping is the default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187 
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193 
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201 
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206 
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210 
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214 
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219 
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224 
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229 
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234 
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239 
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244 
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249 
250 /*
251  * Avoid flushing all the RX streams before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254 
255 /*
256  * Avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259 
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264 
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267 
268 #endif
269 
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275 	.split_hdr_size = 0,
276 	.header_split   = 0, /**< Header Split disabled. */
277 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
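/*
 * This template is copied into each port's configuration by
 * init_port_config() (port->dev_conf.rxmode = rx_mode), so VLAN filtering
 * and stripping start enabled on every port; in interactive mode they can
 * be toggled at run time, e.g. with "vlan set strip off <port_id>".
 */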
284 
285 struct rte_fdir_conf fdir_conf = {
286 	.mode = RTE_FDIR_MODE_NONE,
287 	.pballoc = RTE_FDIR_PBALLOC_64K,
288 	.status = RTE_FDIR_REPORT_STATUS,
289 	.drop_queue = 127,
290 };
291 
292 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
293 
294 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
295 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
296 
297 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
298 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
299 
300 uint16_t nb_tx_queue_stats_mappings = 0;
301 uint16_t nb_rx_queue_stats_mappings = 0;
302 
303 /* Forward function declarations */
304 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
305 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
306 
307 /*
308  * Check if all the ports are started.
309  * If so, return a positive value; otherwise, return zero.
310  */
311 static int all_ports_started(void);
312 
313 /*
314  * Setup default configuration.
315  */
316 static void
317 set_default_fwd_lcores_config(void)
318 {
319 	unsigned int i;
320 	unsigned int nb_lc;
321 
322 	nb_lc = 0;
323 	for (i = 0; i < RTE_MAX_LCORE; i++) {
324 		if (! rte_lcore_is_enabled(i))
325 			continue;
326 		if (i == rte_get_master_lcore())
327 			continue;
328 		fwd_lcores_cpuids[nb_lc++] = i;
329 	}
330 	nb_lcores = (lcoreid_t) nb_lc;
331 	nb_cfg_lcores = nb_lcores;
332 	nb_fwd_lcores = 1;
333 }
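/*
 * Example (illustrative): with an EAL core mask of -c 0xf and lcore 0 as
 * the master, the loop above yields fwd_lcores_cpuids = {1, 2, 3},
 * nb_lcores = 3, and nb_fwd_lcores = 1 by default.
 */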
334 
335 static void
336 set_def_peer_eth_addrs(void)
337 {
338 	portid_t i;
339 
340 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
341 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
342 		peer_eth_addrs[i].addr_bytes[5] = i;
343 	}
344 }
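/*
 * With ETHER_LOCAL_ADMIN_ADDR (0x02) in the first address byte, the default
 * peer addresses come out as 02:00:00:00:00:00, 02:00:00:00:00:01, and so
 * on: one locally administered MAC per peer port index.
 */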
345 
346 static void
347 set_default_fwd_ports_config(void)
348 {
349 	portid_t pt_id;
350 
351 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
352 		fwd_ports_ids[pt_id] = pt_id;
353 
354 	nb_cfg_ports = nb_ports;
355 	nb_fwd_ports = nb_ports;
356 }
357 
358 void
359 set_def_fwd_config(void)
360 {
361 	set_default_fwd_lcores_config();
362 	set_def_peer_eth_addrs();
363 	set_default_fwd_ports_config();
364 }
365 
366 /*
367  * Configuration initialisation done once at init time.
368  */
369 struct mbuf_ctor_arg {
370 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
371 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
372 };
373 
374 struct mbuf_pool_ctor_arg {
375 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
376 };
377 
378 static void
379 testpmd_mbuf_ctor(struct rte_mempool *mp,
380 		  void *opaque_arg,
381 		  void *raw_mbuf,
382 		  __attribute__((unused)) unsigned i)
383 {
384 	struct mbuf_ctor_arg *mb_ctor_arg;
385 	struct rte_mbuf    *mb;
386 
387 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
388 	mb = (struct rte_mbuf *) raw_mbuf;
389 
390 	mb->pool         = mp;
391 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
392 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
393 			mb_ctor_arg->seg_buf_offset);
394 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
395 	mb->ol_flags     = 0;
396 	mb->data_off     = RTE_PKTMBUF_HEADROOM;
397 	mb->nb_segs      = 1;
398 	mb->tx_offload   = 0;
399 	mb->vlan_tci     = 0;
400 	mb->hash.rss     = 0;
401 }
402 
403 static void
404 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
405 		       void *opaque_arg)
406 {
407 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
408 	struct rte_pktmbuf_pool_private *mbp_priv;
409 
410 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
411 		printf("%s(%s) private_data_size %d < %d\n",
412 		       __func__, mp->name, (int) mp->private_data_size,
413 		       (int) sizeof(struct rte_pktmbuf_pool_private));
414 		return;
415 	}
416 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
417 	mbp_priv = rte_mempool_get_priv(mp);
418 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
419 }
420 
421 static void
422 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
423 		 unsigned int socket_id)
424 {
425 	char pool_name[RTE_MEMPOOL_NAMESIZE];
426 	struct rte_mempool *rte_mp;
427 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
428 	struct mbuf_ctor_arg mb_ctor_arg;
429 	uint32_t mb_size;
430 
431 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
432 						mbuf_seg_size);
433 	mb_ctor_arg.seg_buf_offset =
434 		(uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
435 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
436 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
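	/*
	 * Layout sketch with illustrative numbers: assuming a 128-byte
	 * RTE_PKTMBUF_HEADROOM, a 2048-byte mbuf_seg_size and a struct
	 * rte_mbuf that rounds up to 128 bytes, each pool element is
	 * 128 (header) + 128 (headroom) + 2048 (data) = 2304 bytes, with
	 * buf_addr pointing just past the rte_mbuf header.
	 */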
437 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
438 
439 #ifdef RTE_LIBRTE_PMD_XENVIRT
440 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
441                                    (unsigned) mb_mempool_cache,
442                                    sizeof(struct rte_pktmbuf_pool_private),
443                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
444                                    testpmd_mbuf_ctor, &mb_ctor_arg,
445                                    socket_id, 0);
446 
447 
448 
449 #else
450 	if (mp_anon != 0)
451 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
452 				    (unsigned) mb_mempool_cache,
453 				    sizeof(struct rte_pktmbuf_pool_private),
454 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
455 				    testpmd_mbuf_ctor, &mb_ctor_arg,
456 				    socket_id, 0);
457 	else
458 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
459 				    (unsigned) mb_mempool_cache,
460 				    sizeof(struct rte_pktmbuf_pool_private),
461 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
462 				    testpmd_mbuf_ctor, &mb_ctor_arg,
463 				    socket_id, 0);
464 
465 #endif
466 
467 	if (rte_mp == NULL) {
468 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
469 						"failed\n", socket_id);
470 	} else if (verbose_level > 0) {
471 		rte_mempool_dump(stdout, rte_mp);
472 	}
473 }
474 
475 /*
476  * Check whether the given socket id is valid in NUMA mode;
477  * if valid, return 0, else return -1.
478  */
479 static int
480 check_socket_id(const unsigned int socket_id)
481 {
482 	static int warning_once = 0;
483 
484 	if (socket_id >= MAX_SOCKET) {
485 		if (!warning_once && numa_support)
486 			printf("Warning: NUMA should be configured manually by"
487 			       " using --port-numa-config and"
488 			       " --ring-numa-config parameters along with"
489 			       " --numa.\n");
490 		warning_once = 1;
491 		return -1;
492 	}
493 	return 0;
494 }
495 
496 static void
497 init_config(void)
498 {
499 	portid_t pid;
500 	struct rte_port *port;
501 	struct rte_mempool *mbp;
502 	unsigned int nb_mbuf_per_pool;
503 	lcoreid_t  lc_id;
504 	uint8_t port_per_socket[MAX_SOCKET];
505 
506 	memset(port_per_socket, 0, MAX_SOCKET);
507 	/* Configuration of logical cores. */
508 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
509 				sizeof(struct fwd_lcore *) * nb_lcores,
510 				RTE_CACHE_LINE_SIZE);
511 	if (fwd_lcores == NULL) {
512 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
513 							"failed\n", nb_lcores);
514 	}
515 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
516 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
517 					       sizeof(struct fwd_lcore),
518 					       RTE_CACHE_LINE_SIZE);
519 		if (fwd_lcores[lc_id] == NULL) {
520 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
521 								"failed\n");
522 		}
523 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
524 	}
525 
526 	/*
527 	 * Create mbuf pools.
528 	 * If NUMA support is disabled, create a single mbuf pool in
529 	 * socket 0 memory by default.
530 	 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
531 	 *
532 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
533 	 * and nb_txd can be configured at run time.
534 	 */
535 	if (param_total_num_mbufs)
536 		nb_mbuf_per_pool = param_total_num_mbufs;
537 	else {
538 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
539 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
540 
541 		if (!numa_support)
542 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
543 	}
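	/*
	 * Sizing sketch (illustrative values): assuming RTE_TEST_RX_DESC_MAX
	 * and RTE_TEST_TX_DESC_MAX are 2048 each and a 250-entry mempool
	 * cache with 4 lcores, the default works out to
	 * 2048 + 4 * 250 + 2048 + MAX_PKT_BURST mbufs, multiplied by
	 * nb_ports when a single UMA pool serves all ports.
	 */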
544 
545 	if (!numa_support) {
546 		if (socket_num == UMA_NO_CONFIG)
547 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
548 		else
549 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
550 						 socket_num);
551 	}
552 
553 	/* Configuration of Ethernet ports. */
554 	ports = rte_zmalloc("testpmd: ports",
555 			    sizeof(struct rte_port) * nb_ports,
556 			    RTE_CACHE_LINE_SIZE);
557 	if (ports == NULL) {
558 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
559 							"failed\n", nb_ports);
560 	}
561 
562 	for (pid = 0; pid < nb_ports; pid++) {
563 		port = &ports[pid];
564 		rte_eth_dev_info_get(pid, &port->dev_info);
565 
566 		if (numa_support) {
567 			if (port_numa[pid] != NUMA_NO_CONFIG)
568 				port_per_socket[port_numa[pid]]++;
569 			else {
570 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
571 
572 				/* if socket_id is invalid, set to 0 */
573 				if (check_socket_id(socket_id) < 0)
574 					socket_id = 0;
575 				port_per_socket[socket_id]++;
576 			}
577 		}
578 
579 		/* set flag to initialize port/queue */
580 		port->need_reconfig = 1;
581 		port->need_reconfig_queues = 1;
582 	}
583 
584 	if (numa_support) {
585 		uint8_t i;
586 		unsigned int nb_mbuf;
587 
588 		if (param_total_num_mbufs)
589 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
590 
591 		for (i = 0; i < MAX_SOCKET; i++) {
592 			nb_mbuf = (nb_mbuf_per_pool *
593 						port_per_socket[i]);
594 			if (nb_mbuf)
595 				mbuf_pool_create(mbuf_data_size,
596 						nb_mbuf,i);
597 		}
598 	}
599 	init_port_config();
600 
601 	/*
602 	 * Record which mbuf pool each logical core should use, if needed.
603 	 */
604 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
605 		mbp = mbuf_pool_find(
606 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
607 
608 		if (mbp == NULL)
609 			mbp = mbuf_pool_find(0);
610 		fwd_lcores[lc_id]->mbp = mbp;
611 	}
612 
613 	/* Configuration of packet forwarding streams. */
614 	if (init_fwd_streams() < 0)
615 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
616 }
617 
618 
619 void
620 reconfig(portid_t new_port_id, unsigned socket_id)
621 {
622 	struct rte_port *port;
623 
624 	/* Reconfiguration of Ethernet ports. */
625 	ports = rte_realloc(ports,
626 			    sizeof(struct rte_port) * nb_ports,
627 			    RTE_CACHE_LINE_SIZE);
628 	if (ports == NULL) {
629 		rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
630 				nb_ports);
631 	}
632 
633 	port = &ports[new_port_id];
634 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
635 
636 	/* set flag to initialize port/queue */
637 	port->need_reconfig = 1;
638 	port->need_reconfig_queues = 1;
639 	port->socket_id = socket_id;
640 
641 	init_port_config();
642 }
643 
644 
645 int
646 init_fwd_streams(void)
647 {
648 	portid_t pid;
649 	struct rte_port *port;
650 	streamid_t sm_id, nb_fwd_streams_new;
651 
652 	/* set socket id depending on whether NUMA support is enabled */
653 	for (pid = 0; pid < nb_ports; pid++) {
654 		port = &ports[pid];
655 		if (nb_rxq > port->dev_info.max_rx_queues) {
656 			printf("Fail: nb_rxq(%d) is greater than "
657 				"max_rx_queues(%d)\n", nb_rxq,
658 				port->dev_info.max_rx_queues);
659 			return -1;
660 		}
661 		if (nb_txq > port->dev_info.max_tx_queues) {
662 			printf("Fail: nb_txq(%d) is greater than "
663 				"max_tx_queues(%d)\n", nb_txq,
664 				port->dev_info.max_tx_queues);
665 			return -1;
666 		}
667 		if (numa_support) {
668 			if (port_numa[pid] != NUMA_NO_CONFIG)
669 				port->socket_id = port_numa[pid];
670 			else {
671 				port->socket_id = rte_eth_dev_socket_id(pid);
672 
673 				/* if socket_id is invalid, set to 0 */
674 				if (check_socket_id(port->socket_id) < 0)
675 					port->socket_id = 0;
676 			}
677 		}
678 		else {
679 			if (socket_num == UMA_NO_CONFIG)
680 				port->socket_id = 0;
681 			else
682 				port->socket_id = socket_num;
683 		}
684 	}
685 
686 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
687 	if (nb_fwd_streams_new == nb_fwd_streams)
688 		return 0;
689 	/* clear the old */
690 	if (fwd_streams != NULL) {
691 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
692 			if (fwd_streams[sm_id] == NULL)
693 				continue;
694 			rte_free(fwd_streams[sm_id]);
695 			fwd_streams[sm_id] = NULL;
696 		}
697 		rte_free(fwd_streams);
698 		fwd_streams = NULL;
699 	}
700 
701 	/* init new */
702 	nb_fwd_streams = nb_fwd_streams_new;
703 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
704 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
705 	if (fwd_streams == NULL)
706 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
707 						"failed\n", nb_fwd_streams);
708 
709 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
710 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
711 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
712 		if (fwd_streams[sm_id] == NULL)
713 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
714 								" failed\n");
715 	}
716 
717 	return 0;
718 }
719 
720 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
721 static void
722 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
723 {
724 	unsigned int total_burst;
725 	unsigned int nb_burst;
726 	unsigned int burst_stats[3];
727 	uint16_t pktnb_stats[3];
728 	uint16_t nb_pkt;
729 	int burst_percent[3];
730 
731 	/*
732 	 * First compute the total number of packet bursts and the
733 	 * two highest numbers of bursts of the same number of packets.
734 	 */
735 	total_burst = 0;
736 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
737 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
738 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
739 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
740 		if (nb_burst == 0)
741 			continue;
742 		total_burst += nb_burst;
743 		if (nb_burst > burst_stats[0]) {
744 			burst_stats[1] = burst_stats[0];
745 			pktnb_stats[1] = pktnb_stats[0];
746 			burst_stats[0] = nb_burst;
747 			pktnb_stats[0] = nb_pkt;
748 		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst count */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
749 	}
750 	if (total_burst == 0)
751 		return;
752 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
753 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
754 	       burst_percent[0], (int) pktnb_stats[0]);
755 	if (burst_stats[0] == total_burst) {
756 		printf("]\n");
757 		return;
758 	}
759 	if (burst_stats[0] + burst_stats[1] == total_burst) {
760 		printf(" + %d%% of %d pkts]\n",
761 		       100 - burst_percent[0], pktnb_stats[1]);
762 		return;
763 	}
764 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
765 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
766 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
767 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
768 		return;
769 	}
770 	printf(" + %d%% of %d pkts + %d%% of others]\n",
771 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
772 }
773 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
774 
775 static void
776 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
777 {
778 	struct rte_port *port;
779 	uint8_t i;
780 
781 	static const char *fwd_stats_border = "----------------------";
782 
783 	port = &ports[port_id];
784 	printf("\n  %s Forward statistics for port %-2d %s\n",
785 	       fwd_stats_border, port_id, fwd_stats_border);
786 
787 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
788 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
789 		       "%-"PRIu64"\n",
790 		       stats->ipackets, stats->imissed,
791 		       (uint64_t) (stats->ipackets + stats->imissed));
792 
793 		if (cur_fwd_eng == &csum_fwd_engine)
794 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
795 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
796 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
797 			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
798 			       "RX-error: %-"PRIu64"\n",
799 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
800 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
801 		}
802 
803 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
804 		       "%-"PRIu64"\n",
805 		       stats->opackets, port->tx_dropped,
806 		       (uint64_t) (stats->opackets + port->tx_dropped));
807 	}
808 	else {
809 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
810 		       "%14"PRIu64"\n",
811 		       stats->ipackets, stats->imissed,
812 		       (uint64_t) (stats->ipackets + stats->imissed));
813 
814 		if (cur_fwd_eng == &csum_fwd_engine)
815 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
816 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
817 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
818 			printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
819 			       "    RX-error:%"PRIu64"\n",
820 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
821 			printf("  RX-nombufs:             %14"PRIu64"\n",
822 			       stats->rx_nombuf);
823 		}
824 
825 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
826 		       "%14"PRIu64"\n",
827 		       stats->opackets, port->tx_dropped,
828 		       (uint64_t) (stats->opackets + port->tx_dropped));
829 	}
830 
831 	/* Display statistics of XON/XOFF pause frames, if any. */
832 	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
833 	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
834 		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
835 		       stats->rx_pause_xoff, stats->rx_pause_xon);
836 		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
837 		       stats->tx_pause_xoff, stats->tx_pause_xon);
838 	}
839 
840 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
841 	if (port->rx_stream)
842 		pkt_burst_stats_display("RX",
843 			&port->rx_stream->rx_burst_stats);
844 	if (port->tx_stream)
845 		pkt_burst_stats_display("TX",
846 			&port->tx_stream->tx_burst_stats);
847 #endif
848 	/* stats fdir */
849 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
850 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
851 		       stats->fdirmiss,
852 		       stats->fdirmatch);
853 
854 	if (port->rx_queue_stats_mapping_enabled) {
855 		printf("\n");
856 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
857 			printf("  Stats reg %2d RX-packets:%14"PRIu64
858 			       "     RX-errors:%14"PRIu64
859 			       "    RX-bytes:%14"PRIu64"\n",
860 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
861 		}
862 		printf("\n");
863 	}
864 	if (port->tx_queue_stats_mapping_enabled) {
865 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
866 			printf("  Stats reg %2d TX-packets:%14"PRIu64
867 			       "                                 TX-bytes:%14"PRIu64"\n",
868 			       i, stats->q_opackets[i], stats->q_obytes[i]);
869 		}
870 	}
871 
872 	printf("  %s--------------------------------%s\n",
873 	       fwd_stats_border, fwd_stats_border);
874 }
875 
876 static void
877 fwd_stream_stats_display(streamid_t stream_id)
878 {
879 	struct fwd_stream *fs;
880 	static const char *fwd_top_stats_border = "-------";
881 
882 	fs = fwd_streams[stream_id];
883 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
884 	    (fs->fwd_dropped == 0))
885 		return;
886 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
887 	       "TX Port=%2d/Queue=%2d %s\n",
888 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
889 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
890 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
891 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
892 
893 	/* if checksum mode */
894 	if (cur_fwd_eng == &csum_fwd_engine) {
895 		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
896 		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
897 	}
898 
899 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
900 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
901 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
902 #endif
903 }
904 
905 static void
906 flush_fwd_rx_queues(void)
907 {
908 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
909 	portid_t  rxp;
910 	portid_t port_id;
911 	queueid_t rxq;
912 	uint16_t  nb_rx;
913 	uint16_t  i;
914 	uint8_t   j;
915 
916 	for (j = 0; j < 2; j++) {
917 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
918 			for (rxq = 0; rxq < nb_rxq; rxq++) {
919 				port_id = fwd_ports_ids[rxp];
920 				do {
921 					nb_rx = rte_eth_rx_burst(port_id, rxq,
922 						pkts_burst, MAX_PKT_BURST);
923 					for (i = 0; i < nb_rx; i++)
924 						rte_pktmbuf_free(pkts_burst[i]);
925 				} while (nb_rx > 0);
926 			}
927 		}
928 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
929 	}
930 }
931 
932 static void
933 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
934 {
935 	struct fwd_stream **fsm;
936 	streamid_t nb_fs;
937 	streamid_t sm_id;
938 
939 	fsm = &fwd_streams[fc->stream_idx];
940 	nb_fs = fc->stream_nb;
941 	do {
942 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
943 			(*pkt_fwd)(fsm[sm_id]);
944 	} while (! fc->stopped);
945 }
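/*
 * Each engine supplies a packet_fwd_t callback that handles one burst for
 * one stream. A minimal receive-and-drop sketch (hypothetical engine, not
 * part of this file):
 *
 *   static void
 *   pkt_burst_drop(struct fwd_stream *fs)
 *   {
 *           struct rte_mbuf *pkts[MAX_PKT_BURST];
 *           uint16_t i, nb_rx;
 *
 *           nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts,
 *                                    nb_pkt_per_burst);
 *           for (i = 0; i < nb_rx; i++)
 *                   rte_pktmbuf_free(pkts[i]);
 *   }
 */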
946 
947 static int
948 start_pkt_forward_on_core(void *fwd_arg)
949 {
950 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
951 			     cur_fwd_config.fwd_eng->packet_fwd);
952 	return 0;
953 }
954 
955 /*
956  * Run the TXONLY packet forwarding engine to send a single burst of packets.
957  * Used to start communication flows in network loopback test configurations.
958  */
959 static int
960 run_one_txonly_burst_on_core(void *fwd_arg)
961 {
962 	struct fwd_lcore *fwd_lc;
963 	struct fwd_lcore tmp_lcore;
964 
965 	fwd_lc = (struct fwd_lcore *) fwd_arg;
966 	tmp_lcore = *fwd_lc;
967 	tmp_lcore.stopped = 1;
968 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
969 	return 0;
970 }
971 
972 /*
973  * Launch packet forwarding:
974  *     - Setup per-port forwarding context.
975  *     - launch logical cores with their forwarding configuration.
976  */
977 static void
978 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
979 {
980 	port_fwd_begin_t port_fwd_begin;
981 	unsigned int i;
982 	unsigned int lc_id;
983 	int diag;
984 
985 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
986 	if (port_fwd_begin != NULL) {
987 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
988 			(*port_fwd_begin)(fwd_ports_ids[i]);
989 	}
990 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
991 		lc_id = fwd_lcores_cpuids[i];
992 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
993 			fwd_lcores[i]->stopped = 0;
994 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
995 						     fwd_lcores[i], lc_id);
996 			if (diag != 0)
997 				printf("launch lcore %u failed - diag=%d\n",
998 				       lc_id, diag);
999 		}
1000 	}
1001 }
1002 
1003 /*
1004  * Launch packet forwarding configuration.
1005  */
1006 void
1007 start_packet_forwarding(int with_tx_first)
1008 {
1009 	port_fwd_begin_t port_fwd_begin;
1010 	port_fwd_end_t  port_fwd_end;
1011 	struct rte_port *port;
1012 	unsigned int i;
1013 	portid_t   pt_id;
1014 	streamid_t sm_id;
1015 
1016 	if (all_ports_started() == 0) {
1017 		printf("Not all ports were started\n");
1018 		return;
1019 	}
1020 	if (test_done == 0) {
1021 		printf("Packet forwarding already started\n");
1022 		return;
1023 	}
1024 	if (dcb_test) {
1025 		for (i = 0; i < nb_fwd_ports; i++) {
1026 			pt_id = fwd_ports_ids[i];
1027 			port = &ports[pt_id];
1028 			if (!port->dcb_flag) {
1029 				printf("In DCB mode, all forwarding ports must "
1030 				       "be configured in this mode.\n");
1031 				return;
1032 			}
1033 		}
1034 		if (nb_fwd_lcores == 1) {
1035 			printf("In DCB mode, the number of forwarding cores "
1036 			       "should be larger than 1.\n");
1037 			return;
1038 		}
1039 	}
1040 	test_done = 0;
1041 
1042 	if (!no_flush_rx)
1043 		flush_fwd_rx_queues();
1044 
1045 	fwd_config_setup();
1046 	rxtx_config_display();
1047 
1048 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1049 		pt_id = fwd_ports_ids[i];
1050 		port = &ports[pt_id];
1051 		rte_eth_stats_get(pt_id, &port->stats);
1052 		port->tx_dropped = 0;
1053 
1054 		map_port_queue_stats_mapping_registers(pt_id, port);
1055 	}
1056 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1057 		fwd_streams[sm_id]->rx_packets = 0;
1058 		fwd_streams[sm_id]->tx_packets = 0;
1059 		fwd_streams[sm_id]->fwd_dropped = 0;
1060 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1061 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1062 
1063 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1064 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1065 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1066 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1067 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1068 #endif
1069 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1070 		fwd_streams[sm_id]->core_cycles = 0;
1071 #endif
1072 	}
1073 	if (with_tx_first) {
1074 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1075 		if (port_fwd_begin != NULL) {
1076 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1077 				(*port_fwd_begin)(fwd_ports_ids[i]);
1078 		}
1079 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1080 		rte_eal_mp_wait_lcore();
1081 		port_fwd_end = tx_only_engine.port_fwd_end;
1082 		if (port_fwd_end != NULL) {
1083 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1084 				(*port_fwd_end)(fwd_ports_ids[i]);
1085 		}
1086 	}
1087 	launch_packet_forwarding(start_pkt_forward_on_core);
1088 }
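/*
 * In interactive mode the "start" command calls this with with_tx_first = 0,
 * while "start tx_first" passes 1 so that a single TXONLY burst primes
 * loopback configurations before the configured engine takes over.
 */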
1089 
1090 void
1091 stop_packet_forwarding(void)
1092 {
1093 	struct rte_eth_stats stats;
1094 	struct rte_port *port;
1095 	port_fwd_end_t  port_fwd_end;
1096 	int i;
1097 	portid_t   pt_id;
1098 	streamid_t sm_id;
1099 	lcoreid_t  lc_id;
1100 	uint64_t total_recv;
1101 	uint64_t total_xmit;
1102 	uint64_t total_rx_dropped;
1103 	uint64_t total_tx_dropped;
1104 	uint64_t total_rx_nombuf;
1105 	uint64_t tx_dropped;
1106 	uint64_t rx_bad_ip_csum;
1107 	uint64_t rx_bad_l4_csum;
1108 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1109 	uint64_t fwd_cycles;
1110 #endif
1111 	static const char *acc_stats_border = "+++++++++++++++";
1112 
1113 	if (all_ports_started() == 0) {
1114 		printf("Not all ports were started\n");
1115 		return;
1116 	}
1117 	if (test_done) {
1118 		printf("Packet forwarding not started\n");
1119 		return;
1120 	}
1121 	printf("Telling cores to stop...");
1122 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1123 		fwd_lcores[lc_id]->stopped = 1;
1124 	printf("\nWaiting for lcores to finish...\n");
1125 	rte_eal_mp_wait_lcore();
1126 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1127 	if (port_fwd_end != NULL) {
1128 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1129 			pt_id = fwd_ports_ids[i];
1130 			(*port_fwd_end)(pt_id);
1131 		}
1132 	}
1133 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1134 	fwd_cycles = 0;
1135 #endif
1136 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1137 		if (cur_fwd_config.nb_fwd_streams >
1138 		    cur_fwd_config.nb_fwd_ports) {
1139 			fwd_stream_stats_display(sm_id);
1140 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1141 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1142 		} else {
1143 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1144 				fwd_streams[sm_id];
1145 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1146 				fwd_streams[sm_id];
1147 		}
1148 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1149 		tx_dropped = (uint64_t) (tx_dropped +
1150 					 fwd_streams[sm_id]->fwd_dropped);
1151 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1152 
1153 		rx_bad_ip_csum =
1154 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1155 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1156 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1157 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1158 							rx_bad_ip_csum;
1159 
1160 		rx_bad_l4_csum =
1161 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1162 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1163 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1164 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1165 							rx_bad_l4_csum;
1166 
1167 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1168 		fwd_cycles = (uint64_t) (fwd_cycles +
1169 					 fwd_streams[sm_id]->core_cycles);
1170 #endif
1171 	}
1172 	total_recv = 0;
1173 	total_xmit = 0;
1174 	total_rx_dropped = 0;
1175 	total_tx_dropped = 0;
1176 	total_rx_nombuf  = 0;
1177 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1178 		pt_id = fwd_ports_ids[i];
1179 
1180 		port = &ports[pt_id];
1181 		rte_eth_stats_get(pt_id, &stats);
1182 		stats.ipackets -= port->stats.ipackets;
1183 		port->stats.ipackets = 0;
1184 		stats.opackets -= port->stats.opackets;
1185 		port->stats.opackets = 0;
1186 		stats.ibytes   -= port->stats.ibytes;
1187 		port->stats.ibytes = 0;
1188 		stats.obytes   -= port->stats.obytes;
1189 		port->stats.obytes = 0;
1190 		stats.imissed  -= port->stats.imissed;
1191 		port->stats.imissed = 0;
1192 		stats.oerrors  -= port->stats.oerrors;
1193 		port->stats.oerrors = 0;
1194 		stats.rx_nombuf -= port->stats.rx_nombuf;
1195 		port->stats.rx_nombuf = 0;
1196 		stats.fdirmatch -= port->stats.fdirmatch;
1197 		port->stats.fdirmatch = 0;
1198 		stats.fdirmiss -= port->stats.fdirmiss;
1199 		port->stats.fdirmiss = 0;
1200 
1201 		total_recv += stats.ipackets;
1202 		total_xmit += stats.opackets;
1203 		total_rx_dropped += stats.imissed;
1204 		total_tx_dropped += port->tx_dropped;
1205 		total_rx_nombuf  += stats.rx_nombuf;
1206 
1207 		fwd_port_stats_display(pt_id, &stats);
1208 	}
1209 	printf("\n  %s Accumulated forward statistics for all ports"
1210 	       "%s\n",
1211 	       acc_stats_border, acc_stats_border);
1212 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1213 	       "%-"PRIu64"\n"
1214 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1215 	       "%-"PRIu64"\n",
1216 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1217 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1218 	if (total_rx_nombuf > 0)
1219 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1220 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1221 	       "%s\n",
1222 	       acc_stats_border, acc_stats_border);
1223 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1224 	if (total_recv > 0)
1225 		printf("\n  CPU cycles/packet=%u (total cycles="
1226 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1227 		       (unsigned int)(fwd_cycles / total_recv),
1228 		       fwd_cycles, total_recv);
1229 #endif
1230 	printf("\nDone.\n");
1231 	test_done = 1;
1232 }
1233 
1234 void
1235 dev_set_link_up(portid_t pid)
1236 {
1237 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1238 		printf("\nSet link up fail.\n");
1239 }
1240 
1241 void
1242 dev_set_link_down(portid_t pid)
1243 {
1244 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1245 		printf("\nSet link down fail.\n");
1246 }
1247 
1248 static int
1249 all_ports_started(void)
1250 {
1251 	portid_t pi;
1252 	struct rte_port *port;
1253 
1254 	for (pi = 0; pi < nb_ports; pi++) {
1255 		port = &ports[pi];
1256 		/* Check if there is a port which is not started */
1257 		if (port->port_status != RTE_PORT_STARTED)
1258 			return 0;
1259 	}
1260 
1261 	/* All ports are started */
1262 	return 1;
1263 }
1264 
1265 int
1266 start_port(portid_t pid)
1267 {
1268 	int diag, need_check_link_status = 0;
1269 	portid_t pi;
1270 	queueid_t qi;
1271 	struct rte_port *port;
1272 	struct ether_addr mac_addr;
1273 
1274 	if (test_done == 0) {
1275 		printf("Please stop forwarding first\n");
1276 		return -1;
1277 	}
1278 
1279 	if (init_fwd_streams() < 0) {
1280 		printf("Fail from init_fwd_streams()\n");
1281 		return -1;
1282 	}
1283 
1284 	if (dcb_config)
1285 		dcb_test = 1;
1286 	for (pi = 0; pi < nb_ports; pi++) {
1287 		if (pid < nb_ports && pid != pi)
1288 			continue;
1289 
1290 		port = &ports[pi];
1291 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1292 						 RTE_PORT_HANDLING) == 0) {
1293 			printf("Port %d is now not stopped\n", pi);
1294 			continue;
1295 		}
1296 
1297 		if (port->need_reconfig > 0) {
1298 			port->need_reconfig = 0;
1299 
1300 			printf("Configuring Port %d (socket %u)\n", pi,
1301 					port->socket_id);
1302 			/* configure port */
1303 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1304 						&(port->dev_conf));
1305 			if (diag != 0) {
1306 				if (rte_atomic16_cmpset(&(port->port_status),
1307 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1308 					printf("Port %d can not be set back "
1309 							"to stopped\n", pi);
1310 				printf("Fail to configure port %d\n", pi);
1311 				/* try to reconfigure port next time */
1312 				port->need_reconfig = 1;
1313 				return -1;
1314 			}
1315 		}
1316 		if (port->need_reconfig_queues > 0) {
1317 			port->need_reconfig_queues = 0;
1318 			/* setup tx queues */
1319 			for (qi = 0; qi < nb_txq; qi++) {
1320 				if ((numa_support) &&
1321 					(txring_numa[pi] != NUMA_NO_CONFIG))
1322 					diag = rte_eth_tx_queue_setup(pi, qi,
1323 						nb_txd,txring_numa[pi],
1324 						&(port->tx_conf));
1325 				else
1326 					diag = rte_eth_tx_queue_setup(pi, qi,
1327 						nb_txd,port->socket_id,
1328 						&(port->tx_conf));
1329 
1330 				if (diag == 0)
1331 					continue;
1332 
1333 				/* Fail to setup tx queue, return */
1334 				if (rte_atomic16_cmpset(&(port->port_status),
1335 							RTE_PORT_HANDLING,
1336 							RTE_PORT_STOPPED) == 0)
1337 					printf("Port %d can not be set back "
1338 							"to stopped\n", pi);
1339 				printf("Fail to configure port %d tx queues\n", pi);
1340 				/* try to reconfigure queues next time */
1341 				port->need_reconfig_queues = 1;
1342 				return -1;
1343 			}
1344 			/* setup rx queues */
1345 			for (qi = 0; qi < nb_rxq; qi++) {
1346 				if ((numa_support) &&
1347 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1348 					struct rte_mempool * mp =
1349 						mbuf_pool_find(rxring_numa[pi]);
1350 					if (mp == NULL) {
1351 						printf("Failed to setup RX queue: "
1352 							"no mempool allocated "
1353 							"on socket %d\n",
1354 							rxring_numa[pi]);
1355 						return -1;
1356 					}
1357 
1358 					diag = rte_eth_rx_queue_setup(pi, qi,
1359 					     nb_rxd,rxring_numa[pi],
1360 					     &(port->rx_conf),mp);
1361 				}
1362 				else
1363 					diag = rte_eth_rx_queue_setup(pi, qi,
1364 					     nb_rxd,port->socket_id,
1365 					     &(port->rx_conf),
1366 				             mbuf_pool_find(port->socket_id));
1367 
1368 				if (diag == 0)
1369 					continue;
1370 
1371 
1372 				/* Fail to setup rx queue, return */
1373 				if (rte_atomic16_cmpset(&(port->port_status),
1374 							RTE_PORT_HANDLING,
1375 							RTE_PORT_STOPPED) == 0)
1376 					printf("Port %d can not be set back "
1377 							"to stopped\n", pi);
1378 				printf("Fail to configure port %d rx queues\n", pi);
1379 				/* try to reconfigure queues next time */
1380 				port->need_reconfig_queues = 1;
1381 				return -1;
1382 			}
1383 		}
1384 		/* start port */
1385 		if (rte_eth_dev_start(pi) < 0) {
1386 			printf("Fail to start port %d\n", pi);
1387 
1388 			/* Failed to start the port, set it back to stopped */
1389 			if (rte_atomic16_cmpset(&(port->port_status),
1390 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1391 				printf("Port %d can not be set back to "
1392 							"stopped\n", pi);
1393 			continue;
1394 		}
1395 
1396 		if (rte_atomic16_cmpset(&(port->port_status),
1397 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1398 			printf("Port %d can not be set into started\n", pi);
1399 
1400 		rte_eth_macaddr_get(pi, &mac_addr);
1401 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1402 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1403 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1404 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1405 
1406 		/* at least one port started, need checking link status */
1407 		need_check_link_status = 1;
1408 	}
1409 
1410 	if (need_check_link_status && !no_link_check)
1411 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1412 	else
1413 		printf("Please stop the ports first\n");
1414 
1415 	printf("Done\n");
1416 	return 0;
1417 }
1418 
1419 void
1420 stop_port(portid_t pid)
1421 {
1422 	portid_t pi;
1423 	struct rte_port *port;
1424 	int need_check_link_status = 0;
1425 
1426 	if (test_done == 0) {
1427 		printf("Please stop forwarding first\n");
1428 		return;
1429 	}
1430 	if (dcb_test) {
1431 		dcb_test = 0;
1432 		dcb_config = 0;
1433 	}
1434 	printf("Stopping ports...\n");
1435 
1436 	for (pi = 0; pi < nb_ports; pi++) {
1437 		if (pid < nb_ports && pid != pi)
1438 			continue;
1439 
1440 		port = &ports[pi];
1441 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1442 						RTE_PORT_HANDLING) == 0)
1443 			continue;
1444 
1445 		rte_eth_dev_stop(pi);
1446 
1447 		if (rte_atomic16_cmpset(&(port->port_status),
1448 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1449 			printf("Port %d can not be set into stopped\n", pi);
1450 		need_check_link_status = 1;
1451 	}
1452 	if (need_check_link_status && !no_link_check)
1453 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1454 
1455 	printf("Done\n");
1456 }
1457 
1458 void
1459 close_port(portid_t pid)
1460 {
1461 	portid_t pi;
1462 	struct rte_port *port;
1463 
1464 	if (test_done == 0) {
1465 		printf("Please stop forwarding first\n");
1466 		return;
1467 	}
1468 
1469 	printf("Closing ports...\n");
1470 
1471 	for (pi = 0; pi < nb_ports; pi++) {
1472 		if (pid < nb_ports && pid != pi)
1473 			continue;
1474 
1475 		port = &ports[pi];
1476 		if (rte_atomic16_cmpset(&(port->port_status),
1477 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1478 			printf("Port %d is now not stopped\n", pi);
1479 			continue;
1480 		}
1481 
1482 		rte_eth_dev_close(pi);
1483 
1484 		if (rte_atomic16_cmpset(&(port->port_status),
1485 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1486 			printf("Port %d can not be set into closed\n", pi);
1487 	}
1488 
1489 	printf("Done\n");
1490 }
1491 
1492 int
1493 all_ports_stopped(void)
1494 {
1495 	portid_t pi;
1496 	struct rte_port *port;
1497 
1498 	for (pi = 0; pi < nb_ports; pi++) {
1499 		port = &ports[pi];
1500 		if (port->port_status != RTE_PORT_STOPPED)
1501 			return 0;
1502 	}
1503 
1504 	return 1;
1505 }
1506 
1507 int
1508 port_is_started(portid_t port_id)
1509 {
1510 	if (port_id_is_invalid(port_id))
1511 		return -1;
1512 
1513 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1514 		return 0;
1515 
1516 	return 1;
1517 }
1518 
1519 void
1520 pmd_test_exit(void)
1521 {
1522 	portid_t pt_id;
1523 
1524 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1525 		printf("Stopping port %d...", pt_id);
1526 		fflush(stdout);
1527 		rte_eth_dev_close(pt_id);
1528 		printf("done\n");
1529 	}
1530 	printf("bye...\n");
1531 }
1532 
1533 typedef void (*cmd_func_t)(void);
1534 struct pmd_test_command {
1535 	const char *cmd_name;
1536 	cmd_func_t cmd_func;
1537 };
1538 
1539 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1540 
1541 /* Check the link status of all ports in up to 9 seconds, and finally print the status of each port */
1542 static void
1543 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1544 {
1545 #define CHECK_INTERVAL 100 /* 100ms */
1546 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1547 	uint8_t portid, count, all_ports_up, print_flag = 0;
1548 	struct rte_eth_link link;
1549 
1550 	printf("Checking link statuses...\n");
1551 	fflush(stdout);
1552 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1553 		all_ports_up = 1;
1554 		for (portid = 0; portid < port_num; portid++) {
1555 			if ((port_mask & (1 << portid)) == 0)
1556 				continue;
1557 			memset(&link, 0, sizeof(link));
1558 			rte_eth_link_get_nowait(portid, &link);
1559 			/* print link status if flag set */
1560 			if (print_flag == 1) {
1561 				if (link.link_status)
1562 					printf("Port %d Link Up - speed %u "
1563 						"Mbps - %s\n", (uint8_t)portid,
1564 						(unsigned)link.link_speed,
1565 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1566 					("full-duplex") : ("half-duplex"));
1567 				else
1568 					printf("Port %d Link Down\n",
1569 						(uint8_t)portid);
1570 				continue;
1571 			}
1572 			/* clear all_ports_up flag if any link down */
1573 			if (link.link_status == 0) {
1574 				all_ports_up = 0;
1575 				break;
1576 			}
1577 		}
1578 		/* after finally printing all link status, get out */
1579 		if (print_flag == 1)
1580 			break;
1581 
1582 		if (all_ports_up == 0) {
1583 			fflush(stdout);
1584 			rte_delay_ms(CHECK_INTERVAL);
1585 		}
1586 
1587 		/* set the print_flag if all ports up or timeout */
1588 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1589 			print_flag = 1;
1590 		}
1591 	}
1592 }
1593 
1594 static int
1595 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1596 {
1597 	uint16_t i;
1598 	int diag;
1599 	uint8_t mapping_found = 0;
1600 
1601 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1602 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1603 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1604 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1605 					tx_queue_stats_mappings[i].queue_id,
1606 					tx_queue_stats_mappings[i].stats_counter_id);
1607 			if (diag != 0)
1608 				return diag;
1609 			mapping_found = 1;
1610 		}
1611 	}
1612 	if (mapping_found)
1613 		port->tx_queue_stats_mapping_enabled = 1;
1614 	return 0;
1615 }
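/*
 * The mappings walked above come from the --tx-queue-stats-mapping option;
 * an illustrative invocation (option name and tuple syntax assumed from
 * testpmd's parameter parser) maps queue 0 of port 0 to stats register 0:
 *
 *   testpmd ... --tx-queue-stats-mapping "(0,0,0)"
 */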
1616 
1617 static int
1618 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1619 {
1620 	uint16_t i;
1621 	int diag;
1622 	uint8_t mapping_found = 0;
1623 
1624 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1625 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1626 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1627 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1628 					rx_queue_stats_mappings[i].queue_id,
1629 					rx_queue_stats_mappings[i].stats_counter_id);
1630 			if (diag != 0)
1631 				return diag;
1632 			mapping_found = 1;
1633 		}
1634 	}
1635 	if (mapping_found)
1636 		port->rx_queue_stats_mapping_enabled = 1;
1637 	return 0;
1638 }
1639 
1640 static void
1641 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1642 {
1643 	int diag = 0;
1644 
1645 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1646 	if (diag != 0) {
1647 		if (diag == -ENOTSUP) {
1648 			port->tx_queue_stats_mapping_enabled = 0;
1649 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1650 		}
1651 		else
1652 			rte_exit(EXIT_FAILURE,
1653 					"set_tx_queue_stats_mapping_registers "
1654 					"failed for port id=%d diag=%d\n",
1655 					pi, diag);
1656 	}
1657 
1658 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1659 	if (diag != 0) {
1660 		if (diag == -ENOTSUP) {
1661 			port->rx_queue_stats_mapping_enabled = 0;
1662 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1663 		}
1664 		else
1665 			rte_exit(EXIT_FAILURE,
1666 					"set_rx_queue_stats_mapping_registers "
1667 					"failed for port id=%d diag=%d\n",
1668 					pi, diag);
1669 	}
1670 }
1671 
1672 static void
1673 rxtx_port_config(struct rte_port *port)
1674 {
1675 	port->rx_conf = port->dev_info.default_rxconf;
1676 	port->tx_conf = port->dev_info.default_txconf;
1677 
1678 	/* Check if any RX/TX parameters have been passed */
1679 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1680 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1681 
1682 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1683 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1684 
1685 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1686 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1687 
1688 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1689 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1690 
1691 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1692 		port->rx_conf.rx_drop_en = rx_drop_en;
1693 
1694 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1695 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1696 
1697 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1698 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1699 
1700 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1701 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1702 
1703 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1704 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1705 
1706 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1707 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1708 
1709 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1710 		port->tx_conf.txq_flags = txq_flags;
1711 }
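/*
 * RTE_PMD_PARAM_UNSET (-1) acts as a sentinel: the PMD defaults obtained
 * from rte_eth_dev_info_get() are kept unless the user passed an explicit
 * value. An illustrative invocation (option names assumed from testpmd's
 * parameter parser):
 *
 *   testpmd ... --txpt=36 --txht=0 --txwt=0 --txfreet=32 --txrst=32
 */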
1712 
1713 void
1714 init_port_config(void)
1715 {
1716 	portid_t pid;
1717 	struct rte_port *port;
1718 
1719 	for (pid = 0; pid < nb_ports; pid++) {
1720 		port = &ports[pid];
1721 		port->dev_conf.rxmode = rx_mode;
1722 		port->dev_conf.fdir_conf = fdir_conf;
1723 		if (nb_rxq > 1) {
1724 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1725 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1726 		} else {
1727 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1728 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1729 		}
1730 
1731 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1732 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1733 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1734 			else
1735 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1736 		}
1737 
1738 		if (port->dev_info.max_vfs != 0) {
1739 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1740 				port->dev_conf.rxmode.mq_mode =
1741 					ETH_MQ_RX_VMDQ_RSS;
1742 			else
1743 				port->dev_conf.rxmode.mq_mode =
1744 					ETH_MQ_RX_NONE;
1745 
1746 			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1747 		}
1748 
1749 		rxtx_port_config(port);
1750 
1751 		rte_eth_macaddr_get(pid, &port->eth_addr);
1752 
1753 		map_port_queue_stats_mapping_registers(pid, port);
1754 #ifdef RTE_NIC_BYPASS
1755 		rte_eth_dev_bypass_init(pid);
1756 #endif
1757 	}
1758 }
1759 
1760 const uint16_t vlan_tags[] = {
1761 		0,  1,  2,  3,  4,  5,  6,  7,
1762 		8,  9, 10, 11,  12, 13, 14, 15,
1763 		16, 17, 18, 19, 20, 21, 22, 23,
1764 		24, 25, 26, 27, 28, 29, 30, 31
1765 };
1766 
1767 static  int
1768 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1769 {
1770 	uint8_t i;
1771 
1772 	/*
1773 	 * Build up the correct configuration for DCB+VT based on the vlan tags
1774 	 * array given above, and the number of traffic classes available for use.
1775 	 */
1776 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1777 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1778 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1779 
1780 		/* VMDQ+DCB RX and TX configurations */
1781 		vmdq_rx_conf.enable_default_pool = 0;
1782 		vmdq_rx_conf.default_pool = 0;
1783 		vmdq_rx_conf.nb_queue_pools =
1784 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1785 		vmdq_tx_conf.nb_queue_pools =
1786 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1787 
1788 		vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1789 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1790 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1791 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1792 		}
1793 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1794 			vmdq_rx_conf.dcb_queue[i] = i;
1795 			vmdq_tx_conf.dcb_queue[i] = i;
1796 		}
1797 
1798 		/* set DCB mode of RX and TX of multiple queues */
1799 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1800 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1801 		if (dcb_conf->pfc_en)
1802 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1803 		else
1804 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1805 
1806 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1807                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1808 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1809                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1810 	}
1811 	else {
1812 		struct rte_eth_dcb_rx_conf rx_conf;
1813 		struct rte_eth_dcb_tx_conf tx_conf;
1814 
1815 		/* queue mapping configuration of DCB RX and TX */
1816 		if (dcb_conf->num_tcs == ETH_4_TCS)
1817 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1818 		else
1819 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1820 
1821 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1822 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1823 
1824 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1825 			rx_conf.dcb_queue[i] = i;
1826 			tx_conf.dcb_queue[i] = i;
1827 		}
1828 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1829 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1830 		if (dcb_conf->pfc_en)
1831 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1832 		else
1833 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1834 
1835 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1836                                 sizeof(struct rte_eth_dcb_rx_conf)));
1837 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1838                                 sizeof(struct rte_eth_dcb_tx_conf)));
1839 	}
1840 
1841 	return 0;
1842 }
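/*
 * Mapping sketch (illustrative): with 8 TCs, nb_queue_pools is ETH_16_POOLS,
 * so the loop above spreads the 32 VLAN tags round-robin over the pools;
 * e.g. vlan_tags[17] = 17 lands in pool 17 % 16 = 1, whose bit is set in
 * the pools bitmap of that pool_map entry.
 */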
1843 
1844 int
1845 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1846 {
1847 	struct rte_eth_conf port_conf;
1848 	struct rte_port *rte_port;
1849 	int retval;
1850 	uint16_t nb_vlan;
1851 	uint16_t i;
1852 
1853 	/* rxq and txq configuration in dcb mode */
1854 	nb_rxq = 128;
1855 	nb_txq = 128;
1856 	rx_free_thresh = 64;
1857 
1858 	memset(&port_conf,0,sizeof(struct rte_eth_conf));
1859 	/* Enter DCB configuration status */
1860 	dcb_config = 1;
1861 
1862 	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1863 	/* set configuration of DCB in vt mode and DCB in non-vt mode */
1864 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1865 	if (retval < 0)
1866 		return retval;
1867 
1868 	rte_port = &ports[pid];
1869 	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1870 
1871 	rxtx_port_config(rte_port);
1872 	/* VLAN filter */
1873 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1874 	for (i = 0; i < nb_vlan; i++){
1875 		rx_vft_set(pid, vlan_tags[i], 1);
1876 	}
1877 
1878 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1879 	map_port_queue_stats_mapping_registers(pid, rte_port);
1880 
1881 	rte_port->dcb_flag = 1;
1882 
1883 	return 0;
1884 }
1885 
1886 int
1887 main(int argc, char** argv)
1888 {
1889 	int  diag;
1890 	uint8_t port_id;
1891 
1892 	diag = rte_eal_init(argc, argv);
1893 	if (diag < 0)
1894 		rte_panic("Cannot init EAL\n");
1895 
1896 	nb_ports = (portid_t) rte_eth_dev_count();
1897 	if (nb_ports == 0)
1898 		rte_exit(EXIT_FAILURE, "No probed ethernet device\n");
1899 
1900 	set_def_fwd_config();
1901 	if (nb_lcores == 0)
1902 		rte_panic("Empty set of forwarding logical cores - check the "
1903 			  "core mask supplied in the command parameters\n");
1904 
1905 	argc -= diag;
1906 	argv += diag;
1907 	if (argc > 1)
1908 		launch_args_parse(argc, argv);
1909 
1910 	if (nb_rxq > nb_txq)
1911 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1912 		       "but nb_txq=%d will prevent it from being fully tested.\n",
1913 		       nb_rxq, nb_txq);
1914 
1915 	init_config();
1916 	if (start_port(RTE_PORT_ALL) != 0)
1917 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
1918 
1919 	/* set all ports to promiscuous mode by default */
1920 	for (port_id = 0; port_id < nb_ports; port_id++)
1921 		rte_eth_promiscuous_enable(port_id);
1922 
1923 #ifdef RTE_LIBRTE_CMDLINE
1924 	if (interactive == 1) {
1925 		if (auto_start) {
1926 			printf("Start automatic packet forwarding\n");
1927 			start_packet_forwarding(0);
1928 		}
1929 		prompt();
1930 	} else
1931 #endif
1932 	{
1933 		char c;
1934 		int rc;
1935 
1936 		printf("No command-line core given, start packet forwarding\n");
1937 		start_packet_forwarding(0);
1938 		printf("Press enter to exit\n");
1939 		rc = read(0, &c, 1);
1940 		if (rc < 0)
1941 			return 1;
1942 	}
1943 
1944 	return 0;
1945 }
1946