xref: /dpdk/app/test-pmd/testpmd.c (revision ffc468ff3cfe768b0132f851cef6382cd16a3c68)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 
80 #include "testpmd.h"
81 #include "mempool_osdep.h"
82 
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 
/* Use the master core for the command line? */
86 uint8_t interactive = 0;
87 uint8_t auto_start = 0;
88 
89 /*
90  * NUMA support configuration.
91  * When set, the NUMA support attempts to dispatch the allocation of the
92  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
93  * probed ports among the CPU sockets 0 and 1.
94  * Otherwise, all memory is allocated from CPU socket 0.
95  */
96 uint8_t numa_support = 0; /**< No numa support by default */
97 
/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
102 uint8_t socket_num = UMA_NO_CONFIG;
103 
/*
 * Use anonymous mapped memory (which might not be physically contiguous)
 * for mbufs.
 */
107 uint8_t mp_anon = 0;
108 
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
115 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
116 portid_t nb_peer_eth_addrs = 0;
117 
118 /*
119  * Probed Target Environment.
120  */
121 struct rte_port *ports;	       /**< For all probed ethernet ports. */
122 portid_t nb_ports;             /**< Number of probed ethernet ports. */
123 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
124 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
125 
126 /*
127  * Test Forwarding Configuration.
128  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
129  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
130  */
131 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
132 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
133 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
134 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
135 
136 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
137 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
138 
139 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
140 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
141 
142 /*
143  * Forwarding engines.
144  */
145 struct fwd_engine * fwd_engines[] = {
146 	&io_fwd_engine,
147 	&mac_fwd_engine,
148 	&mac_retry_fwd_engine,
149 	&mac_swap_engine,
150 	&flow_gen_engine,
151 	&rx_only_engine,
152 	&tx_only_engine,
153 	&csum_fwd_engine,
154 	&icmp_echo_engine,
155 #ifdef RTE_LIBRTE_IEEE1588
156 	&ieee1588_fwd_engine,
157 #endif
158 	NULL,
159 };
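
/*
 * The array above is NULL-terminated; the forwarding mode chosen by the
 * user is looked up by name in it. A minimal lookup sketch (an
 * illustration, not the actual cmdline code; it assumes the
 * fwd_mode_name member declared in testpmd.h):
 *
 *	struct fwd_engine *e;
 *	unsigned int i;
 *
 *	for (i = 0; (e = fwd_engines[i]) != NULL; i++)
 *		if (strcmp(e->fwd_mode_name, "rxonly") == 0)
 *			cur_fwd_eng = e;
 */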
160 
161 struct fwd_config cur_fwd_config;
162 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 
164 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
165 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
166                                       * specified on command-line. */
167 
168 /*
169  * Configuration of packet segments used by the "txonly" processing engine.
170  */
171 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
172 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
173 	TXONLY_DEF_PACKET_LEN,
174 };
175 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
176 
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179 
/* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
181 uint8_t dcb_config = 0;
182 
/* Whether DCB is in testing status */
184 uint8_t dcb_test = 0;
185 
/* DCB on and VT on is the default mapping */
187 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
188 
189 /*
190  * Configurable number of RX/TX queues.
191  */
192 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
193 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
194 
195 /*
196  * Configurable number of RX/TX ring descriptors.
197  */
198 #define RTE_TEST_RX_DESC_DEFAULT 128
199 #define RTE_TEST_TX_DESC_DEFAULT 512
200 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
201 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
202 
203 #define RTE_PMD_PARAM_UNSET -1
204 /*
205  * Configurable values of RX and TX ring threshold registers.
206  */
207 
208 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
210 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
211 
212 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
214 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
215 
216 /*
217  * Configurable value of RX free threshold.
218  */
219 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
220 
221 /*
222  * Configurable value of RX drop enable.
223  */
224 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
225 
226 /*
227  * Configurable value of TX free threshold.
228  */
229 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
230 
231 /*
232  * Configurable value of TX RS bit threshold.
233  */
234 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
235 
236 /*
237  * Configurable value of TX queue flags.
238  */
239 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
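
/*
 * The RTE_PMD_PARAM_UNSET defaults above mean that the PMD-provided
 * values from rte_eth_dev_info_get() are kept unless the user overrides
 * them. For illustration (assuming the usual testpmd option names), a
 * run overriding the RX thresholds could look like:
 *
 *	testpmd -c 0x3 -n 4 -- --rxpt=8 --rxht=8 --rxwt=4 --rxfreet=32
 *
 * rxtx_port_config() below copies every non-UNSET value into the
 * per-port rte_eth_rxconf/rte_eth_txconf structures.
 */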
240 
241 /*
242  * Receive Side Scaling (RSS) configuration.
243  */
244 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
245 
246 /*
247  * Port topology configuration
248  */
249 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
250 
/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
254 uint8_t no_flush_rx = 0; /* flush by default */
255 
/*
 * Avoid checking the link status when starting/stopping a port.
 */
259 uint8_t no_link_check = 0; /* check by default */
260 
261 /*
262  * NIC bypass mode configuration options.
263  */
264 #ifdef RTE_NIC_BYPASS
265 
266 /* The NIC bypass watchdog timeout. */
267 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
268 
269 #endif
270 
271 /*
272  * Ethernet device configuration.
273  */
274 struct rte_eth_rxmode rx_mode = {
275 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276 	.split_hdr_size = 0,
277 	.header_split   = 0, /**< Header Split disabled. */
278 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
279 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
280 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
281 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
282 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
283 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
284 };
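
/*
 * These RX-mode defaults can be overridden from the command line before
 * the ports are configured; for example (option name assumed from the
 * testpmd parameters), jumbo frames would be enabled with:
 *
 *	testpmd -c 0x3 -n 4 -- --max-pkt-len=9000
 *
 * which sets max_rx_pkt_len and the jumbo_frame flag accordingly.
 */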
285 
286 struct rte_fdir_conf fdir_conf = {
287 	.mode = RTE_FDIR_MODE_NONE,
288 	.pballoc = RTE_FDIR_PBALLOC_64K,
289 	.status = RTE_FDIR_REPORT_STATUS,
290 	.mask = {
291 		.vlan_tci_mask = 0x0,
292 		.ipv4_mask     = {
293 			.src_ip = 0xFFFFFFFF,
294 			.dst_ip = 0xFFFFFFFF,
295 		},
296 		.ipv6_mask     = {
297 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
299 		},
300 		.src_port_mask = 0xFFFF,
301 		.dst_port_mask = 0xFFFF,
302 	},
303 	.drop_queue = 127,
304 };
305 
306 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
307 
308 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
309 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
310 
311 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
312 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
313 
314 uint16_t nb_tx_queue_stats_mappings = 0;
315 uint16_t nb_rx_queue_stats_mappings = 0;
316 
317 /* Forward function declarations */
318 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
319 static void check_all_ports_link_status(uint32_t port_mask);
320 
321 /*
322  * Check if all the ports are started.
323  * If yes, return positive value. If not, return zero.
324  */
325 static int all_ports_started(void);
326 
327 /*
328  * Find next enabled port
329  */
330 portid_t
331 find_next_port(portid_t p, struct rte_port *ports, int size)
332 {
333 	if (ports == NULL)
334 		rte_exit(-EINVAL, "failed to find a next port id\n");
335 
	while ((p < size) && (ports[p].enabled == 0))
337 		p++;
338 	return p;
339 }
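
/*
 * Typical usage: the FOREACH_PORT() iterator used throughout this file
 * is expected to be built on this helper (a sketch, assuming the macro
 * defined in testpmd.h):
 *
 *	for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS);
 *	     p < RTE_MAX_ETHPORTS;
 *	     p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
 *
 * so that detached (disabled) ports are transparently skipped.
 */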
340 
341 /*
342  * Setup default configuration.
343  */
344 static void
345 set_default_fwd_lcores_config(void)
346 {
347 	unsigned int i;
348 	unsigned int nb_lc;
349 
350 	nb_lc = 0;
351 	for (i = 0; i < RTE_MAX_LCORE; i++) {
352 		if (! rte_lcore_is_enabled(i))
353 			continue;
354 		if (i == rte_get_master_lcore())
355 			continue;
356 		fwd_lcores_cpuids[nb_lc++] = i;
357 	}
358 	nb_lcores = (lcoreid_t) nb_lc;
359 	nb_cfg_lcores = nb_lcores;
360 	nb_fwd_lcores = 1;
361 }
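
/*
 * Example: with an EAL core mask of 0xf (lcores 0-3) and lcore 0 as the
 * master, the loop above yields fwd_lcores_cpuids = {1, 2, 3},
 * nb_lcores = 3, and a single forwarding lcore by default.
 */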
362 
363 static void
364 set_def_peer_eth_addrs(void)
365 {
366 	portid_t i;
367 
368 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
369 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
370 		peer_eth_addrs[i].addr_bytes[5] = i;
371 	}
372 }
373 
374 static void
375 set_default_fwd_ports_config(void)
376 {
377 	portid_t pt_id;
378 
379 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
380 		fwd_ports_ids[pt_id] = pt_id;
381 
382 	nb_cfg_ports = nb_ports;
383 	nb_fwd_ports = nb_ports;
384 }
385 
386 void
387 set_def_fwd_config(void)
388 {
389 	set_default_fwd_lcores_config();
390 	set_def_peer_eth_addrs();
391 	set_default_fwd_ports_config();
392 }
393 
394 /*
395  * Configuration initialisation done once at init time.
396  */
397 struct mbuf_ctor_arg {
398 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
399 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
400 };
401 
402 struct mbuf_pool_ctor_arg {
403 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
404 };
405 
406 static void
407 testpmd_mbuf_ctor(struct rte_mempool *mp,
408 		  void *opaque_arg,
409 		  void *raw_mbuf,
410 		  __attribute__((unused)) unsigned i)
411 {
412 	struct mbuf_ctor_arg *mb_ctor_arg;
413 	struct rte_mbuf    *mb;
414 
415 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
416 	mb = (struct rte_mbuf *) raw_mbuf;
417 
418 	mb->pool         = mp;
419 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
420 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
421 			mb_ctor_arg->seg_buf_offset);
422 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
423 	mb->ol_flags     = 0;
424 	mb->data_off     = RTE_PKTMBUF_HEADROOM;
425 	mb->nb_segs      = 1;
426 	mb->tx_offload   = 0;
427 	mb->vlan_tci     = 0;
428 	mb->hash.rss     = 0;
429 }
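
/*
 * Resulting per-mbuf memory layout (widths illustrative; seg_buf_offset
 * is sizeof(struct rte_mbuf) rounded up to a cache line):
 *
 *	+-----------------+----------------------+----------------------+
 *	| struct rte_mbuf | headroom             | packet data          |
 *	+-----------------+----------------------+----------------------+
 *	^ raw_mbuf        ^ buf_addr             ^ buf_addr + data_off
 *	                  (RTE_PKTMBUF_HEADROOM)
 */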
430 
431 static void
432 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
433 		       void *opaque_arg)
434 {
435 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
436 	struct rte_pktmbuf_pool_private *mbp_priv;
437 
438 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
439 		printf("%s(%s) private_data_size %d < %d\n",
440 		       __func__, mp->name, (int) mp->private_data_size,
441 		       (int) sizeof(struct rte_pktmbuf_pool_private));
442 		return;
443 	}
444 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
445 	mbp_priv = rte_mempool_get_priv(mp);
446 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
447 }
448 
449 static void
450 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
451 		 unsigned int socket_id)
452 {
453 	char pool_name[RTE_MEMPOOL_NAMESIZE];
454 	struct rte_mempool *rte_mp;
455 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
456 	struct mbuf_ctor_arg mb_ctor_arg;
457 	uint32_t mb_size;
458 
459 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
460 						mbuf_seg_size);
461 	mb_ctor_arg.seg_buf_offset =
462 		(uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
463 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
464 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
465 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
466 
467 #ifdef RTE_LIBRTE_PMD_XENVIRT
468 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
469                                    (unsigned) mb_mempool_cache,
470                                    sizeof(struct rte_pktmbuf_pool_private),
471                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
472                                    testpmd_mbuf_ctor, &mb_ctor_arg,
473                                    socket_id, 0);
477 #else
478 	if (mp_anon != 0)
479 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
480 				    (unsigned) mb_mempool_cache,
481 				    sizeof(struct rte_pktmbuf_pool_private),
482 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
483 				    testpmd_mbuf_ctor, &mb_ctor_arg,
484 				    socket_id, 0);
485 	else
486 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
487 				    (unsigned) mb_mempool_cache,
488 				    sizeof(struct rte_pktmbuf_pool_private),
489 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
490 				    testpmd_mbuf_ctor, &mb_ctor_arg,
491 				    socket_id, 0);
492 
493 #endif
494 
495 	if (rte_mp == NULL) {
496 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
497 						"failed\n", socket_id);
498 	} else if (verbose_level > 0) {
499 		rte_mempool_dump(stdout, rte_mp);
500 	}
501 }
502 
/*
 * Check whether a given socket ID is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
507 static int
508 check_socket_id(const unsigned int socket_id)
509 {
510 	static int warning_once = 0;
511 
512 	if (socket_id >= MAX_SOCKET) {
513 		if (!warning_once && numa_support)
514 			printf("Warning: NUMA should be configured manually by"
515 			       " using --port-numa-config and"
516 			       " --ring-numa-config parameters along with"
517 			       " --numa.\n");
518 		warning_once = 1;
519 		return -1;
520 	}
521 	return 0;
522 }
523 
524 static void
525 init_config(void)
526 {
527 	portid_t pid;
528 	struct rte_port *port;
529 	struct rte_mempool *mbp;
530 	unsigned int nb_mbuf_per_pool;
531 	lcoreid_t  lc_id;
532 	uint8_t port_per_socket[MAX_SOCKET];
533 
	memset(port_per_socket, 0, MAX_SOCKET);
535 	/* Configuration of logical cores. */
536 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
537 				sizeof(struct fwd_lcore *) * nb_lcores,
538 				RTE_CACHE_LINE_SIZE);
539 	if (fwd_lcores == NULL) {
540 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
541 							"failed\n", nb_lcores);
542 	}
543 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
544 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
545 					       sizeof(struct fwd_lcore),
546 					       RTE_CACHE_LINE_SIZE);
547 		if (fwd_lcores[lc_id] == NULL) {
548 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
549 								"failed\n");
550 		}
551 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
552 	}
553 
	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single mbuf pool in
	 * socket 0 memory by default.
	 * Otherwise, create an mbuf pool in the memory of each of
	 * sockets 0 and 1.
	 *
	 * Use the maximum values of nb_rxd and nb_txd here, so that
	 * nb_rxd and nb_txd can be reconfigured at run time.
	 */
563 	if (param_total_num_mbufs)
564 		nb_mbuf_per_pool = param_total_num_mbufs;
565 	else {
566 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
567 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
568 
569 		if (!numa_support)
570 			nb_mbuf_per_pool =
571 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
572 	}
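
	/*
	 * Worked example of the default sizing above (descriptor and burst
	 * maxima assumed from testpmd.h, e.g. RTE_TEST_RX_DESC_MAX =
	 * RTE_TEST_TX_DESC_MAX = 2048 and MAX_PKT_BURST = 512): with 4
	 * lcores and mb_mempool_cache = 250, nb_mbuf_per_pool =
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs; without NUMA support
	 * it is then multiplied by RTE_MAX_ETHPORTS to size one pool
	 * shared by all ports.
	 */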
573 
574 	if (!numa_support) {
575 		if (socket_num == UMA_NO_CONFIG)
576 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
577 		else
578 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
579 						 socket_num);
580 	}
581 
582 	FOREACH_PORT(pid, ports) {
583 		port = &ports[pid];
584 		rte_eth_dev_info_get(pid, &port->dev_info);
585 
586 		if (numa_support) {
587 			if (port_numa[pid] != NUMA_NO_CONFIG)
588 				port_per_socket[port_numa[pid]]++;
589 			else {
590 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
591 
592 				/* if socket_id is invalid, set to 0 */
593 				if (check_socket_id(socket_id) < 0)
594 					socket_id = 0;
595 				port_per_socket[socket_id]++;
596 			}
597 		}
598 
599 		/* set flag to initialize port/queue */
600 		port->need_reconfig = 1;
601 		port->need_reconfig_queues = 1;
602 	}
603 
604 	if (numa_support) {
605 		uint8_t i;
606 		unsigned int nb_mbuf;
607 
608 		if (param_total_num_mbufs)
609 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
610 
611 		for (i = 0; i < MAX_SOCKET; i++) {
612 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
613 			if (nb_mbuf)
614 				mbuf_pool_create(mbuf_data_size,
						nb_mbuf, i);
616 		}
617 	}
618 	init_port_config();
619 
	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
623 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
624 		mbp = mbuf_pool_find(
625 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
626 
627 		if (mbp == NULL)
628 			mbp = mbuf_pool_find(0);
629 		fwd_lcores[lc_id]->mbp = mbp;
630 	}
631 
632 	/* Configuration of packet forwarding streams. */
633 	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "init_fwd_streams() failed\n");
635 }
636 
637 
638 void
639 reconfig(portid_t new_port_id, unsigned socket_id)
640 {
641 	struct rte_port *port;
642 
643 	/* Reconfiguration of Ethernet ports. */
644 	port = &ports[new_port_id];
645 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
646 
647 	/* set flag to initialize port/queue */
648 	port->need_reconfig = 1;
649 	port->need_reconfig_queues = 1;
650 	port->socket_id = socket_id;
651 
652 	init_port_config();
653 }
654 
655 
656 int
657 init_fwd_streams(void)
658 {
659 	portid_t pid;
660 	struct rte_port *port;
661 	streamid_t sm_id, nb_fwd_streams_new;
662 
	/* Set the socket ID according to whether NUMA is used. */
664 	FOREACH_PORT(pid, ports) {
665 		port = &ports[pid];
666 		if (nb_rxq > port->dev_info.max_rx_queues) {
667 			printf("Fail: nb_rxq(%d) is greater than "
668 				"max_rx_queues(%d)\n", nb_rxq,
669 				port->dev_info.max_rx_queues);
670 			return -1;
671 		}
672 		if (nb_txq > port->dev_info.max_tx_queues) {
673 			printf("Fail: nb_txq(%d) is greater than "
674 				"max_tx_queues(%d)\n", nb_txq,
675 				port->dev_info.max_tx_queues);
676 			return -1;
677 		}
678 		if (numa_support) {
679 			if (port_numa[pid] != NUMA_NO_CONFIG)
680 				port->socket_id = port_numa[pid];
681 			else {
682 				port->socket_id = rte_eth_dev_socket_id(pid);
683 
684 				/* if socket_id is invalid, set to 0 */
685 				if (check_socket_id(port->socket_id) < 0)
686 					port->socket_id = 0;
687 			}
688 		}
689 		else {
690 			if (socket_num == UMA_NO_CONFIG)
691 				port->socket_id = 0;
692 			else
693 				port->socket_id = socket_num;
694 		}
695 	}
696 
697 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
698 	if (nb_fwd_streams_new == nb_fwd_streams)
699 		return 0;
700 	/* clear the old */
701 	if (fwd_streams != NULL) {
702 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
703 			if (fwd_streams[sm_id] == NULL)
704 				continue;
705 			rte_free(fwd_streams[sm_id]);
706 			fwd_streams[sm_id] = NULL;
707 		}
708 		rte_free(fwd_streams);
709 		fwd_streams = NULL;
710 	}
711 
712 	/* init new */
713 	nb_fwd_streams = nb_fwd_streams_new;
714 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
715 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
716 	if (fwd_streams == NULL)
717 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
718 						"failed\n", nb_fwd_streams);
719 
720 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
721 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
722 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
723 		if (fwd_streams[sm_id] == NULL)
724 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
725 								" failed\n");
726 	}
727 
728 	return 0;
729 }
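
/*
 * Example: with 2 forwarding ports and nb_rxq = 4, init_fwd_streams()
 * allocates nb_ports * nb_rxq = 8 fwd_stream structures, one per
 * (port, RX queue) pair; fwd_config_setup() later distributes them over
 * the forwarding lcores.
 */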
730 
731 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
732 static void
733 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
734 {
735 	unsigned int total_burst;
736 	unsigned int nb_burst;
737 	unsigned int burst_stats[3];
738 	uint16_t pktnb_stats[3];
739 	uint16_t nb_pkt;
740 	int burst_percent[3];
741 
	/*
	 * First compute the total number of packet bursts and the
	 * two most frequent burst sizes (in number of packets).
	 */
746 	total_burst = 0;
747 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
748 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
749 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
750 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
751 		if (nb_burst == 0)
752 			continue;
753 		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* Track the second most frequent burst size too,
			 * otherwise burst_stats[1] only ever holds a
			 * previous maximum and may stay 0. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
760 	}
761 	if (total_burst == 0)
762 		return;
763 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
764 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
765 	       burst_percent[0], (int) pktnb_stats[0]);
766 	if (burst_stats[0] == total_burst) {
767 		printf("]\n");
768 		return;
769 	}
770 	if (burst_stats[0] + burst_stats[1] == total_burst) {
771 		printf(" + %d%% of %d pkts]\n",
772 		       100 - burst_percent[0], pktnb_stats[1]);
773 		return;
774 	}
775 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
776 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
777 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
778 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
779 		return;
780 	}
781 	printf(" + %d%% of %d pkts + %d%% of others]\n",
782 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
783 }
784 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
785 
786 static void
787 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
788 {
789 	struct rte_port *port;
790 	uint8_t i;
791 
792 	static const char *fwd_stats_border = "----------------------";
793 
794 	port = &ports[port_id];
795 	printf("\n  %s Forward statistics for port %-2d %s\n",
796 	       fwd_stats_border, port_id, fwd_stats_border);
797 
798 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
799 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
800 		       "%-"PRIu64"\n",
801 		       stats->ipackets, stats->imissed,
802 		       (uint64_t) (stats->ipackets + stats->imissed));
803 
804 		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
806 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
807 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
808 			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
809 			       "RX-error: %-"PRIu64"\n",
810 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
811 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
812 		}
813 
814 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
815 		       "%-"PRIu64"\n",
816 		       stats->opackets, port->tx_dropped,
817 		       (uint64_t) (stats->opackets + port->tx_dropped));
818 	}
819 	else {
820 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
821 		       "%14"PRIu64"\n",
822 		       stats->ipackets, stats->imissed,
823 		       (uint64_t) (stats->ipackets + stats->imissed));
824 
825 		if (cur_fwd_eng == &csum_fwd_engine)
826 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
827 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
828 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
829 			printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
830 			       "    RX-error:%"PRIu64"\n",
831 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
832 			printf("  RX-nombufs:             %14"PRIu64"\n",
833 			       stats->rx_nombuf);
834 		}
835 
836 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
837 		       "%14"PRIu64"\n",
838 		       stats->opackets, port->tx_dropped,
839 		       (uint64_t) (stats->opackets + port->tx_dropped));
840 	}
841 
842 	/* Display statistics of XON/XOFF pause frames, if any. */
843 	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
844 	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
845 		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
846 		       stats->rx_pause_xoff, stats->rx_pause_xon);
847 		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
848 		       stats->tx_pause_xoff, stats->tx_pause_xon);
849 	}
850 
851 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
852 	if (port->rx_stream)
853 		pkt_burst_stats_display("RX",
854 			&port->rx_stream->rx_burst_stats);
855 	if (port->tx_stream)
856 		pkt_burst_stats_display("TX",
857 			&port->tx_stream->tx_burst_stats);
858 #endif
859 	/* stats fdir */
860 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
861 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
862 		       stats->fdirmiss,
863 		       stats->fdirmatch);
864 
865 	if (port->rx_queue_stats_mapping_enabled) {
866 		printf("\n");
867 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
868 			printf("  Stats reg %2d RX-packets:%14"PRIu64
869 			       "     RX-errors:%14"PRIu64
870 			       "    RX-bytes:%14"PRIu64"\n",
871 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
872 		}
873 		printf("\n");
874 	}
875 	if (port->tx_queue_stats_mapping_enabled) {
876 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
877 			printf("  Stats reg %2d TX-packets:%14"PRIu64
878 			       "                                 TX-bytes:%14"PRIu64"\n",
879 			       i, stats->q_opackets[i], stats->q_obytes[i]);
880 		}
881 	}
882 
883 	printf("  %s--------------------------------%s\n",
884 	       fwd_stats_border, fwd_stats_border);
885 }
886 
887 static void
888 fwd_stream_stats_display(streamid_t stream_id)
889 {
890 	struct fwd_stream *fs;
891 	static const char *fwd_top_stats_border = "-------";
892 
893 	fs = fwd_streams[stream_id];
894 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
895 	    (fs->fwd_dropped == 0))
896 		return;
897 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
898 	       "TX Port=%2d/Queue=%2d %s\n",
899 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
900 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
901 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
902 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
903 
	/* if running the checksum forwarding engine */
905 	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
908 	}
909 
910 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
911 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
912 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
913 #endif
914 }
915 
916 static void
917 flush_fwd_rx_queues(void)
918 {
919 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
920 	portid_t  rxp;
921 	portid_t port_id;
922 	queueid_t rxq;
923 	uint16_t  nb_rx;
924 	uint16_t  i;
925 	uint8_t   j;
926 
927 	for (j = 0; j < 2; j++) {
928 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
929 			for (rxq = 0; rxq < nb_rxq; rxq++) {
930 				port_id = fwd_ports_ids[rxp];
931 				do {
932 					nb_rx = rte_eth_rx_burst(port_id, rxq,
933 						pkts_burst, MAX_PKT_BURST);
934 					for (i = 0; i < nb_rx; i++)
935 						rte_pktmbuf_free(pkts_burst[i]);
936 				} while (nb_rx > 0);
937 			}
938 		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
940 	}
941 }
942 
943 static void
944 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
945 {
946 	struct fwd_stream **fsm;
947 	streamid_t nb_fs;
948 	streamid_t sm_id;
949 
950 	fsm = &fwd_streams[fc->stream_idx];
951 	nb_fs = fc->stream_nb;
952 	do {
953 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
954 			(*pkt_fwd)(fsm[sm_id]);
955 	} while (! fc->stopped);
956 }
957 
958 static int
959 start_pkt_forward_on_core(void *fwd_arg)
960 {
961 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
962 			     cur_fwd_config.fwd_eng->packet_fwd);
963 	return 0;
964 }
965 
966 /*
967  * Run the TXONLY packet forwarding engine to send a single burst of packets.
968  * Used to start communication flows in network loopback test configurations.
969  */
970 static int
971 run_one_txonly_burst_on_core(void *fwd_arg)
972 {
973 	struct fwd_lcore *fwd_lc;
974 	struct fwd_lcore tmp_lcore;
975 
976 	fwd_lc = (struct fwd_lcore *) fwd_arg;
977 	tmp_lcore = *fwd_lc;
978 	tmp_lcore.stopped = 1;
979 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
980 	return 0;
981 }
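
/*
 * Note: copying the lcore context and pre-setting stopped = 1 makes the
 * do/while loop in run_pkt_fwd_on_lcore() execute exactly one pass,
 * i.e. one burst per stream.
 */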
982 
983 /*
984  * Launch packet forwarding:
 *     - Set up per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
987  */
988 static void
989 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
990 {
991 	port_fwd_begin_t port_fwd_begin;
992 	unsigned int i;
993 	unsigned int lc_id;
994 	int diag;
995 
996 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
997 	if (port_fwd_begin != NULL) {
998 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
999 			(*port_fwd_begin)(fwd_ports_ids[i]);
1000 	}
1001 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1002 		lc_id = fwd_lcores_cpuids[i];
1003 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1004 			fwd_lcores[i]->stopped = 0;
1005 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1006 						     fwd_lcores[i], lc_id);
1007 			if (diag != 0)
1008 				printf("launch lcore %u failed - diag=%d\n",
1009 				       lc_id, diag);
1010 		}
1011 	}
1012 }
1013 
1014 /*
1015  * Launch packet forwarding configuration.
1016  */
1017 void
1018 start_packet_forwarding(int with_tx_first)
1019 {
1020 	port_fwd_begin_t port_fwd_begin;
1021 	port_fwd_end_t  port_fwd_end;
1022 	struct rte_port *port;
1023 	unsigned int i;
1024 	portid_t   pt_id;
1025 	streamid_t sm_id;
1026 
1027 	if (all_ports_started() == 0) {
1028 		printf("Not all ports were started\n");
1029 		return;
1030 	}
1031 	if (test_done == 0) {
1032 		printf("Packet forwarding already started\n");
1033 		return;
1034 	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
1051 	test_done = 0;
1052 
	if (!no_flush_rx)
1054 		flush_fwd_rx_queues();
1055 
1056 	fwd_config_setup();
1057 	rxtx_config_display();
1058 
1059 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1060 		pt_id = fwd_ports_ids[i];
1061 		port = &ports[pt_id];
1062 		rte_eth_stats_get(pt_id, &port->stats);
1063 		port->tx_dropped = 0;
1064 
1065 		map_port_queue_stats_mapping_registers(pt_id, port);
1066 	}
1067 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1068 		fwd_streams[sm_id]->rx_packets = 0;
1069 		fwd_streams[sm_id]->tx_packets = 0;
1070 		fwd_streams[sm_id]->fwd_dropped = 0;
1071 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1072 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1073 
1074 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1075 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1076 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1077 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1078 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1079 #endif
1080 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1081 		fwd_streams[sm_id]->core_cycles = 0;
1082 #endif
1083 	}
1084 	if (with_tx_first) {
1085 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1086 		if (port_fwd_begin != NULL) {
1087 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1088 				(*port_fwd_begin)(fwd_ports_ids[i]);
1089 		}
1090 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1091 		rte_eal_mp_wait_lcore();
1092 		port_fwd_end = tx_only_engine.port_fwd_end;
1093 		if (port_fwd_end != NULL) {
1094 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1095 				(*port_fwd_end)(fwd_ports_ids[i]);
1096 		}
1097 	}
1098 	launch_packet_forwarding(start_pkt_forward_on_core);
1099 }
1100 
1101 void
1102 stop_packet_forwarding(void)
1103 {
1104 	struct rte_eth_stats stats;
1105 	struct rte_port *port;
1106 	port_fwd_end_t  port_fwd_end;
1107 	int i;
1108 	portid_t   pt_id;
1109 	streamid_t sm_id;
1110 	lcoreid_t  lc_id;
1111 	uint64_t total_recv;
1112 	uint64_t total_xmit;
1113 	uint64_t total_rx_dropped;
1114 	uint64_t total_tx_dropped;
1115 	uint64_t total_rx_nombuf;
1116 	uint64_t tx_dropped;
1117 	uint64_t rx_bad_ip_csum;
1118 	uint64_t rx_bad_l4_csum;
1119 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1120 	uint64_t fwd_cycles;
1121 #endif
1122 	static const char *acc_stats_border = "+++++++++++++++";
1123 
1124 	if (all_ports_started() == 0) {
1125 		printf("Not all ports were started\n");
1126 		return;
1127 	}
1128 	if (test_done) {
1129 		printf("Packet forwarding not started\n");
1130 		return;
1131 	}
1132 	printf("Telling cores to stop...");
1133 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1134 		fwd_lcores[lc_id]->stopped = 1;
1135 	printf("\nWaiting for lcores to finish...\n");
1136 	rte_eal_mp_wait_lcore();
1137 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1138 	if (port_fwd_end != NULL) {
1139 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1140 			pt_id = fwd_ports_ids[i];
1141 			(*port_fwd_end)(pt_id);
1142 		}
1143 	}
1144 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1145 	fwd_cycles = 0;
1146 #endif
1147 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1148 		if (cur_fwd_config.nb_fwd_streams >
1149 		    cur_fwd_config.nb_fwd_ports) {
1150 			fwd_stream_stats_display(sm_id);
1151 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1152 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1153 		} else {
1154 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1155 				fwd_streams[sm_id];
1156 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1157 				fwd_streams[sm_id];
1158 		}
1159 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1160 		tx_dropped = (uint64_t) (tx_dropped +
1161 					 fwd_streams[sm_id]->fwd_dropped);
1162 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1163 
1164 		rx_bad_ip_csum =
1165 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1166 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1167 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1168 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1169 							rx_bad_ip_csum;
1170 
1171 		rx_bad_l4_csum =
1172 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1173 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1174 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1175 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1176 							rx_bad_l4_csum;
1177 
1178 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1179 		fwd_cycles = (uint64_t) (fwd_cycles +
1180 					 fwd_streams[sm_id]->core_cycles);
1181 #endif
1182 	}
1183 	total_recv = 0;
1184 	total_xmit = 0;
1185 	total_rx_dropped = 0;
1186 	total_tx_dropped = 0;
1187 	total_rx_nombuf  = 0;
1188 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1189 		pt_id = fwd_ports_ids[i];
1190 
1191 		port = &ports[pt_id];
1192 		rte_eth_stats_get(pt_id, &stats);
1193 		stats.ipackets -= port->stats.ipackets;
1194 		port->stats.ipackets = 0;
1195 		stats.opackets -= port->stats.opackets;
1196 		port->stats.opackets = 0;
1197 		stats.ibytes   -= port->stats.ibytes;
1198 		port->stats.ibytes = 0;
1199 		stats.obytes   -= port->stats.obytes;
1200 		port->stats.obytes = 0;
1201 		stats.imissed  -= port->stats.imissed;
1202 		port->stats.imissed = 0;
1203 		stats.oerrors  -= port->stats.oerrors;
1204 		port->stats.oerrors = 0;
1205 		stats.rx_nombuf -= port->stats.rx_nombuf;
1206 		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;
1211 
1212 		total_recv += stats.ipackets;
1213 		total_xmit += stats.opackets;
1214 		total_rx_dropped += stats.imissed;
1215 		total_tx_dropped += port->tx_dropped;
1216 		total_rx_nombuf  += stats.rx_nombuf;
1217 
1218 		fwd_port_stats_display(pt_id, &stats);
1219 	}
1220 	printf("\n  %s Accumulated forward statistics for all ports"
1221 	       "%s\n",
1222 	       acc_stats_border, acc_stats_border);
1223 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1224 	       "%-"PRIu64"\n"
1225 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1226 	       "%-"PRIu64"\n",
1227 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1228 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1229 	if (total_rx_nombuf > 0)
1230 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1231 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1232 	       "%s\n",
1233 	       acc_stats_border, acc_stats_border);
1234 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1235 	if (total_recv > 0)
1236 		printf("\n  CPU cycles/packet=%u (total cycles="
1237 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1238 		       (unsigned int)(fwd_cycles / total_recv),
1239 		       fwd_cycles, total_recv);
1240 #endif
1241 	printf("\nDone.\n");
1242 	test_done = 1;
1243 }
1244 
1245 void
1246 dev_set_link_up(portid_t pid)
1247 {
1248 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up failed.\n");
1250 }
1251 
1252 void
1253 dev_set_link_down(portid_t pid)
1254 {
1255 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down failed.\n");
1257 }
1258 
1259 static int
1260 all_ports_started(void)
1261 {
1262 	portid_t pi;
1263 	struct rte_port *port;
1264 
1265 	FOREACH_PORT(pi, ports) {
1266 		port = &ports[pi];
1267 		/* Check if there is a port which is not started */
1268 		if (port->port_status != RTE_PORT_STARTED)
1269 			return 0;
1270 	}
1271 
	/* All ports are started */
1273 	return 1;
1274 }
1275 
1276 int
1277 all_ports_stopped(void)
1278 {
1279 	portid_t pi;
1280 	struct rte_port *port;
1281 
1282 	FOREACH_PORT(pi, ports) {
1283 		port = &ports[pi];
1284 		if (port->port_status != RTE_PORT_STOPPED)
1285 			return 0;
1286 	}
1287 
1288 	return 1;
1289 }
1290 
1291 int
1292 port_is_started(portid_t port_id)
1293 {
1294 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1295 		return 0;
1296 
1297 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1298 		return 0;
1299 
1300 	return 1;
1301 }
1302 
1303 static int
1304 port_is_closed(portid_t port_id)
1305 {
1306 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1307 		return 0;
1308 
1309 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1310 		return 0;
1311 
1312 	return 1;
1313 }
1314 
1315 int
1316 start_port(portid_t pid)
1317 {
1318 	int diag, need_check_link_status = 0;
1319 	portid_t pi;
1320 	queueid_t qi;
1321 	struct rte_port *port;
1322 	struct ether_addr mac_addr;
1323 
1324 	if (test_done == 0) {
1325 		printf("Please stop forwarding first\n");
1326 		return -1;
1327 	}
1328 
1329 	if (init_fwd_streams() < 0) {
		printf("init_fwd_streams() failed\n");
1331 		return -1;
1332 	}
1333 
	if (dcb_config)
1335 		dcb_test = 1;
1336 	FOREACH_PORT(pi, ports) {
1337 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1338 			continue;
1339 
1340 		port = &ports[pi];
1341 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1342 						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
1344 			continue;
1345 		}
1346 
1347 		if (port->need_reconfig > 0) {
1348 			port->need_reconfig = 0;
1349 
1350 			printf("Configuring Port %d (socket %u)\n", pi,
1351 					port->socket_id);
1352 			/* configure port */
1353 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1354 						&(port->dev_conf));
1355 			if (diag != 0) {
1356 				if (rte_atomic16_cmpset(&(port->port_status),
1357 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
1361 				/* try to reconfigure port next time */
1362 				port->need_reconfig = 1;
1363 				return -1;
1364 			}
1365 		}
1366 		if (port->need_reconfig_queues > 0) {
1367 			port->need_reconfig_queues = 0;
1368 			/* setup tx queues */
1369 			for (qi = 0; qi < nb_txq; qi++) {
1370 				if ((numa_support) &&
1371 					(txring_numa[pi] != NUMA_NO_CONFIG))
1372 					diag = rte_eth_tx_queue_setup(pi, qi,
1373 						nb_txd,txring_numa[pi],
1374 						&(port->tx_conf));
1375 				else
1376 					diag = rte_eth_tx_queue_setup(pi, qi,
1377 						nb_txd,port->socket_id,
1378 						&(port->tx_conf));
1379 
1380 				if (diag == 0)
1381 					continue;
1382 
				/* Failed to setup tx queue, return */
1384 				if (rte_atomic16_cmpset(&(port->port_status),
1385 							RTE_PORT_HANDLING,
1386 							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n", pi);
1390 				/* try to reconfigure queues next time */
1391 				port->need_reconfig_queues = 1;
1392 				return -1;
1393 			}
1394 			/* setup rx queues */
1395 			for (qi = 0; qi < nb_rxq; qi++) {
1396 				if ((numa_support) &&
1397 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1398 					struct rte_mempool * mp =
1399 						mbuf_pool_find(rxring_numa[pi]);
1400 					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocation "
							"on socket %d\n",
1404 							rxring_numa[pi]);
1405 						return -1;
1406 					}
1407 
1408 					diag = rte_eth_rx_queue_setup(pi, qi,
1409 					     nb_rxd,rxring_numa[pi],
1410 					     &(port->rx_conf),mp);
1411 				}
1412 				else
1413 					diag = rte_eth_rx_queue_setup(pi, qi,
1414 					     nb_rxd,port->socket_id,
1415 					     &(port->rx_conf),
1416 				             mbuf_pool_find(port->socket_id));
1417 
1418 				if (diag == 0)
1419 					continue;

				/* Failed to setup rx queue, return */
1423 				if (rte_atomic16_cmpset(&(port->port_status),
1424 							RTE_PORT_HANDLING,
1425 							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n", pi);
1429 				/* try to reconfigure queues next time */
1430 				port->need_reconfig_queues = 1;
1431 				return -1;
1432 			}
1433 		}
1434 		/* start port */
1435 		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start the port; set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
1443 			continue;
1444 		}
1445 
1446 		if (rte_atomic16_cmpset(&(port->port_status),
1447 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);
1449 
1450 		rte_eth_macaddr_get(pi, &mac_addr);
1451 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1452 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1453 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1454 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1455 
1456 		/* at least one port started, need checking link status */
1457 		need_check_link_status = 1;
1458 	}
1459 
1460 	if (need_check_link_status && !no_link_check)
1461 		check_all_ports_link_status(RTE_PORT_ALL);
1462 	else
1463 		printf("Please stop the ports first\n");
1464 
1465 	printf("Done\n");
1466 	return 0;
1467 }
1468 
1469 void
1470 stop_port(portid_t pid)
1471 {
1472 	portid_t pi;
1473 	struct rte_port *port;
1474 	int need_check_link_status = 0;
1475 
1476 	if (test_done == 0) {
1477 		printf("Please stop forwarding first\n");
1478 		return;
1479 	}
1480 	if (dcb_test) {
1481 		dcb_test = 0;
1482 		dcb_config = 0;
1483 	}
1484 	printf("Stopping ports...\n");
1485 
1486 	FOREACH_PORT(pi, ports) {
1487 		if (!port_id_is_invalid(pid, DISABLED_WARN) && pid != pi)
1488 			continue;
1489 
1490 		port = &ports[pi];
1491 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1492 						RTE_PORT_HANDLING) == 0)
1493 			continue;
1494 
1495 		rte_eth_dev_stop(pi);
1496 
1497 		if (rte_atomic16_cmpset(&(port->port_status),
1498 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
1500 		need_check_link_status = 1;
1501 	}
1502 	if (need_check_link_status && !no_link_check)
1503 		check_all_ports_link_status(RTE_PORT_ALL);
1504 
1505 	printf("Done\n");
1506 }
1507 
1508 void
1509 close_port(portid_t pid)
1510 {
1511 	portid_t pi;
1512 	struct rte_port *port;
1513 
1514 	if (test_done == 0) {
1515 		printf("Please stop forwarding first\n");
1516 		return;
1517 	}
1518 
1519 	printf("Closing ports...\n");
1520 
1521 	FOREACH_PORT(pi, ports) {
1522 		if (!port_id_is_invalid(pid, DISABLED_WARN) && pid != pi)
1523 			continue;
1524 
1525 		port = &ports[pi];
1526 		if (rte_atomic16_cmpset(&(port->port_status),
1527 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
1529 			continue;
1530 		}
1531 
1532 		rte_eth_dev_close(pi);
1533 
1534 		if (rte_atomic16_cmpset(&(port->port_status),
1535 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
1537 	}
1538 
1539 	printf("Done\n");
1540 }
1541 
1542 void
1543 attach_port(char *identifier)
1544 {
1545 	portid_t i, j, pi = 0;
1546 
1547 	printf("Attaching a new port...\n");
1548 
1549 	if (identifier == NULL) {
1550 		printf("Invalid parameters are specified\n");
1551 		return;
1552 	}
1553 
1554 	if (test_done == 0) {
1555 		printf("Please stop forwarding first\n");
1556 		return;
1557 	}
1558 
1559 	if (rte_eth_dev_attach(identifier, &pi))
1560 		return;
1561 
1562 	ports[pi].enabled = 1;
1563 	reconfig(pi, rte_eth_dev_socket_id(pi));
1564 	rte_eth_promiscuous_enable(pi);
1565 
1566 	nb_ports = rte_eth_dev_count();
1567 
1568 	/* set_default_fwd_ports_config(); */
1569 	bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1570 	i = 0;
1571 	FOREACH_PORT(j, ports) {
1572 		fwd_ports_ids[i] = j;
1573 		i++;
1574 	}
1575 	nb_cfg_ports = nb_ports;
1576 	nb_fwd_ports++;
1577 
1578 	ports[pi].port_status = RTE_PORT_STOPPED;
1579 
	printf("Port %d is attached. Total number of ports is now %d\n",
			pi, nb_ports);
1581 	printf("Done\n");
1582 }
1583 
1584 void
1585 detach_port(uint8_t port_id)
1586 {
1587 	portid_t i, pi = 0;
1588 	char name[RTE_ETH_NAME_MAX_LEN];
1589 
1590 	printf("Detaching a port...\n");
1591 
1592 	if (!port_is_closed(port_id)) {
1593 		printf("Please close port first\n");
1594 		return;
1595 	}
1596 
1597 	rte_eth_promiscuous_disable(port_id);
1598 
1599 	if (rte_eth_dev_detach(port_id, name))
1600 		return;
1601 
1602 	ports[port_id].enabled = 0;
1603 	nb_ports = rte_eth_dev_count();
1604 
1605 	/* set_default_fwd_ports_config(); */
1606 	bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1607 	i = 0;
1608 	FOREACH_PORT(pi, ports) {
1609 		fwd_ports_ids[i] = pi;
1610 		i++;
1611 	}
1612 	nb_cfg_ports = nb_ports;
1613 	nb_fwd_ports--;
1614 
	printf("Port '%s' is detached. Total number of ports is now %d\n",
1616 			name, nb_ports);
1617 	printf("Done\n");
1618 	return;
1619 }
1620 
1621 void
1622 pmd_test_exit(void)
1623 {
1624 	portid_t pt_id;
1625 
1626 	FOREACH_PORT(pt_id, ports) {
1627 		printf("Stopping port %d...", pt_id);
1628 		fflush(stdout);
1629 		rte_eth_dev_close(pt_id);
1630 		printf("done\n");
1631 	}
1632 	printf("bye...\n");
1633 }
1634 
1635 typedef void (*cmd_func_t)(void);
1636 struct pmd_test_command {
1637 	const char *cmd_name;
1638 	cmd_func_t cmd_func;
1639 };
1640 
1641 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1642 
/* Check the link status of all ports in up to 9 s, and print the final status of each port */
1644 static void
1645 check_all_ports_link_status(uint32_t port_mask)
1646 {
1647 #define CHECK_INTERVAL 100 /* 100ms */
1648 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1649 	uint8_t portid, count, all_ports_up, print_flag = 0;
1650 	struct rte_eth_link link;
1651 
1652 	printf("Checking link statuses...\n");
1653 	fflush(stdout);
1654 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1655 		all_ports_up = 1;
1656 		FOREACH_PORT(portid, ports) {
1657 			if ((port_mask & (1 << portid)) == 0)
1658 				continue;
1659 			memset(&link, 0, sizeof(link));
1660 			rte_eth_link_get_nowait(portid, &link);
1661 			/* print link status if flag set */
1662 			if (print_flag == 1) {
1663 				if (link.link_status)
1664 					printf("Port %d Link Up - speed %u "
1665 						"Mbps - %s\n", (uint8_t)portid,
1666 						(unsigned)link.link_speed,
1667 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1668 					("full-duplex") : ("half-duplex\n"));
1669 				else
1670 					printf("Port %d Link Down\n",
1671 						(uint8_t)portid);
1672 				continue;
1673 			}
1674 			/* clear all_ports_up flag if any link down */
1675 			if (link.link_status == 0) {
1676 				all_ports_up = 0;
1677 				break;
1678 			}
1679 		}
1680 		/* after finally printing all link status, get out */
1681 		if (print_flag == 1)
1682 			break;
1683 
1684 		if (all_ports_up == 0) {
1685 			fflush(stdout);
1686 			rte_delay_ms(CHECK_INTERVAL);
1687 		}
1688 
1689 		/* set the print_flag if all ports up or timeout */
1690 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1691 			print_flag = 1;
1692 		}
1693 	}
1694 }
1695 
1696 static int
1697 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1698 {
1699 	uint16_t i;
1700 	int diag;
1701 	uint8_t mapping_found = 0;
1702 
1703 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1704 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1705 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1706 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1707 					tx_queue_stats_mappings[i].queue_id,
1708 					tx_queue_stats_mappings[i].stats_counter_id);
1709 			if (diag != 0)
1710 				return diag;
1711 			mapping_found = 1;
1712 		}
1713 	}
1714 	if (mapping_found)
1715 		port->tx_queue_stats_mapping_enabled = 1;
1716 	return 0;
1717 }
1718 
1719 static int
1720 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1721 {
1722 	uint16_t i;
1723 	int diag;
1724 	uint8_t mapping_found = 0;
1725 
1726 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1727 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1728 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1729 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1730 					rx_queue_stats_mappings[i].queue_id,
1731 					rx_queue_stats_mappings[i].stats_counter_id);
1732 			if (diag != 0)
1733 				return diag;
1734 			mapping_found = 1;
1735 		}
1736 	}
1737 	if (mapping_found)
1738 		port->rx_queue_stats_mapping_enabled = 1;
1739 	return 0;
1740 }
1741 
1742 static void
1743 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1744 {
1745 	int diag = 0;
1746 
1747 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1748 	if (diag != 0) {
1749 		if (diag == -ENOTSUP) {
1750 			port->tx_queue_stats_mapping_enabled = 0;
1751 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1752 		}
1753 		else
1754 			rte_exit(EXIT_FAILURE,
1755 					"set_tx_queue_stats_mapping_registers "
1756 					"failed for port id=%d diag=%d\n",
1757 					pi, diag);
1758 	}
1759 
1760 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1761 	if (diag != 0) {
1762 		if (diag == -ENOTSUP) {
1763 			port->rx_queue_stats_mapping_enabled = 0;
1764 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1765 		}
1766 		else
1767 			rte_exit(EXIT_FAILURE,
1768 					"set_rx_queue_stats_mapping_registers "
1769 					"failed for port id=%d diag=%d\n",
1770 					pi, diag);
1771 	}
1772 }
1773 
1774 static void
1775 rxtx_port_config(struct rte_port *port)
1776 {
1777 	port->rx_conf = port->dev_info.default_rxconf;
1778 	port->tx_conf = port->dev_info.default_txconf;
1779 
1780 	/* Check if any RX/TX parameters have been passed */
1781 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1782 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1783 
1784 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1785 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1786 
1787 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1788 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1789 
1790 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1791 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1792 
1793 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1794 		port->rx_conf.rx_drop_en = rx_drop_en;
1795 
1796 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1797 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1798 
1799 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1800 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1801 
1802 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1803 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1804 
1805 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1806 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1807 
1808 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1809 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1810 
1811 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1812 		port->tx_conf.txq_flags = txq_flags;
1813 }
1814 
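/*
 * Default per-port configuration: enable RSS only when several RX queues
 * are requested, switch to the VMDQ receive modes when the port exposes
 * virtual functions (max_vfs != 0), and, on ports without VFs, leave an
 * existing DCB configuration (dcb_flag set) untouched.
 */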
1815 void
1816 init_port_config(void)
1817 {
1818 	portid_t pid;
1819 	struct rte_port *port;
1820 
1821 	FOREACH_PORT(pid, ports) {
1822 		port = &ports[pid];
1823 		port->dev_conf.rxmode = rx_mode;
1824 		port->dev_conf.fdir_conf = fdir_conf;
1825 		if (nb_rxq > 1) {
1826 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1827 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1828 		} else {
1829 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1830 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1831 		}
1832 
1833 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1834 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1835 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1836 			else
1837 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1838 		}
1839 
1840 		if (port->dev_info.max_vfs != 0) {
1841 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1842 				port->dev_conf.rxmode.mq_mode =
1843 					ETH_MQ_RX_VMDQ_RSS;
1844 			else
1845 				port->dev_conf.rxmode.mq_mode =
1846 					ETH_MQ_RX_NONE;
1847 
1848 			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1849 		}
1850 
1851 		rxtx_port_config(port);
1852 
1853 		rte_eth_macaddr_get(pid, &port->eth_addr);
1854 
1855 		map_port_queue_stats_mapping_registers(pid, port);
1856 #ifdef RTE_NIC_BYPASS
1857 		rte_eth_dev_bypass_init(pid);
1858 #endif
1859 	}
1860 }
1861 
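/*
 * VLAN tags used by the DCB setup below: get_eth_dcb_conf() maps each tag
 * to a VMDQ pool and init_port_dcb_config() installs all of them in the
 * port's VLAN filter table.
 */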
1862 const uint16_t vlan_tags[] = {
1863 		0,  1,  2,  3,  4,  5,  6,  7,
1864 		8,  9, 10, 11,  12, 13, 14, 15,
1865 		16, 17, 18, 19, 20, 21, 22, 23,
1866 		24, 25, 26, 27, 28, 29, 30, 31
1867 };
1868 
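/*
 * Translate a testpmd dcb_config into a complete rte_eth_conf. Two layouts
 * are produced: VMDQ+DCB (dcb_mode == DCB_VT_ENABLED), where traffic is
 * first split into pools by VLAN tag and then into traffic classes, and
 * plain DCB, where queues are split by traffic class only. pfc_en
 * additionally advertises priority flow control support.
 */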
1869 static int
1870 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1871 {
1872 	uint8_t i;
1873 
1874 	/*
1875 	 * Build the correct configuration for DCB+VT based on the vlan_tags
1876 	 * array given above and on the number of traffic classes in use.
1877 	 */
1878 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1879 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1880 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1881 
1882 		/* VMDQ+DCB RX and TX configurations */
1883 		vmdq_rx_conf.enable_default_pool = 0;
1884 		vmdq_rx_conf.default_pool = 0;
1885 		vmdq_rx_conf.nb_queue_pools =
1886 			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1887 		vmdq_tx_conf.nb_queue_pools =
1888 			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1889 
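		/*
		 * Spread the 32 VLAN tags round-robin across the pools:
		 * pool_map[].pools is a bitmask of destination pools, so the
		 * shift below selects exactly one pool per tag.
		 */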
1890 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1891 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1892 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1893 			vmdq_rx_conf.pool_map[i].pools = 1ULL << (i % vmdq_rx_conf.nb_queue_pools);
1894 		}
1895 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1896 			vmdq_rx_conf.dcb_queue[i] = i;
1897 			vmdq_tx_conf.dcb_queue[i] = i;
1898 		}
1899 
1900 		/* Set the DCB multi-queue mode for RX and TX */
1901 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1902 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1903 		if (dcb_conf->pfc_en)
1904 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1905 		else
1906 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1907 
1908 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1909 				sizeof(struct rte_eth_vmdq_dcb_conf)));
1910 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1911 				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1912 	}
1913 	else {
1914 		struct rte_eth_dcb_rx_conf rx_conf;
1915 		struct rte_eth_dcb_tx_conf tx_conf;
1916 
1917 		/* queue mapping configuration of DCB RX and TX */
1918 		if (dcb_conf->num_tcs == ETH_4_TCS)
1919 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1920 		else
1921 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1922 
1923 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1924 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1925 
1926 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1927 			rx_conf.dcb_queue[i] = i;
1928 			tx_conf.dcb_queue[i] = i;
1929 		}
1930 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1931 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1932 		if (dcb_conf->pfc_en)
1933 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1934 		else
1935 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1936 
1937 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1938 				sizeof(struct rte_eth_dcb_rx_conf)));
1939 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1940 				sizeof(struct rte_eth_dcb_tx_conf)));
1941 	}
1942 
1943 	return 0;
1944 }
1945 
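/*
 * Reconfigure one port for DCB testing: force the 128-queue layout used
 * here, load the VLAN filter with every entry of vlan_tags[], and set
 * dcb_flag so init_port_config() knows the port is in DCB mode.
 */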
1946 int
1947 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1948 {
1949 	struct rte_eth_conf port_conf;
1950 	struct rte_port *rte_port;
1951 	int retval;
1952 	uint16_t nb_vlan;
1953 	uint16_t i;
1954 
1955 	/* RX and TX queue configuration in DCB mode */
1956 	nb_rxq = 128;
1957 	nb_txq = 128;
1958 	rx_free_thresh = 64;
1959 
1960 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1961 	/* Enter DCB configuration status */
1962 	dcb_config = 1;
1963 
1964 	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1965 	/* Set the configuration of DCB in VT mode and DCB in non-VT mode */
1966 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1967 	if (retval < 0)
1968 		return retval;
1969 
1970 	rte_port = &ports[pid];
1971 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1972 
1973 	rxtx_port_config(rte_port);
1974 	/* VLAN filter */
1975 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1976 	for (i = 0; i < nb_vlan; i++) {
1977 		rx_vft_set(pid, vlan_tags[i], 1);
1978 	}
1979 
1980 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1981 	map_port_queue_stats_mapping_registers(pid, rte_port);
1982 
1983 	rte_port->dcb_flag = 1;
1984 
1985 	return 0;
1986 }
1987 
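/*
 * Allocate the ports[] array for RTE_MAX_ETHPORTS entries so that port ids
 * can index it directly, and mark the probed ports as enabled for
 * FOREACH_PORT iteration.
 */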
1988 static void
1989 init_port(void)
1990 {
1991 	portid_t pid;
1992 
1993 	/* Configuration of Ethernet ports. */
1994 	ports = rte_zmalloc("testpmd: ports",
1995 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1996 			    RTE_CACHE_LINE_SIZE);
1997 	if (ports == NULL) {
1998 		rte_exit(EXIT_FAILURE,
1999 				"rte_zmalloc(%d struct rte_port) failed\n",
2000 				RTE_MAX_ETHPORTS);
2001 	}
2002 
2003 	/* Enable the allocated ports */
2004 	for (pid = 0; pid < nb_ports; pid++)
2005 		ports[pid].enabled = 1;
2006 }
2007 
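/*
 * Startup sequence: EAL init, port discovery, default forwarding config,
 * testpmd argument parsing, port and queue setup, then either the
 * interactive prompt or immediate packet forwarding. Illustrative
 * invocations (see the testpmd guide for the full option list):
 *   testpmd -c 0xf -n 4 -- -i             # interactive command line
 *   testpmd -c 0xf -n 4 -- --nb-cores=2   # start forwarding directly
 */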
2008 int
2009 main(int argc, char** argv)
2010 {
2011 	int  diag;
2012 	uint8_t port_id;
2013 
2014 	diag = rte_eal_init(argc, argv);
2015 	if (diag < 0)
2016 		rte_panic("Cannot init EAL\n");
2017 
2018 	nb_ports = (portid_t) rte_eth_dev_count();
2019 	if (nb_ports == 0)
2020 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2021 
2022 	/* allocate port structures, and init them */
2023 	init_port();
2024 
2025 	set_def_fwd_config();
2026 	if (nb_lcores == 0)
2027 		rte_panic("Empty set of forwarding logical cores - check the "
2028 			  "core mask supplied in the command parameters\n");
2029 
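	/*
	 * rte_eal_init() returned the number of arguments it consumed, so
	 * skipping them leaves only the application options that follow the
	 * EAL "--" separator for launch_args_parse().
	 */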
2030 	argc -= diag;
2031 	argv += diag;
2032 	if (argc > 1)
2033 		launch_args_parse(argc, argv);
2034 
2035 	if (nb_rxq > nb_txq)
2036 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2037 		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
2038 		       nb_rxq, nb_txq);
2039 
2040 	init_config();
2041 	if (start_port(RTE_PORT_ALL) != 0)
2042 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2043 
2044 	/* set all ports to promiscuous mode by default */
2045 	FOREACH_PORT(port_id, ports)
2046 		rte_eth_promiscuous_enable(port_id);
2047 
2048 #ifdef RTE_LIBRTE_CMDLINE
2049 	if (interactive == 1) {
2050 		if (auto_start) {
2051 			printf("Start automatic packet forwarding\n");
2052 			start_packet_forwarding(0);
2053 		}
2054 		prompt();
2055 	} else
2056 #endif
2057 	{
2058 		char c;
2059 		int rc;
2060 
2061 		printf("No interactive command line, starting packet forwarding\n");
2062 		start_packet_forwarding(0);
2063 		printf("Press enter to exit\n");
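		/* Forwarding runs on the other lcores; block on stdin until
		 * the user presses enter. */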
2064 		rc = read(0, &c, 1);
2065 		if (rc < 0)
2066 			return 1;
2067 	}
2068 
2069 	return 0;
2070 }
2071