xref: /dpdk/app/test-pmd/testpmd.c (revision a7dde4ff0dcdf382bc2a5fc86c40106a8f43e1f3)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 
80 #include "testpmd.h"
81 #include "mempool_osdep.h"
82 
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 
85 /* Use the master core for the command line? */
86 uint8_t interactive = 0;
87 uint8_t auto_start = 0;
88 
89 /*
90  * NUMA support configuration.
91  * When set, the NUMA support attempts to dispatch the allocation of the
92  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
93  * probed ports among the CPU sockets 0 and 1.
94  * Otherwise, all memory is allocated from CPU socket 0.
95  */
96 uint8_t numa_support = 0; /**< No numa support by default */
97 
98 /*
99  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
100  * not configured.
101  */
102 uint8_t socket_num = UMA_NO_CONFIG;
103 
104 /*
105  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
106  */
107 uint8_t mp_anon = 0;
108 
109 /*
110  * Record the Ethernet address of peer target ports to which packets are
111  * forwarded.
112  * Must be instantiated with the Ethernet addresses of peer traffic generator
113  * ports.
114  */
115 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
116 portid_t nb_peer_eth_addrs = 0;
117 
118 /*
119  * Probed Target Environment.
120  */
121 struct rte_port *ports;	       /**< For all probed ethernet ports. */
122 portid_t nb_ports;             /**< Number of probed ethernet ports. */
123 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
124 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
125 
126 /*
127  * Test Forwarding Configuration.
128  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
129  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
130  */
131 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
132 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
133 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
134 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
135 
136 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
137 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
138 
139 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
140 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
141 
142 /*
143  * Forwarding engines.
144  */
145 struct fwd_engine *fwd_engines[] = {
146 	&io_fwd_engine,
147 	&mac_fwd_engine,
148 	&mac_retry_fwd_engine,
149 	&mac_swap_engine,
150 	&flow_gen_engine,
151 	&rx_only_engine,
152 	&tx_only_engine,
153 	&csum_fwd_engine,
154 	&icmp_echo_engine,
155 #ifdef RTE_LIBRTE_IEEE1588
156 	&ieee1588_fwd_engine,
157 #endif
158 	NULL,
159 };
160 
161 struct fwd_config cur_fwd_config;
162 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 
164 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
165 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
166                                       * specified on command-line. */
167 
168 /*
169  * Configuration of packet segments used by the "txonly" processing engine.
170  */
171 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
172 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
173 	TXONLY_DEF_PACKET_LEN,
174 };
175 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
176 
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179 
180 /* Whether the current configuration is in DCB mode; 0 means it is not. */
181 uint8_t dcb_config = 0;
182 
183 /* Whether DCB is in testing status. */
184 uint8_t dcb_test = 0;
185 
186 /* DCB on and VT on mapping is the default. */
187 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
188 
189 /*
190  * Configurable number of RX/TX queues.
191  */
192 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
193 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
194 
195 /*
196  * Configurable number of RX/TX ring descriptors.
197  */
198 #define RTE_TEST_RX_DESC_DEFAULT 128
199 #define RTE_TEST_TX_DESC_DEFAULT 512
200 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
201 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
202 
203 #define RTE_PMD_PARAM_UNSET -1
204 /*
205  * Configurable values of RX and TX ring threshold registers.
206  */
207 
208 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
210 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
211 
212 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
214 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
215 
216 /*
217  * Configurable value of RX free threshold.
218  */
219 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
220 
221 /*
222  * Configurable value of RX drop enable.
223  */
224 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
225 
226 /*
227  * Configurable value of TX free threshold.
228  */
229 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
230 
231 /*
232  * Configurable value of TX RS bit threshold.
233  */
234 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
235 
236 /*
237  * Configurable value of TX queue flags.
238  */
239 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
240 
241 /*
242  * Receive Side Scaling (RSS) configuration.
243  */
244 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
245 
246 /*
247  * Port topology configuration
248  */
249 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
250 
251 /*
252  * Avoid flushing all the RX streams before starting forwarding.
253  */
254 uint8_t no_flush_rx = 0; /* flush by default */
255 
256 /*
257  * Avoid checking the link status when starting/stopping a port.
258  */
259 uint8_t no_link_check = 0; /* check by default */
260 
261 /*
262  * NIC bypass mode configuration options.
263  */
264 #ifdef RTE_NIC_BYPASS
265 
266 /* The NIC bypass watchdog timeout. */
267 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
268 
269 #endif
270 
271 /*
272  * Ethernet device configuration.
273  */
274 struct rte_eth_rxmode rx_mode = {
275 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276 	.split_hdr_size = 0,
277 	.header_split   = 0, /**< Header Split disabled. */
278 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
279 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
280 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
281 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
282 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
283 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
284 };
285 
286 struct rte_fdir_conf fdir_conf = {
287 	.mode = RTE_FDIR_MODE_NONE,
288 	.pballoc = RTE_FDIR_PBALLOC_64K,
289 	.status = RTE_FDIR_REPORT_STATUS,
290 	.mask = {
291 		.vlan_tci_mask = 0x0,
292 		.ipv4_mask     = {
293 			.src_ip = 0xFFFFFFFF,
294 			.dst_ip = 0xFFFFFFFF,
295 		},
296 		.ipv6_mask     = {
297 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
299 		},
300 		.src_port_mask = 0xFFFF,
301 		.dst_port_mask = 0xFFFF,
302 	},
303 	.drop_queue = 127,
304 };
305 
306 volatile int test_done = 1; /* packet forwarding is stopped when set to 1. */
307 
308 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
309 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
310 
311 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
312 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
313 
314 uint16_t nb_tx_queue_stats_mappings = 0;
315 uint16_t nb_rx_queue_stats_mappings = 0;
316 
317 /* Forward function declarations */
318 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
319 static void check_all_ports_link_status(uint32_t port_mask);
320 
321 /*
322  * Check if all the ports are started.
323  * If yes, return positive value. If not, return zero.
324  */
325 static int all_ports_started(void);
326 
327 /*
328  * Find next enabled port
329  */
330 portid_t
331 find_next_port(portid_t p, struct rte_port *ports, int size)
332 {
333 	if (ports == NULL)
334 		rte_exit(-EINVAL, "failed to find a next port id\n");
335 
336 	while ((p < size) && (ports[p].enabled == 0))
337 		p++;
338 	return p;
339 }
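
/*
 * Usage sketch (illustrative addition, not in the original source): walking
 * all enabled ports with find_next_port(). This mirrors what the
 * FOREACH_PORT() macro from testpmd.h is assumed to expand to; the exact
 * macro definition may differ.
 *
 *	portid_t p;
 *
 *	for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS);
 *	     p < RTE_MAX_ETHPORTS;
 *	     p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
 *		printf("port %u is enabled\n", (unsigned int)p);
 */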
340 
341 /*
342  * Setup default configuration.
343  */
344 static void
345 set_default_fwd_lcores_config(void)
346 {
347 	unsigned int i;
348 	unsigned int nb_lc;
349 
350 	nb_lc = 0;
351 	for (i = 0; i < RTE_MAX_LCORE; i++) {
352 		if (! rte_lcore_is_enabled(i))
353 			continue;
354 		if (i == rte_get_master_lcore())
355 			continue;
356 		fwd_lcores_cpuids[nb_lc++] = i;
357 	}
358 	nb_lcores = (lcoreid_t) nb_lc;
359 	nb_cfg_lcores = nb_lcores;
360 	nb_fwd_lcores = 1;
361 }
362 
363 static void
364 set_def_peer_eth_addrs(void)
365 {
366 	portid_t i;
367 
368 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
369 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
370 		peer_eth_addrs[i].addr_bytes[5] = i;
371 	}
372 }
373 
374 static void
375 set_default_fwd_ports_config(void)
376 {
377 	portid_t pt_id;
378 
379 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
380 		fwd_ports_ids[pt_id] = pt_id;
381 
382 	nb_cfg_ports = nb_ports;
383 	nb_fwd_ports = nb_ports;
384 }
385 
386 void
387 set_def_fwd_config(void)
388 {
389 	set_default_fwd_lcores_config();
390 	set_def_peer_eth_addrs();
391 	set_default_fwd_ports_config();
392 }
393 
394 /*
395  * Configuration initialisation done once at init time.
396  */
397 struct mbuf_ctor_arg {
398 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
399 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
400 };
401 
402 struct mbuf_pool_ctor_arg {
403 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
404 };
405 
406 static void
407 testpmd_mbuf_ctor(struct rte_mempool *mp,
408 		  void *opaque_arg,
409 		  void *raw_mbuf,
410 		  __attribute__((unused)) unsigned i)
411 {
412 	struct mbuf_ctor_arg *mb_ctor_arg;
413 	struct rte_mbuf    *mb;
414 
415 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
416 	mb = (struct rte_mbuf *) raw_mbuf;
417 
418 	mb->pool         = mp;
419 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
420 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
421 			mb_ctor_arg->seg_buf_offset);
422 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
423 	mb->ol_flags     = 0;
424 	mb->data_off     = RTE_PKTMBUF_HEADROOM;
425 	mb->nb_segs      = 1;
426 	mb->tx_offload   = 0;
427 	mb->vlan_tci     = 0;
428 	mb->hash.rss     = 0;
429 }
430 
431 static void
432 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
433 		       void *opaque_arg)
434 {
435 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
436 	struct rte_pktmbuf_pool_private *mbp_priv;
437 
438 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
439 		printf("%s(%s) private_data_size %d < %d\n",
440 		       __func__, mp->name, (int) mp->private_data_size,
441 		       (int) sizeof(struct rte_pktmbuf_pool_private));
442 		return;
443 	}
444 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
445 	mbp_priv = rte_mempool_get_priv(mp);
446 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
447 }
448 
449 static void
450 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
451 		 unsigned int socket_id)
452 {
453 	char pool_name[RTE_MEMPOOL_NAMESIZE];
454 	struct rte_mempool *rte_mp;
455 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
456 	struct mbuf_ctor_arg mb_ctor_arg;
457 	uint32_t mb_size;
458 
459 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
460 						mbuf_seg_size);
461 	mb_ctor_arg.seg_buf_offset =
462 		(uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
463 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
464 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
465 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
466 
467 #ifdef RTE_LIBRTE_PMD_XENVIRT
468 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
469                                    (unsigned) mb_mempool_cache,
470                                    sizeof(struct rte_pktmbuf_pool_private),
471                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
472                                    testpmd_mbuf_ctor, &mb_ctor_arg,
473                                    socket_id, 0);
474 
477 #else
478 	if (mp_anon != 0)
479 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
480 				    (unsigned) mb_mempool_cache,
481 				    sizeof(struct rte_pktmbuf_pool_private),
482 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
483 				    testpmd_mbuf_ctor, &mb_ctor_arg,
484 				    socket_id, 0);
485 	else
486 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
487 				    (unsigned) mb_mempool_cache,
488 				    sizeof(struct rte_pktmbuf_pool_private),
489 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
490 				    testpmd_mbuf_ctor, &mb_ctor_arg,
491 				    socket_id, 0);
492 
493 #endif
494 
495 	if (rte_mp == NULL) {
496 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
497 						"failed\n", socket_id);
498 	} else if (verbose_level > 0) {
499 		rte_mempool_dump(stdout, rte_mp);
500 	}
501 }
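
/*
 * Usage sketch (illustrative addition, not in the original source): creating
 * one mbuf pool per NUMA socket, as init_config() does below when
 * numa_support is set. The mbuf count of 16384 is a made-up example value.
 *
 *	unsigned int sock;
 *
 *	for (sock = 0; sock < MAX_SOCKET; sock++)
 *		mbuf_pool_create(mbuf_data_size, 16384, sock);
 */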
502 
503 /*
504  * Check whether the given socket ID is valid in NUMA mode.
505  * If valid, return 0; otherwise return -1.
506  */
507 static int
508 check_socket_id(const unsigned int socket_id)
509 {
510 	static int warning_once = 0;
511 
512 	if (socket_id >= MAX_SOCKET) {
513 		if (!warning_once && numa_support)
514 			printf("Warning: NUMA should be configured manually by"
515 			       " using --port-numa-config and"
516 			       " --ring-numa-config parameters along with"
517 			       " --numa.\n");
518 		warning_once = 1;
519 		return -1;
520 	}
521 	return 0;
522 }
523 
524 static void
525 init_config(void)
526 {
527 	portid_t pid;
528 	struct rte_port *port;
529 	struct rte_mempool *mbp;
530 	unsigned int nb_mbuf_per_pool;
531 	lcoreid_t  lc_id;
532 	uint8_t port_per_socket[MAX_SOCKET];
533 
534 	memset(port_per_socket, 0, MAX_SOCKET);
535 	/* Configuration of logical cores. */
536 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
537 				sizeof(struct fwd_lcore *) * nb_lcores,
538 				RTE_CACHE_LINE_SIZE);
539 	if (fwd_lcores == NULL) {
540 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
541 							"failed\n", nb_lcores);
542 	}
543 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
544 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
545 					       sizeof(struct fwd_lcore),
546 					       RTE_CACHE_LINE_SIZE);
547 		if (fwd_lcores[lc_id] == NULL) {
548 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
549 								"failed\n");
550 		}
551 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
552 	}
553 
554 	/*
555 	 * Create pools of mbuf.
556 	 * If NUMA support is disabled, create a single pool of mbuf in
557 	 * socket 0 memory by default.
558 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
559 	 *
560 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
561 	 * nb_txd can be configured at run time.
562 	 */
563 	if (param_total_num_mbufs)
564 		nb_mbuf_per_pool = param_total_num_mbufs;
565 	else {
566 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
567 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
568 
569 		if (!numa_support)
570 			nb_mbuf_per_pool =
571 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
572 	}
573 
574 	if (!numa_support) {
575 		if (socket_num == UMA_NO_CONFIG)
576 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
577 		else
578 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
579 						 socket_num);
580 	}
581 
582 	FOREACH_PORT(pid, ports) {
583 		port = &ports[pid];
584 		rte_eth_dev_info_get(pid, &port->dev_info);
585 
586 		if (numa_support) {
587 			if (port_numa[pid] != NUMA_NO_CONFIG)
588 				port_per_socket[port_numa[pid]]++;
589 			else {
590 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
591 
592 				/* if socket_id is invalid, set to 0 */
593 				if (check_socket_id(socket_id) < 0)
594 					socket_id = 0;
595 				port_per_socket[socket_id]++;
596 			}
597 		}
598 
599 		/* set flag to initialize port/queue */
600 		port->need_reconfig = 1;
601 		port->need_reconfig_queues = 1;
602 	}
603 
604 	if (numa_support) {
605 		uint8_t i;
606 		unsigned int nb_mbuf;
607 
608 		if (param_total_num_mbufs)
609 			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
610 
611 		for (i = 0; i < MAX_SOCKET; i++) {
612 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
613 			if (nb_mbuf)
614 				mbuf_pool_create(mbuf_data_size,
615 						nb_mbuf, i);
616 		}
617 	}
618 	init_port_config();
619 
620 	/*
621 	 * Record which mbuf pool each logical core should use, if needed.
622 	 */
623 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
624 		mbp = mbuf_pool_find(
625 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
626 
627 		if (mbp == NULL)
628 			mbp = mbuf_pool_find(0);
629 		fwd_lcores[lc_id]->mbp = mbp;
630 	}
631 
632 	/* Configuration of packet forwarding streams. */
633 	if (init_fwd_streams() < 0)
634 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
635 }
636 
637 
638 void
639 reconfig(portid_t new_port_id, unsigned socket_id)
640 {
641 	struct rte_port *port;
642 
643 	/* Reconfiguration of Ethernet ports. */
644 	port = &ports[new_port_id];
645 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
646 
647 	/* set flag to initialize port/queue */
648 	port->need_reconfig = 1;
649 	port->need_reconfig_queues = 1;
650 	port->socket_id = socket_id;
651 
652 	init_port_config();
653 }
654 
655 
656 int
657 init_fwd_streams(void)
658 {
659 	portid_t pid;
660 	struct rte_port *port;
661 	streamid_t sm_id, nb_fwd_streams_new;
662 
663 	/* Set the socket ID according to whether NUMA is used or not. */
664 	FOREACH_PORT(pid, ports) {
665 		port = &ports[pid];
666 		if (nb_rxq > port->dev_info.max_rx_queues) {
667 			printf("Fail: nb_rxq(%d) is greater than "
668 				"max_rx_queues(%d)\n", nb_rxq,
669 				port->dev_info.max_rx_queues);
670 			return -1;
671 		}
672 		if (nb_txq > port->dev_info.max_tx_queues) {
673 			printf("Fail: nb_txq(%d) is greater than "
674 				"max_tx_queues(%d)\n", nb_txq,
675 				port->dev_info.max_tx_queues);
676 			return -1;
677 		}
678 		if (numa_support) {
679 			if (port_numa[pid] != NUMA_NO_CONFIG)
680 				port->socket_id = port_numa[pid];
681 			else {
682 				port->socket_id = rte_eth_dev_socket_id(pid);
683 
684 				/* if socket_id is invalid, set to 0 */
685 				if (check_socket_id(port->socket_id) < 0)
686 					port->socket_id = 0;
687 			}
688 		}
689 		else {
690 			if (socket_num == UMA_NO_CONFIG)
691 				port->socket_id = 0;
692 			else
693 				port->socket_id = socket_num;
694 		}
695 	}
696 
697 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
698 	if (nb_fwd_streams_new == nb_fwd_streams)
699 		return 0;
700 	/* clear the old */
701 	if (fwd_streams != NULL) {
702 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
703 			if (fwd_streams[sm_id] == NULL)
704 				continue;
705 			rte_free(fwd_streams[sm_id]);
706 			fwd_streams[sm_id] = NULL;
707 		}
708 		rte_free(fwd_streams);
709 		fwd_streams = NULL;
710 	}
711 
712 	/* init new */
713 	nb_fwd_streams = nb_fwd_streams_new;
714 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
715 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
716 	if (fwd_streams == NULL)
717 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
718 						"failed\n", nb_fwd_streams);
719 
720 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
721 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
722 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
723 		if (fwd_streams[sm_id] == NULL)
724 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
725 								" failed\n");
726 	}
727 
728 	return 0;
729 }
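
/*
 * Worked example (illustrative): with nb_ports = 2 and nb_rxq = 4,
 * init_fwd_streams() allocates nb_ports * nb_rxq = 2 * 4 = 8 forwarding
 * streams, one per RX queue of each port.
 */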
730 
731 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
732 static void
733 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
734 {
735 	unsigned int total_burst;
736 	unsigned int nb_burst;
737 	unsigned int burst_stats[3];
738 	uint16_t pktnb_stats[3];
739 	uint16_t nb_pkt;
740 	int burst_percent[3];
741 
742 	/*
743 	 * First compute the total number of packet bursts and the
744 	 * two highest numbers of bursts of the same number of packets.
745 	 */
746 	total_burst = 0;
747 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
748 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
749 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
750 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
751 		if (nb_burst == 0)
752 			continue;
753 		total_burst += nb_burst;
754 		if (nb_burst > burst_stats[0]) {
755 			burst_stats[1] = burst_stats[0];
756 			pktnb_stats[1] = pktnb_stats[0];
757 			burst_stats[0] = nb_burst;
758 			pktnb_stats[0] = nb_pkt;
759 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
760 	}
761 	if (total_burst == 0)
762 		return;
763 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
764 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
765 	       burst_percent[0], (int) pktnb_stats[0]);
766 	if (burst_stats[0] == total_burst) {
767 		printf("]\n");
768 		return;
769 	}
770 	if (burst_stats[0] + burst_stats[1] == total_burst) {
771 		printf(" + %d%% of %d pkts]\n",
772 		       100 - burst_percent[0], pktnb_stats[1]);
773 		return;
774 	}
775 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
776 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
777 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
778 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
779 		return;
780 	}
781 	printf(" + %d%% of %d pkts + %d%% of others]\n",
782 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
783 }
784 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
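
/*
 * Example of the output produced by pkt_burst_stats_display() (values are
 * made up): with 900 bursts of 32 packets and 100 bursts of 16 packets,
 * total_burst is 1000 and burst_stats[0] + burst_stats[1] == total_burst,
 * so the second branch prints:
 *
 *	RX-bursts : 1000 [90% of 32 pkts + 10% of 16 pkts]
 */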
785 
786 static void
787 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
788 {
789 	struct rte_port *port;
790 	uint8_t i;
791 
792 	static const char *fwd_stats_border = "----------------------";
793 
794 	port = &ports[port_id];
795 	printf("\n  %s Forward statistics for port %-2d %s\n",
796 	       fwd_stats_border, port_id, fwd_stats_border);
797 
798 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
799 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
800 		       "%-"PRIu64"\n",
801 		       stats->ipackets, stats->imissed,
802 		       (uint64_t) (stats->ipackets + stats->imissed));
803 
804 		if (cur_fwd_eng == &csum_fwd_engine)
805 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
806 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
807 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
808 			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
809 			       "RX-error: %-"PRIu64"\n",
810 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
811 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
812 		}
813 
814 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
815 		       "%-"PRIu64"\n",
816 		       stats->opackets, port->tx_dropped,
817 		       (uint64_t) (stats->opackets + port->tx_dropped));
818 	}
819 	else {
820 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
821 		       "%14"PRIu64"\n",
822 		       stats->ipackets, stats->imissed,
823 		       (uint64_t) (stats->ipackets + stats->imissed));
824 
825 		if (cur_fwd_eng == &csum_fwd_engine)
826 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
827 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
828 		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
829 			printf("  RX-badcrc:              %14"PRIu64"    RX-badlen: %14"PRIu64
830 			       "    RX-error:%"PRIu64"\n",
831 			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
832 			printf("  RX-nombufs:             %14"PRIu64"\n",
833 			       stats->rx_nombuf);
834 		}
835 
836 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
837 		       "%14"PRIu64"\n",
838 		       stats->opackets, port->tx_dropped,
839 		       (uint64_t) (stats->opackets + port->tx_dropped));
840 	}
841 
842 	/* Display statistics of XON/XOFF pause frames, if any. */
843 	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
844 	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
845 		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
846 		       stats->rx_pause_xoff, stats->rx_pause_xon);
847 		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
848 		       stats->tx_pause_xoff, stats->tx_pause_xon);
849 	}
850 
851 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
852 	if (port->rx_stream)
853 		pkt_burst_stats_display("RX",
854 			&port->rx_stream->rx_burst_stats);
855 	if (port->tx_stream)
856 		pkt_burst_stats_display("TX",
857 			&port->tx_stream->tx_burst_stats);
858 #endif
859 	/* stats fdir */
860 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
861 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
862 		       stats->fdirmiss,
863 		       stats->fdirmatch);
864 
865 	if (port->rx_queue_stats_mapping_enabled) {
866 		printf("\n");
867 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
868 			printf("  Stats reg %2d RX-packets:%14"PRIu64
869 			       "     RX-errors:%14"PRIu64
870 			       "    RX-bytes:%14"PRIu64"\n",
871 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
872 		}
873 		printf("\n");
874 	}
875 	if (port->tx_queue_stats_mapping_enabled) {
876 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
877 			printf("  Stats reg %2d TX-packets:%14"PRIu64
878 			       "                                 TX-bytes:%14"PRIu64"\n",
879 			       i, stats->q_opackets[i], stats->q_obytes[i]);
880 		}
881 	}
882 
883 	printf("  %s--------------------------------%s\n",
884 	       fwd_stats_border, fwd_stats_border);
885 }
886 
887 static void
888 fwd_stream_stats_display(streamid_t stream_id)
889 {
890 	struct fwd_stream *fs;
891 	static const char *fwd_top_stats_border = "-------";
892 
893 	fs = fwd_streams[stream_id];
894 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
895 	    (fs->fwd_dropped == 0))
896 		return;
897 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
898 	       "TX Port=%2d/Queue=%2d %s\n",
899 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
900 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
901 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
902 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
903 
904 	/* If in checksum forwarding mode, display the bad checksum counters. */
905 	if (cur_fwd_eng == &csum_fwd_engine) {
906 		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
907 		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
908 	}
909 
910 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
911 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
912 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
913 #endif
914 }
915 
916 static void
917 flush_fwd_rx_queues(void)
918 {
919 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
920 	portid_t  rxp;
921 	portid_t port_id;
922 	queueid_t rxq;
923 	uint16_t  nb_rx;
924 	uint16_t  i;
925 	uint8_t   j;
926 
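	/*
	 * Make two passes over all RX queues, separated by the 10 ms delay
	 * below, so that packets still in flight during the first pass are
	 * drained as well.
	 */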
927 	for (j = 0; j < 2; j++) {
928 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
929 			for (rxq = 0; rxq < nb_rxq; rxq++) {
930 				port_id = fwd_ports_ids[rxp];
931 				do {
932 					nb_rx = rte_eth_rx_burst(port_id, rxq,
933 						pkts_burst, MAX_PKT_BURST);
934 					for (i = 0; i < nb_rx; i++)
935 						rte_pktmbuf_free(pkts_burst[i]);
936 				} while (nb_rx > 0);
937 			}
938 		}
939 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
940 	}
941 }
942 
943 static void
944 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
945 {
946 	struct fwd_stream **fsm;
947 	streamid_t nb_fs;
948 	streamid_t sm_id;
949 
950 	fsm = &fwd_streams[fc->stream_idx];
951 	nb_fs = fc->stream_nb;
952 	do {
953 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
954 			(*pkt_fwd)(fsm[sm_id]);
955 	} while (!fc->stopped);
956 }
957 
958 static int
959 start_pkt_forward_on_core(void *fwd_arg)
960 {
961 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
962 			     cur_fwd_config.fwd_eng->packet_fwd);
963 	return 0;
964 }
965 
966 /*
967  * Run the TXONLY packet forwarding engine to send a single burst of packets.
968  * Used to start communication flows in network loopback test configurations.
969  */
970 static int
971 run_one_txonly_burst_on_core(void *fwd_arg)
972 {
973 	struct fwd_lcore *fwd_lc;
974 	struct fwd_lcore tmp_lcore;
975 
976 	fwd_lc = (struct fwd_lcore *) fwd_arg;
977 	tmp_lcore = *fwd_lc;
978 	tmp_lcore.stopped = 1;
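	/*
	 * With stopped already set, the do/while loop in run_pkt_fwd_on_lcore()
	 * makes exactly one pass over the streams, i.e. a single burst each.
	 */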
979 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
980 	return 0;
981 }
982 
983 /*
984  * Launch packet forwarding:
985  *     - Setup per-port forwarding context.
986  *     - launch logical cores with their forwarding configuration.
987  */
988 static void
989 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
990 {
991 	port_fwd_begin_t port_fwd_begin;
992 	unsigned int i;
993 	unsigned int lc_id;
994 	int diag;
995 
996 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
997 	if (port_fwd_begin != NULL) {
998 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
999 			(*port_fwd_begin)(fwd_ports_ids[i]);
1000 	}
1001 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1002 		lc_id = fwd_lcores_cpuids[i];
1003 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1004 			fwd_lcores[i]->stopped = 0;
1005 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1006 						     fwd_lcores[i], lc_id);
1007 			if (diag != 0)
1008 				printf("launch lcore %u failed - diag=%d\n",
1009 				       lc_id, diag);
1010 		}
1011 	}
1012 }
1013 
1014 /*
1015  * Launch packet forwarding configuration.
1016  */
1017 void
1018 start_packet_forwarding(int with_tx_first)
1019 {
1020 	port_fwd_begin_t port_fwd_begin;
1021 	port_fwd_end_t  port_fwd_end;
1022 	struct rte_port *port;
1023 	unsigned int i;
1024 	portid_t   pt_id;
1025 	streamid_t sm_id;
1026 
1027 	if (all_ports_started() == 0) {
1028 		printf("Not all ports were started\n");
1029 		return;
1030 	}
1031 	if (test_done == 0) {
1032 		printf("Packet forwarding already started\n");
1033 		return;
1034 	}
1035 	if (dcb_test) {
1036 		for (i = 0; i < nb_fwd_ports; i++) {
1037 			pt_id = fwd_ports_ids[i];
1038 			port = &ports[pt_id];
1039 			if (!port->dcb_flag) {
1040 				printf("In DCB mode, all forwarding ports must "
1041 				       "be configured in this mode.\n");
1042 				return;
1043 			}
1044 		}
1045 		if (nb_fwd_lcores == 1) {
1046 			printf("In DCB mode, the number of forwarding cores "
1047 			       "should be larger than 1.\n");
1048 			return;
1049 		}
1050 	}
1051 	test_done = 0;
1052 
1053 	if (!no_flush_rx)
1054 		flush_fwd_rx_queues();
1055 
1056 	fwd_config_setup();
1057 	rxtx_config_display();
1058 
1059 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1060 		pt_id = fwd_ports_ids[i];
1061 		port = &ports[pt_id];
1062 		rte_eth_stats_get(pt_id, &port->stats);
1063 		port->tx_dropped = 0;
1064 
1065 		map_port_queue_stats_mapping_registers(pt_id, port);
1066 	}
1067 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1068 		fwd_streams[sm_id]->rx_packets = 0;
1069 		fwd_streams[sm_id]->tx_packets = 0;
1070 		fwd_streams[sm_id]->fwd_dropped = 0;
1071 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1072 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1073 
1074 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1075 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1076 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1077 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1078 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1079 #endif
1080 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1081 		fwd_streams[sm_id]->core_cycles = 0;
1082 #endif
1083 	}
1084 	if (with_tx_first) {
1085 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1086 		if (port_fwd_begin != NULL) {
1087 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1088 				(*port_fwd_begin)(fwd_ports_ids[i]);
1089 		}
1090 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1091 		rte_eal_mp_wait_lcore();
1092 		port_fwd_end = tx_only_engine.port_fwd_end;
1093 		if (port_fwd_end != NULL) {
1094 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1095 				(*port_fwd_end)(fwd_ports_ids[i]);
1096 		}
1097 	}
1098 	launch_packet_forwarding(start_pkt_forward_on_core);
1099 }
1100 
1101 void
1102 stop_packet_forwarding(void)
1103 {
1104 	struct rte_eth_stats stats;
1105 	struct rte_port *port;
1106 	port_fwd_end_t  port_fwd_end;
1107 	int i;
1108 	portid_t   pt_id;
1109 	streamid_t sm_id;
1110 	lcoreid_t  lc_id;
1111 	uint64_t total_recv;
1112 	uint64_t total_xmit;
1113 	uint64_t total_rx_dropped;
1114 	uint64_t total_tx_dropped;
1115 	uint64_t total_rx_nombuf;
1116 	uint64_t tx_dropped;
1117 	uint64_t rx_bad_ip_csum;
1118 	uint64_t rx_bad_l4_csum;
1119 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1120 	uint64_t fwd_cycles;
1121 #endif
1122 	static const char *acc_stats_border = "+++++++++++++++";
1123 
1124 	if (all_ports_started() == 0) {
1125 		printf("Not all ports were started\n");
1126 		return;
1127 	}
1128 	if (test_done) {
1129 		printf("Packet forwarding not started\n");
1130 		return;
1131 	}
1132 	printf("Telling cores to stop...");
1133 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1134 		fwd_lcores[lc_id]->stopped = 1;
1135 	printf("\nWaiting for lcores to finish...\n");
1136 	rte_eal_mp_wait_lcore();
1137 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1138 	if (port_fwd_end != NULL) {
1139 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1140 			pt_id = fwd_ports_ids[i];
1141 			(*port_fwd_end)(pt_id);
1142 		}
1143 	}
1144 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1145 	fwd_cycles = 0;
1146 #endif
1147 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1148 		if (cur_fwd_config.nb_fwd_streams >
1149 		    cur_fwd_config.nb_fwd_ports) {
1150 			fwd_stream_stats_display(sm_id);
1151 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1152 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1153 		} else {
1154 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1155 				fwd_streams[sm_id];
1156 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1157 				fwd_streams[sm_id];
1158 		}
1159 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1160 		tx_dropped = (uint64_t) (tx_dropped +
1161 					 fwd_streams[sm_id]->fwd_dropped);
1162 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1163 
1164 		rx_bad_ip_csum =
1165 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1166 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1167 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1168 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1169 							rx_bad_ip_csum;
1170 
1171 		rx_bad_l4_csum =
1172 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1173 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1174 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1175 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1176 							rx_bad_l4_csum;
1177 
1178 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1179 		fwd_cycles = (uint64_t) (fwd_cycles +
1180 					 fwd_streams[sm_id]->core_cycles);
1181 #endif
1182 	}
1183 	total_recv = 0;
1184 	total_xmit = 0;
1185 	total_rx_dropped = 0;
1186 	total_tx_dropped = 0;
1187 	total_rx_nombuf  = 0;
1188 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1189 		pt_id = fwd_ports_ids[i];
1190 
1191 		port = &ports[pt_id];
1192 		rte_eth_stats_get(pt_id, &stats);
1193 		stats.ipackets -= port->stats.ipackets;
1194 		port->stats.ipackets = 0;
1195 		stats.opackets -= port->stats.opackets;
1196 		port->stats.opackets = 0;
1197 		stats.ibytes   -= port->stats.ibytes;
1198 		port->stats.ibytes = 0;
1199 		stats.obytes   -= port->stats.obytes;
1200 		port->stats.obytes = 0;
1201 		stats.imissed  -= port->stats.imissed;
1202 		port->stats.imissed = 0;
1203 		stats.oerrors  -= port->stats.oerrors;
1204 		port->stats.oerrors = 0;
1205 		stats.rx_nombuf -= port->stats.rx_nombuf;
1206 		port->stats.rx_nombuf = 0;
1207 		stats.fdirmatch -= port->stats.fdirmatch;
1208 		port->stats.fdirmatch = 0;
1209 		stats.fdirmiss -= port->stats.fdirmiss;
1210 		port->stats.fdirmiss = 0;
1211 
1212 		total_recv += stats.ipackets;
1213 		total_xmit += stats.opackets;
1214 		total_rx_dropped += stats.imissed;
1215 		total_tx_dropped += port->tx_dropped;
1216 		total_rx_nombuf  += stats.rx_nombuf;
1217 
1218 		fwd_port_stats_display(pt_id, &stats);
1219 	}
1220 	printf("\n  %s Accumulated forward statistics for all ports"
1221 	       "%s\n",
1222 	       acc_stats_border, acc_stats_border);
1223 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1224 	       "%-"PRIu64"\n"
1225 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1226 	       "%-"PRIu64"\n",
1227 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1228 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1229 	if (total_rx_nombuf > 0)
1230 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1231 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1232 	       "%s\n",
1233 	       acc_stats_border, acc_stats_border);
1234 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1235 	if (total_recv > 0)
1236 		printf("\n  CPU cycles/packet=%u (total cycles="
1237 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1238 		       (unsigned int)(fwd_cycles / total_recv),
1239 		       fwd_cycles, total_recv);
1240 #endif
1241 	printf("\nDone.\n");
1242 	test_done = 1;
1243 }
1244 
1245 void
1246 dev_set_link_up(portid_t pid)
1247 {
1248 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1249 		printf("\nFailed to set link up.\n");
1250 }
1251 
1252 void
1253 dev_set_link_down(portid_t pid)
1254 {
1255 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1256 		printf("\nFailed to set link down.\n");
1257 }
1258 
1259 static int
1260 all_ports_started(void)
1261 {
1262 	portid_t pi;
1263 	struct rte_port *port;
1264 
1265 	FOREACH_PORT(pi, ports) {
1266 		port = &ports[pi];
1267 		/* Check if there is a port which is not started */
1268 		if (port->port_status != RTE_PORT_STARTED)
1269 			return 0;
1270 	}
1271 
1272 	/* All ports are started */
1273 	return 1;
1274 }
1275 
1276 int
1277 all_ports_stopped(void)
1278 {
1279 	portid_t pi;
1280 	struct rte_port *port;
1281 
1282 	FOREACH_PORT(pi, ports) {
1283 		port = &ports[pi];
1284 		if (port->port_status != RTE_PORT_STOPPED)
1285 			return 0;
1286 	}
1287 
1288 	return 1;
1289 }
1290 
1291 int
1292 port_is_started(portid_t port_id)
1293 {
1294 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1295 		return 0;
1296 
1297 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1298 		return 0;
1299 
1300 	return 1;
1301 }
1302 
1303 static int
1304 port_is_closed(portid_t port_id)
1305 {
1306 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1307 		return 0;
1308 
1309 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1310 		return 0;
1311 
1312 	return 1;
1313 }
1314 
1315 int
1316 start_port(portid_t pid)
1317 {
1318 	int diag, need_check_link_status = -1;
1319 	portid_t pi;
1320 	queueid_t qi;
1321 	struct rte_port *port;
1322 	struct ether_addr mac_addr;
1323 
1324 	if (test_done == 0) {
1325 		printf("Please stop forwarding first\n");
1326 		return -1;
1327 	}
1328 
1329 	if (port_id_is_invalid(pid, ENABLED_WARN))
1330 		return 0;
1331 
1332 	if (init_fwd_streams() < 0) {
1333 		printf("Fail from init_fwd_streams()\n");
1334 		return -1;
1335 	}
1336 
1337 	if(dcb_config)
1338 		dcb_test = 1;
1339 	FOREACH_PORT(pi, ports) {
1340 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1341 			continue;
1342 
1343 		need_check_link_status = 0;
1344 		port = &ports[pi];
1345 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1346 						 RTE_PORT_HANDLING) == 0) {
1347 			printf("Port %d is not stopped\n", pi);
1348 			continue;
1349 		}
1350 
1351 		if (port->need_reconfig > 0) {
1352 			port->need_reconfig = 0;
1353 
1354 			printf("Configuring Port %d (socket %u)\n", pi,
1355 					port->socket_id);
1356 			/* configure port */
1357 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1358 						&(port->dev_conf));
1359 			if (diag != 0) {
1360 				if (rte_atomic16_cmpset(&(port->port_status),
1361 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1362 					printf("Port %d can not be set back "
1363 							"to stopped\n", pi);
1364 				printf("Fail to configure port %d\n", pi);
1365 				/* try to reconfigure port next time */
1366 				port->need_reconfig = 1;
1367 				return -1;
1368 			}
1369 		}
1370 		if (port->need_reconfig_queues > 0) {
1371 			port->need_reconfig_queues = 0;
1372 			/* setup tx queues */
1373 			for (qi = 0; qi < nb_txq; qi++) {
1374 				if ((numa_support) &&
1375 					(txring_numa[pi] != NUMA_NO_CONFIG))
1376 					diag = rte_eth_tx_queue_setup(pi, qi,
1377 						nb_txd, txring_numa[pi],
1378 						&(port->tx_conf));
1379 				else
1380 					diag = rte_eth_tx_queue_setup(pi, qi,
1381 						nb_txd, port->socket_id,
1382 						&(port->tx_conf));
1383 
1384 				if (diag == 0)
1385 					continue;
1386 
1387 				/* Fail to setup tx queue, return */
1388 				if (rte_atomic16_cmpset(&(port->port_status),
1389 							RTE_PORT_HANDLING,
1390 							RTE_PORT_STOPPED) == 0)
1391 					printf("Port %d can not be set back "
1392 							"to stopped\n", pi);
1393 				printf("Fail to configure port %d tx queues\n", pi);
1394 				/* try to reconfigure queues next time */
1395 				port->need_reconfig_queues = 1;
1396 				return -1;
1397 			}
1398 			/* setup rx queues */
1399 			for (qi = 0; qi < nb_rxq; qi++) {
1400 				if ((numa_support) &&
1401 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1402 					struct rte_mempool * mp =
1403 						mbuf_pool_find(rxring_numa[pi]);
1404 					if (mp == NULL) {
1405 						printf("Failed to setup RX queue: "
1406 							"no mempool allocated "
1407 							"on socket %d\n",
1408 							rxring_numa[pi]);
1409 						return -1;
1410 					}
1411 
1412 					diag = rte_eth_rx_queue_setup(pi, qi,
1413 					     nb_rxd, rxring_numa[pi],
1414 					     &(port->rx_conf), mp);
1415 				}
1416 				else
1417 					diag = rte_eth_rx_queue_setup(pi, qi,
1418 					     nb_rxd, port->socket_id,
1419 					     &(port->rx_conf),
1420 					     mbuf_pool_find(port->socket_id));
1421 
1422 				if (diag == 0)
1423 					continue;
1424 
1426 				/* Fail to setup rx queue, return */
1427 				if (rte_atomic16_cmpset(&(port->port_status),
1428 							RTE_PORT_HANDLING,
1429 							RTE_PORT_STOPPED) == 0)
1430 					printf("Port %d can not be set back "
1431 							"to stopped\n", pi);
1432 				printf("Fail to configure port %d rx queues\n", pi);
1433 				/* try to reconfigure queues next time */
1434 				port->need_reconfig_queues = 1;
1435 				return -1;
1436 			}
1437 		}
1438 		/* start port */
1439 		if (rte_eth_dev_start(pi) < 0) {
1440 			printf("Fail to start port %d\n", pi);
1441 
1442 			/* Failed to start the port; set it back to stopped */
1443 			if (rte_atomic16_cmpset(&(port->port_status),
1444 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1445 				printf("Port %d can not be set back to "
1446 							"stopped\n", pi);
1447 			continue;
1448 		}
1449 
1450 		if (rte_atomic16_cmpset(&(port->port_status),
1451 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1452 			printf("Port %d can not be set into started\n", pi);
1453 
1454 		rte_eth_macaddr_get(pi, &mac_addr);
1455 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1456 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1457 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1458 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1459 
1460 		/* at least one port started, need checking link status */
1461 		need_check_link_status = 1;
1462 	}
1463 
1464 	if (need_check_link_status == 1 && !no_link_check)
1465 		check_all_ports_link_status(RTE_PORT_ALL);
1466 	else if (need_check_link_status == 0)
1467 		printf("Please stop the ports first\n");
1468 
1469 	printf("Done\n");
1470 	return 0;
1471 }
1472 
1473 void
1474 stop_port(portid_t pid)
1475 {
1476 	portid_t pi;
1477 	struct rte_port *port;
1478 	int need_check_link_status = 0;
1479 
1480 	if (test_done == 0) {
1481 		printf("Please stop forwarding first\n");
1482 		return;
1483 	}
1484 	if (dcb_test) {
1485 		dcb_test = 0;
1486 		dcb_config = 0;
1487 	}
1488 
1489 	if (port_id_is_invalid(pid, ENABLED_WARN))
1490 		return;
1491 
1492 	printf("Stopping ports...\n");
1493 
1494 	FOREACH_PORT(pi, ports) {
1495 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1496 			continue;
1497 
1498 		port = &ports[pi];
1499 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1500 						RTE_PORT_HANDLING) == 0)
1501 			continue;
1502 
1503 		rte_eth_dev_stop(pi);
1504 
1505 		if (rte_atomic16_cmpset(&(port->port_status),
1506 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1507 			printf("Port %d can not be set into stopped\n", pi);
1508 		need_check_link_status = 1;
1509 	}
1510 	if (need_check_link_status && !no_link_check)
1511 		check_all_ports_link_status(RTE_PORT_ALL);
1512 
1513 	printf("Done\n");
1514 }
1515 
1516 void
1517 close_port(portid_t pid)
1518 {
1519 	portid_t pi;
1520 	struct rte_port *port;
1521 
1522 	if (test_done == 0) {
1523 		printf("Please stop forwarding first\n");
1524 		return;
1525 	}
1526 
1527 	if (port_id_is_invalid(pid, ENABLED_WARN))
1528 		return;
1529 
1530 	printf("Closing ports...\n");
1531 
1532 	FOREACH_PORT(pi, ports) {
1533 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1534 			continue;
1535 
1536 		port = &ports[pi];
1537 		if (rte_atomic16_cmpset(&(port->port_status),
1538 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1539 			printf("Port %d is not stopped\n", pi);
1540 			continue;
1541 		}
1542 
1543 		rte_eth_dev_close(pi);
1544 
1545 		if (rte_atomic16_cmpset(&(port->port_status),
1546 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1547 			printf("Port %d cannot be set into closed\n", pi);
1548 	}
1549 
1550 	printf("Done\n");
1551 }
1552 
1553 void
1554 attach_port(char *identifier)
1555 {
1556 	portid_t i, j, pi = 0;
1557 
1558 	printf("Attaching a new port...\n");
1559 
1560 	if (identifier == NULL) {
1561 		printf("Invalid parameters are specified\n");
1562 		return;
1563 	}
1564 
1565 	if (test_done == 0) {
1566 		printf("Please stop forwarding first\n");
1567 		return;
1568 	}
1569 
1570 	if (rte_eth_dev_attach(identifier, &pi))
1571 		return;
1572 
1573 	ports[pi].enabled = 1;
1574 	reconfig(pi, rte_eth_dev_socket_id(pi));
1575 	rte_eth_promiscuous_enable(pi);
1576 
1577 	nb_ports = rte_eth_dev_count();
1578 
1579 	/* set_default_fwd_ports_config(); */
1580 	bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1581 	i = 0;
1582 	FOREACH_PORT(j, ports) {
1583 		fwd_ports_ids[i] = j;
1584 		i++;
1585 	}
1586 	nb_cfg_ports = nb_ports;
1587 	nb_fwd_ports++;
1588 
1589 	ports[pi].port_status = RTE_PORT_STOPPED;
1590 
1591 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1592 	printf("Done\n");
1593 }
1594 
1595 void
1596 detach_port(uint8_t port_id)
1597 {
1598 	portid_t i, pi = 0;
1599 	char name[RTE_ETH_NAME_MAX_LEN];
1600 
1601 	printf("Detaching a port...\n");
1602 
1603 	if (!port_is_closed(port_id)) {
1604 		printf("Please close port first\n");
1605 		return;
1606 	}
1607 
1608 	rte_eth_promiscuous_disable(port_id);
1609 
1610 	if (rte_eth_dev_detach(port_id, name))
1611 		return;
1612 
1613 	ports[port_id].enabled = 0;
1614 	nb_ports = rte_eth_dev_count();
1615 
1616 	/* set_default_fwd_ports_config(); */
1617 	bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
1618 	i = 0;
1619 	FOREACH_PORT(pi, ports) {
1620 		fwd_ports_ids[i] = pi;
1621 		i++;
1622 	}
1623 	nb_cfg_ports = nb_ports;
1624 	nb_fwd_ports--;
1625 
1626 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1627 			name, nb_ports);
1628 	printf("Done\n");
1629 	return;
1630 }
1631 
1632 void
1633 pmd_test_exit(void)
1634 {
1635 	portid_t pt_id;
1636 
1637 	FOREACH_PORT(pt_id, ports) {
1638 		printf("Stopping port %d...", pt_id);
1639 		fflush(stdout);
1640 		rte_eth_dev_close(pt_id);
1641 		printf("done\n");
1642 	}
1643 	printf("bye...\n");
1644 }
1645 
1646 typedef void (*cmd_func_t)(void);
1647 struct pmd_test_command {
1648 	const char *cmd_name;
1649 	cmd_func_t cmd_func;
1650 };
1651 
1652 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1653 
1654 /* Check the link status of all ports for up to 9 s, and print the final status. */
1655 static void
1656 check_all_ports_link_status(uint32_t port_mask)
1657 {
1658 #define CHECK_INTERVAL 100 /* 100ms */
1659 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1660 	uint8_t portid, count, all_ports_up, print_flag = 0;
1661 	struct rte_eth_link link;
1662 
1663 	printf("Checking link statuses...\n");
1664 	fflush(stdout);
1665 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1666 		all_ports_up = 1;
1667 		FOREACH_PORT(portid, ports) {
1668 			if ((port_mask & (1 << portid)) == 0)
1669 				continue;
1670 			memset(&link, 0, sizeof(link));
1671 			rte_eth_link_get_nowait(portid, &link);
1672 			/* print link status if flag set */
1673 			if (print_flag == 1) {
1674 				if (link.link_status)
1675 					printf("Port %d Link Up - speed %u "
1676 						"Mbps - %s\n", (uint8_t)portid,
1677 						(unsigned)link.link_speed,
1678 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1679 					("full-duplex") : ("half-duplex"));
1680 				else
1681 					printf("Port %d Link Down\n",
1682 						(uint8_t)portid);
1683 				continue;
1684 			}
1685 			/* clear all_ports_up flag if any link down */
1686 			if (link.link_status == 0) {
1687 				all_ports_up = 0;
1688 				break;
1689 			}
1690 		}
1691 		/* after finally printing all link status, get out */
1692 		if (print_flag == 1)
1693 			break;
1694 
1695 		if (all_ports_up == 0) {
1696 			fflush(stdout);
1697 			rte_delay_ms(CHECK_INTERVAL);
1698 		}
1699 
1700 		/* set the print_flag if all ports up or timeout */
1701 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1702 			print_flag = 1;
1703 		}
1704 	}
1705 }
1706 
1707 static int
1708 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1709 {
1710 	uint16_t i;
1711 	int diag;
1712 	uint8_t mapping_found = 0;
1713 
1714 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1715 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1716 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1717 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1718 					tx_queue_stats_mappings[i].queue_id,
1719 					tx_queue_stats_mappings[i].stats_counter_id);
1720 			if (diag != 0)
1721 				return diag;
1722 			mapping_found = 1;
1723 		}
1724 	}
1725 	if (mapping_found)
1726 		port->tx_queue_stats_mapping_enabled = 1;
1727 	return 0;
1728 }
1729 
1730 static int
1731 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1732 {
1733 	uint16_t i;
1734 	int diag;
1735 	uint8_t mapping_found = 0;
1736 
1737 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1738 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1739 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1740 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1741 					rx_queue_stats_mappings[i].queue_id,
1742 					rx_queue_stats_mappings[i].stats_counter_id);
1743 			if (diag != 0)
1744 				return diag;
1745 			mapping_found = 1;
1746 		}
1747 	}
1748 	if (mapping_found)
1749 		port->rx_queue_stats_mapping_enabled = 1;
1750 	return 0;
1751 }
1752 
1753 static void
1754 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1755 {
1756 	int diag = 0;
1757 
1758 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1759 	if (diag != 0) {
1760 		if (diag == -ENOTSUP) {
1761 			port->tx_queue_stats_mapping_enabled = 0;
1762 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1763 		}
1764 		else
1765 			rte_exit(EXIT_FAILURE,
1766 					"set_tx_queue_stats_mapping_registers "
1767 					"failed for port id=%d diag=%d\n",
1768 					pi, diag);
1769 	}
1770 
1771 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1772 	if (diag != 0) {
1773 		if (diag == -ENOTSUP) {
1774 			port->rx_queue_stats_mapping_enabled = 0;
1775 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1776 		}
1777 		else
1778 			rte_exit(EXIT_FAILURE,
1779 					"set_rx_queue_stats_mapping_registers "
1780 					"failed for port id=%d diag=%d\n",
1781 					pi, diag);
1782 	}
1783 }
1784 
1785 static void
1786 rxtx_port_config(struct rte_port *port)
1787 {
1788 	port->rx_conf = port->dev_info.default_rxconf;
1789 	port->tx_conf = port->dev_info.default_txconf;
1790 
1791 	/* Check if any RX/TX parameters have been passed */
1792 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1793 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1794 
1795 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1796 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1797 
1798 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1799 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1800 
1801 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1802 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1803 
1804 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1805 		port->rx_conf.rx_drop_en = rx_drop_en;
1806 
1807 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1808 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1809 
1810 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1811 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1812 
1813 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1814 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1815 
1816 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1817 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1818 
1819 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1820 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1821 
1822 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1823 		port->tx_conf.txq_flags = txq_flags;
1824 }
1825 
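/*
 * Build the default rte_eth_conf of every probed port: RSS when several
 * RX queues are requested, VMDq RSS when the port exposes VFs, plain
 * single-queue mode otherwise.
 */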
1826 void
1827 init_port_config(void)
1828 {
1829 	portid_t pid;
1830 	struct rte_port *port;
1831 
1832 	FOREACH_PORT(pid, ports) {
1833 		port = &ports[pid];
1834 		port->dev_conf.rxmode = rx_mode;
1835 		port->dev_conf.fdir_conf = fdir_conf;
1836 		if (nb_rxq > 1) {
1837 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1838 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1839 		} else {
1840 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1841 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1842 		}
1843 
1844 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1845 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1846 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1847 			else
1848 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1849 		}
1850 
1851 		if (port->dev_info.max_vfs != 0) {
1852 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1853 				port->dev_conf.rxmode.mq_mode =
1854 					ETH_MQ_RX_VMDQ_RSS;
1855 			else
1856 				port->dev_conf.rxmode.mq_mode =
1857 					ETH_MQ_RX_NONE;
1858 
1859 			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1860 		}
1861 
1862 		rxtx_port_config(port);
1863 
1864 		rte_eth_macaddr_get(pid, &port->eth_addr);
1865 
1866 		map_port_queue_stats_mapping_registers(pid, port);
1867 #ifdef RTE_NIC_BYPASS
1868 		rte_eth_dev_bypass_init(pid);
1869 #endif
1870 	}
1871 }
1872 
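/*
 * VLAN tags used to fill the VMDq+DCB pool map in get_eth_dcb_conf()
 * and the port VLAN filter table in init_port_dcb_config().
 */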
1873 const uint16_t vlan_tags[] = {
1874 		0,  1,  2,  3,  4,  5,  6,  7,
1875 		8,  9, 10, 11,  12, 13, 14, 15,
1876 		16, 17, 18, 19, 20, 21, 22, 23,
1877 		24, 25, 26, 27, 28, 29, 30, 31
1878 };
1879 
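/*
 * Translate a testpmd dcb_config into the rte_eth_conf multi-queue
 * settings.  Two layouts are produced: VMDq+DCB (DCB_VT_ENABLED) and
 * plain DCB.  The pool count is chosen as the complement of the TC
 * count so the queues stay fully used: 4 TCs * 32 pools = 8 TCs *
 * 16 pools = 128 queues, matching nb_rxq/nb_txq as forced in
 * init_port_dcb_config() below.
 */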
1880 static int
1881 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1882 {
1883 	uint8_t i;
1884 
1885 	/*
1886 	 * Build up the correct configuration for DCB+VT based on the VLAN tags
1887 	 * array given above, and the number of traffic classes available for use.
1888 	 */
1889 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1890 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1891 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1892 
1893 		/* VMDQ+DCB RX and TX configurations */
1894 		vmdq_rx_conf.enable_default_pool = 0;
1895 		vmdq_rx_conf.default_pool = 0;
1896 		vmdq_rx_conf.nb_queue_pools =
1897 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1898 		vmdq_tx_conf.nb_queue_pools =
1899 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1900 
1901 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
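		/*
		 * Map each VLAN tag round-robin onto a single pool: the
		 * pools field is a bitmask, so 1 << (i % nb_queue_pools)
		 * selects exactly one pool per tag.
		 */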
1902 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1903 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1904 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1905 		}
1906 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1907 			vmdq_rx_conf.dcb_queue[i] = i;
1908 			vmdq_tx_conf.dcb_queue[i] = i;
1909 		}
1910 
1911 		/* Set the RX and TX multi-queue modes to VMDq+DCB */
1912 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1913 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1914 		if (dcb_conf->pfc_en)
1915 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1916 		else
1917 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1918 
1919 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1920 				sizeof(struct rte_eth_vmdq_dcb_conf)));
1921 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1922 				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1923 	}
1924 	else {
1925 		struct rte_eth_dcb_rx_conf rx_conf;
1926 		struct rte_eth_dcb_tx_conf tx_conf;
1927 
1928 		/* queue mapping configuration of DCB RX and TX */
1929 		if (dcb_conf->num_tcs == ETH_4_TCS)
1930 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1931 		else
1932 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1933 
1934 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1935 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1936 
1937 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1938 			rx_conf.dcb_queue[i] = i;
1939 			tx_conf.dcb_queue[i] = i;
1940 		}
1941 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1942 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1943 		if (dcb_conf->pfc_en)
1944 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1945 		else
1946 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1947 
1948 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1949 				sizeof(struct rte_eth_dcb_rx_conf)));
1950 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1951 				sizeof(struct rte_eth_dcb_tx_conf)));
1952 	}
1953 
1954 	return 0;
1955 }
1956 
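/*
 * Switch one port to DCB mode: force 128 RX/TX queues, build the DCB
 * configuration, then enable HW VLAN filtering and add every tag of
 * vlan_tags[] to the port's filter table.
 *
 * A minimal usage sketch, assuming a caller that fills a dcb_config
 * with the fields used below:
 *
 *	struct dcb_config cfg = {
 *		.dcb_mode = DCB_VT_ENABLED,
 *		.num_tcs = ETH_4_TCS,
 *		.pfc_en = 0,
 *	};
 *	if (init_port_dcb_config(pid, &cfg) != 0)
 *		printf("DCB configuration failed\n");
 */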
1957 int
1958 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1959 {
1960 	struct rte_eth_conf port_conf;
1961 	struct rte_port *rte_port;
1962 	int retval;
1963 	uint16_t nb_vlan;
1964 	uint16_t i;
1965 
1966 	/* RX and TX queue configuration in DCB mode */
1967 	nb_rxq = 128;
1968 	nb_txq = 128;
1969 	rx_free_thresh = 64;
1970 
1971 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1972 	/* Enter DCB configuration status */
1973 	dcb_config = 1;
1974 
1975 	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1976 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
1977 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1978 	if (retval < 0)
1979 		return retval;
1980 
1981 	rte_port = &ports[pid];
1982 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1983 
1984 	rxtx_port_config(rte_port);
1985 	/* VLAN filter */
1986 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1987 	for (i = 0; i < nb_vlan; i++) {
1988 		rx_vft_set(pid, vlan_tags[i], 1);
1989 	}
1990 
1991 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1992 	map_port_queue_stats_mapping_registers(pid, rte_port);
1993 
1994 	rte_port->dcb_flag = 1;
1995 
1996 	return 0;
1997 }
1998 
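/*
 * Allocate the zeroed array of port descriptors for all RTE_MAX_ETHPORTS
 * possible ports and mark the probed ones as enabled.
 */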
1999 static void
2000 init_port(void)
2001 {
2002 	portid_t pid;
2003 
2004 	/* Configuration of Ethernet ports. */
2005 	ports = rte_zmalloc("testpmd: ports",
2006 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2007 			    RTE_CACHE_LINE_SIZE);
2008 	if (ports == NULL) {
2009 		rte_exit(EXIT_FAILURE,
2010 				"rte_zmalloc(%d struct rte_port) failed\n",
2011 				RTE_MAX_ETHPORTS);
2012 	}
2013 
2014 	/* mark the probed ports as enabled */
2015 	for (pid = 0; pid < nb_ports; pid++)
2016 		ports[pid].enabled = 1;
2017 }
2018 
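/*
 * Entry point: initialize the EAL, allocate and configure the probed
 * ports, start them in promiscuous mode, then either hand control to
 * the interactive command line or start forwarding right away and wait
 * for the user to press enter.
 */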
2019 int
2020 main(int argc, char** argv)
2021 {
2022 	int  diag;
2023 	uint8_t port_id;
2024 
2025 	diag = rte_eal_init(argc, argv);
2026 	if (diag < 0)
2027 		rte_panic("Cannot init EAL\n");
2028 
2029 	nb_ports = (portid_t) rte_eth_dev_count();
2030 	if (nb_ports == 0)
2031 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2032 
2033 	/* allocate port structures, and init them */
2034 	init_port();
2035 
2036 	set_def_fwd_config();
2037 	if (nb_lcores == 0)
2038 		rte_panic("Empty set of forwarding logical cores - check the "
2039 			  "core mask supplied in the command parameters\n");
2040 
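	/*
	 * rte_eal_init() returned the number of arguments it consumed;
	 * skip past them so that only testpmd's own options are parsed.
	 */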
2041 	argc -= diag;
2042 	argv += diag;
2043 	if (argc > 1)
2044 		launch_args_parse(argc, argv);
2045 
2046 	if (nb_rxq > nb_txq)
2047 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2048 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2049 		       nb_rxq, nb_txq);
2050 
2051 	init_config();
2052 	if (start_port(RTE_PORT_ALL) != 0)
2053 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2054 
2055 	/* set all ports to promiscuous mode by default */
2056 	FOREACH_PORT(port_id, ports)
2057 		rte_eth_promiscuous_enable(port_id);
2058 
2059 #ifdef RTE_LIBRTE_CMDLINE
2060 	if (interactive == 1) {
2061 		if (auto_start) {
2062 			printf("Start automatic packet forwarding\n");
2063 			start_packet_forwarding(0);
2064 		}
2065 		prompt();
2066 	} else
2067 #endif
2068 	{
2069 		char c;
2070 		int rc;
2071 
2072 		printf("No interactive command line requested, starting packet forwarding\n");
2073 		start_packet_forwarding(0);
2074 		printf("Press enter to exit\n");
2075 		rc = read(0, &c, 1);
2076 		if (rc < 0)
2077 			return 1;
2078 	}
2079 
2080 	return 0;
2081 }
2082