1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 
83 #include "testpmd.h"
84 
85 uint16_t verbose_level = 0; /**< Silent by default. */
86 
87 /* Use the master lcore for the command line? */
88 uint8_t interactive = 0;
89 uint8_t auto_start = 0;
90 
91 /*
92  * NUMA support configuration.
93  * When set, the NUMA support attempts to dispatch the allocation of the
94  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
95  * probed ports among the CPU sockets 0 and 1.
96  * Otherwise, all memory is allocated from CPU socket 0.
97  */
98 uint8_t numa_support = 0; /**< No numa support by default */
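
/*
 * As the warning in check_socket_id() below notes, NUMA placement is
 * normally configured from the command line with --port-numa-config and
 * --ring-numa-config along with --numa.
 */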
99 
100 /*
101  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
102  * not configured.
103  */
104 uint8_t socket_num = UMA_NO_CONFIG;
105 
106 /*
107  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
108  */
109 uint8_t mp_anon = 0;
110 
111 /*
112  * Record the Ethernet address of peer target ports to which packets are
113  * forwarded.
114  * Must be instantiated with the Ethernet addresses of peer traffic generator
115  * ports.
116  */
117 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
118 portid_t nb_peer_eth_addrs = 0;
119 
120 /*
121  * Probed Target Environment.
122  */
123 struct rte_port *ports;	       /**< For all probed ethernet ports. */
124 portid_t nb_ports;             /**< Number of probed ethernet ports. */
125 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
126 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
127 
128 /*
129  * Test Forwarding Configuration.
130  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
131  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
132  */
133 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
134 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
135 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
136 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
137 
138 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
139 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
140 
141 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
142 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
143 
144 /*
145  * Forwarding engines.
146  */
147 struct fwd_engine * fwd_engines[] = {
148 	&io_fwd_engine,
149 	&mac_fwd_engine,
150 	&mac_swap_engine,
151 	&flow_gen_engine,
152 	&rx_only_engine,
153 	&tx_only_engine,
154 	&csum_fwd_engine,
155 	&icmp_echo_engine,
156 #ifdef RTE_LIBRTE_IEEE1588
157 	&ieee1588_fwd_engine,
158 #endif
159 	NULL,
160 };
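
/*
 * cur_fwd_eng below points at the active entry of this table (IO mode by
 * default); in interactive mode it is switched with the "set fwd <mode>"
 * command, which is handled outside this file.
 */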
161 
162 struct fwd_config cur_fwd_config;
163 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
164 uint32_t retry_enabled;
165 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
166 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
167 
168 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
169 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
170                                       * specified on command-line. */
171 
172 /*
173  * Configuration of packet segments used by the "txonly" processing engine.
174  */
175 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
176 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
177 	TXONLY_DEF_PACKET_LEN,
178 };
179 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
180 
181 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
182 /**< Split policy for packets to TX. */
183 
184 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
185 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
186 
187 /* Whether the current configuration is in DCB mode; 0 means it is not. */
188 uint8_t dcb_config = 0;
189 
190 /* Whether DCB is in testing status. */
191 uint8_t dcb_test = 0;
192 
193 /*
194  * Configurable number of RX/TX queues.
195  */
196 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
197 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
198 
199 /*
200  * Configurable number of RX/TX ring descriptors.
201  */
202 #define RTE_TEST_RX_DESC_DEFAULT 128
203 #define RTE_TEST_TX_DESC_DEFAULT 512
204 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
205 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
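
/*
 * These are start-up defaults only: nb_rxq/nb_txq and nb_rxd/nb_txd are
 * re-applied by init_fwd_streams() and start_port() below, so they can be
 * changed between forwarding runs.
 */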
206 
207 #define RTE_PMD_PARAM_UNSET -1
208 /*
209  * Configurable values of RX and TX ring threshold registers.
210  */
211 
212 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
213 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
214 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
215 
216 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
217 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
218 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
219 
220 /*
221  * Configurable value of RX free threshold.
222  */
223 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
224 
225 /*
226  * Configurable value of RX drop enable.
227  */
228 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
229 
230 /*
231  * Configurable value of TX free threshold.
232  */
233 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
234 
235 /*
236  * Configurable value of TX RS bit threshold.
237  */
238 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
239 
240 /*
241  * Configurable value of TX queue flags.
242  */
243 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
244 
245 /*
246  * Receive Side Scaling (RSS) configuration.
247  */
248 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
249 
250 /*
251  * Port topology configuration
252  */
253 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
254 
255 /*
256  * Avoid flushing all the RX streams before starting forwarding.
257  */
258 uint8_t no_flush_rx = 0; /* flush by default */
259 
260 /*
261  * Avoid checking the link status when starting/stopping a port.
262  */
263 uint8_t no_link_check = 0; /* check by default */
264 
265 /*
266  * NIC bypass mode configuration options.
267  */
268 #ifdef RTE_NIC_BYPASS
269 
270 /* The NIC bypass watchdog timeout. */
271 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
272 
273 #endif
274 
275 /* default period is 1 second */
276 static uint64_t timer_period = 1;
277 
278 /*
279  * Ethernet device configuration.
280  */
281 struct rte_eth_rxmode rx_mode = {
282 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
283 	.split_hdr_size = 0,
284 	.header_split   = 0, /**< Header Split disabled. */
285 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
286 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
287 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
288 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
289 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
290 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
291 };
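
/*
 * Unlike most of the defaults above, hardware VLAN filtering and VLAN
 * stripping start out enabled; init_port_config() below copies this
 * rx_mode into the device configuration of every probed port.
 */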
292 
293 struct rte_fdir_conf fdir_conf = {
294 	.mode = RTE_FDIR_MODE_NONE,
295 	.pballoc = RTE_FDIR_PBALLOC_64K,
296 	.status = RTE_FDIR_REPORT_STATUS,
297 	.mask = {
298 		.vlan_tci_mask = 0x0,
299 		.ipv4_mask     = {
300 			.src_ip = 0xFFFFFFFF,
301 			.dst_ip = 0xFFFFFFFF,
302 		},
303 		.ipv6_mask     = {
304 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
305 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
306 		},
307 		.src_port_mask = 0xFFFF,
308 		.dst_port_mask = 0xFFFF,
309 		.mac_addr_byte_mask = 0xFF,
310 		.tunnel_type_mask = 1,
311 		.tunnel_id_mask = 0xFFFFFFFF,
312 	},
313 	.drop_queue = 127,
314 };
315 
316 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
317 
318 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
319 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
320 
321 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
322 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
323 
324 uint16_t nb_tx_queue_stats_mappings = 0;
325 uint16_t nb_rx_queue_stats_mappings = 0;
326 
327 unsigned max_socket = 0;
328 
329 /* Forward function declarations */
330 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
331 static void check_all_ports_link_status(uint32_t port_mask);
332 
333 /*
334  * Check if all the ports are started.
335  * If yes, return a positive value; if not, return zero.
336  */
337 static int all_ports_started(void);
338 
339 /*
340  * Find next enabled port
341  */
342 portid_t
343 find_next_port(portid_t p, struct rte_port *ports, int size)
344 {
345 	if (ports == NULL)
346 		rte_exit(-EINVAL, "failed to find a next port id\n");
347 
348 	while ((p < size) && (ports[p].enabled == 0))
349 		p++;
350 	return p;
351 }
352 
353 /*
354  * Setup default configuration.
355  */
356 static void
357 set_default_fwd_lcores_config(void)
358 {
359 	unsigned int i;
360 	unsigned int nb_lc;
361 	unsigned int sock_num;
362 
363 	nb_lc = 0;
364 	for (i = 0; i < RTE_MAX_LCORE; i++) {
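		/*
		 * sock_num is a count (socket id + 1), not an id: max_socket
		 * ends up as the number of sockets, so checks such as
		 * "socket_id >= max_socket" in check_socket_id() work.
		 */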
365 		sock_num = rte_lcore_to_socket_id(i) + 1;
366 		if (sock_num > max_socket) {
367 			if (sock_num > RTE_MAX_NUMA_NODES)
368 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
369 			max_socket = sock_num;
370 		}
371 		if (!rte_lcore_is_enabled(i))
372 			continue;
373 		if (i == rte_get_master_lcore())
374 			continue;
375 		fwd_lcores_cpuids[nb_lc++] = i;
376 	}
377 	nb_lcores = (lcoreid_t) nb_lc;
378 	nb_cfg_lcores = nb_lcores;
379 	nb_fwd_lcores = 1;
380 }
381 
382 static void
383 set_def_peer_eth_addrs(void)
384 {
385 	portid_t i;
386 
387 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
388 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
389 		peer_eth_addrs[i].addr_bytes[5] = i;
390 	}
391 }
392 
393 static void
394 set_default_fwd_ports_config(void)
395 {
396 	portid_t pt_id;
397 
398 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
399 		fwd_ports_ids[pt_id] = pt_id;
400 
401 	nb_cfg_ports = nb_ports;
402 	nb_fwd_ports = nb_ports;
403 }
404 
405 void
406 set_def_fwd_config(void)
407 {
408 	set_default_fwd_lcores_config();
409 	set_def_peer_eth_addrs();
410 	set_default_fwd_ports_config();
411 }
412 
413 /*
414  * Configuration initialisation done once at init time.
415  */
416 static void
417 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
418 		 unsigned int socket_id)
419 {
420 	char pool_name[RTE_MEMPOOL_NAMESIZE];
421 	struct rte_mempool *rte_mp = NULL;
422 	uint32_t mb_size;
423 
424 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
425 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
426 
427 	RTE_LOG(INFO, USER1,
428 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
429 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
430 
431 #ifdef RTE_LIBRTE_PMD_XENVIRT
432 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
433 		(unsigned) mb_mempool_cache,
434 		sizeof(struct rte_pktmbuf_pool_private),
435 		rte_pktmbuf_pool_init, NULL,
436 		rte_pktmbuf_init, NULL,
437 		socket_id, 0);
438 #endif
439 
440 	/* If the XEN allocation above failed, fall back to normal allocation. */
441 	if (rte_mp == NULL) {
442 		if (mp_anon != 0) {
443 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
444 				mb_size, (unsigned) mb_mempool_cache,
445 				sizeof(struct rte_pktmbuf_pool_private),
446 				socket_id, 0);
447 			if (rte_mp == NULL || rte_mempool_populate_anon(rte_mp) == 0) {
448 				rte_mempool_free(rte_mp);
449 				rte_mp = NULL;	/* the common error path below exits */
450 			} else {
451 				rte_pktmbuf_pool_init(rte_mp, NULL);
452 				rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
453 			}
454 		} else {
455 			/* wrapper to rte_mempool_create() */
456 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
457 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
458 		}
459 	}
460 
461 	if (rte_mp == NULL) {
462 		rte_exit(EXIT_FAILURE,
463 			"Creation of mbuf pool for socket %u failed: %s\n",
464 			socket_id, rte_strerror(rte_errno));
465 	} else if (verbose_level > 0) {
466 		rte_mempool_dump(stdout, rte_mp);
467 	}
468 }
469 
470 /*
471  * Check whether the given socket id is valid in NUMA mode;
472  * if valid, return 0, else return -1.
473  */
474 static int
475 check_socket_id(const unsigned int socket_id)
476 {
477 	static int warning_once = 0;
478 
479 	if (socket_id >= max_socket) {
480 		if (!warning_once && numa_support)
481 			printf("Warning: NUMA should be configured manually by"
482 			       " using --port-numa-config and"
483 			       " --ring-numa-config parameters along with"
484 			       " --numa.\n");
485 		warning_once = 1;
486 		return -1;
487 	}
488 	return 0;
489 }
490 
491 static void
492 init_config(void)
493 {
494 	portid_t pid;
495 	struct rte_port *port;
496 	struct rte_mempool *mbp;
497 	unsigned int nb_mbuf_per_pool;
498 	lcoreid_t  lc_id;
499 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
500 
501 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
502 	/* Configuration of logical cores. */
503 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
504 				sizeof(struct fwd_lcore *) * nb_lcores,
505 				RTE_CACHE_LINE_SIZE);
506 	if (fwd_lcores == NULL) {
507 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
508 							"failed\n", nb_lcores);
509 	}
510 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
511 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
512 					       sizeof(struct fwd_lcore),
513 					       RTE_CACHE_LINE_SIZE);
514 		if (fwd_lcores[lc_id] == NULL) {
515 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
516 								"failed\n");
517 		}
518 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
519 	}
520 
521 	/*
522 	 * Create mbuf pools.
523 	 * If NUMA support is disabled, create a single mbuf pool in
524 	 * socket 0 memory by default.
525 	 * Otherwise, create an mbuf pool in the memory of each socket in use.
526 	 *
527 	 * Size the pools for the maximum number of RX and TX descriptors,
528 	 * so that nb_rxd and nb_txd can be configured at run time.
529 	 */
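	/*
	 * A sizing sketch of the default case below (RTE_TEST_RX_DESC_MAX,
	 * RTE_TEST_TX_DESC_MAX and MAX_PKT_BURST come from testpmd.h):
	 * enough mbufs to fill the largest possible RX and TX rings, plus
	 * one mempool cache per forwarding lcore and one extra burst; in
	 * UMA mode this is then scaled by RTE_MAX_ETHPORTS, since all
	 * ports share the single pool.
	 */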
530 	if (param_total_num_mbufs)
531 		nb_mbuf_per_pool = param_total_num_mbufs;
532 	else {
533 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
534 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
535 
536 		if (!numa_support)
537 			nb_mbuf_per_pool =
538 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
539 	}
540 
541 	if (!numa_support) {
542 		if (socket_num == UMA_NO_CONFIG)
543 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
544 		else
545 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
546 						 socket_num);
547 	}
548 
549 	FOREACH_PORT(pid, ports) {
550 		port = &ports[pid];
551 		rte_eth_dev_info_get(pid, &port->dev_info);
552 
553 		if (numa_support) {
554 			if (port_numa[pid] != NUMA_NO_CONFIG)
555 				port_per_socket[port_numa[pid]]++;
556 			else {
557 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
558 
559 				/* if socket_id is invalid, set to 0 */
560 				if (check_socket_id(socket_id) < 0)
561 					socket_id = 0;
562 				port_per_socket[socket_id]++;
563 			}
564 		}
565 
566 		/* set flag to initialize port/queue */
567 		port->need_reconfig = 1;
568 		port->need_reconfig_queues = 1;
569 	}
570 
571 	if (numa_support) {
572 		uint8_t i;
573 		unsigned int nb_mbuf;
574 
575 		if (param_total_num_mbufs)
576 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
577 
578 		for (i = 0; i < max_socket; i++) {
579 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
580 			if (nb_mbuf)
581 				mbuf_pool_create(mbuf_data_size,
582 						nb_mbuf, i);
583 		}
584 	}
585 	init_port_config();
586 
587 	/*
588 	 * Record which mbuf pool each logical core should use, if needed.
589 	 */
590 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
591 		mbp = mbuf_pool_find(
592 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
593 
594 		if (mbp == NULL)
595 			mbp = mbuf_pool_find(0);
596 		fwd_lcores[lc_id]->mbp = mbp;
597 	}
598 
599 	/* Configuration of packet forwarding streams. */
600 	if (init_fwd_streams() < 0)
601 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
602 
603 	fwd_config_setup();
604 }
605 
606 
607 void
608 reconfig(portid_t new_port_id, unsigned socket_id)
609 {
610 	struct rte_port *port;
611 
612 	/* Reconfiguration of Ethernet ports. */
613 	port = &ports[new_port_id];
614 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
615 
616 	/* set flag to initialize port/queue */
617 	port->need_reconfig = 1;
618 	port->need_reconfig_queues = 1;
619 	port->socket_id = socket_id;
620 
621 	init_port_config();
622 }
623 
624 
625 int
626 init_fwd_streams(void)
627 {
628 	portid_t pid;
629 	struct rte_port *port;
630 	streamid_t sm_id, nb_fwd_streams_new;
631 	queueid_t q;
632 
633 	/* Set the socket id of each port according to the NUMA configuration. */
634 	FOREACH_PORT(pid, ports) {
635 		port = &ports[pid];
636 		if (nb_rxq > port->dev_info.max_rx_queues) {
637 			printf("Fail: nb_rxq(%d) is greater than "
638 				"max_rx_queues(%d)\n", nb_rxq,
639 				port->dev_info.max_rx_queues);
640 			return -1;
641 		}
642 		if (nb_txq > port->dev_info.max_tx_queues) {
643 			printf("Fail: nb_txq(%d) is greater than "
644 				"max_tx_queues(%d)\n", nb_txq,
645 				port->dev_info.max_tx_queues);
646 			return -1;
647 		}
648 		if (numa_support) {
649 			if (port_numa[pid] != NUMA_NO_CONFIG)
650 				port->socket_id = port_numa[pid];
651 			else {
652 				port->socket_id = rte_eth_dev_socket_id(pid);
653 
654 				/* if socket_id is invalid, set to 0 */
655 				if (check_socket_id(port->socket_id) < 0)
656 					port->socket_id = 0;
657 			}
658 		}
659 		else {
660 			if (socket_num == UMA_NO_CONFIG)
661 				port->socket_id = 0;
662 			else
663 				port->socket_id = socket_num;
664 		}
665 	}
666 
667 	q = RTE_MAX(nb_rxq, nb_txq);
668 	if (q == 0) {
669 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
670 		return -1;
671 	}
672 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
673 	if (nb_fwd_streams_new == nb_fwd_streams)
674 		return 0;
675 	/* clear the old */
676 	if (fwd_streams != NULL) {
677 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
678 			if (fwd_streams[sm_id] == NULL)
679 				continue;
680 			rte_free(fwd_streams[sm_id]);
681 			fwd_streams[sm_id] = NULL;
682 		}
683 		rte_free(fwd_streams);
684 		fwd_streams = NULL;
685 	}
686 
687 	/* init new */
688 	nb_fwd_streams = nb_fwd_streams_new;
689 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
690 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
691 	if (fwd_streams == NULL)
692 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
693 						"failed\n", nb_fwd_streams);
694 
695 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
696 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
697 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
698 		if (fwd_streams[sm_id] == NULL)
699 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
700 								" failed\n");
701 	}
702 
703 	return 0;
704 }
705 
706 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
707 static void
708 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
709 {
710 	unsigned int total_burst;
711 	unsigned int nb_burst;
712 	unsigned int burst_stats[3];
713 	uint16_t pktnb_stats[3];
714 	uint16_t nb_pkt;
715 	int burst_percent[3];
716 
717 	/*
718 	 * First compute the total number of packet bursts and the
719 	 * two highest numbers of bursts of the same number of packets.
720 	 */
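	/*
	 * pbs->pkt_burst_spread[n] is a histogram: it records how many bursts
	 * contained exactly n packets, so one pass below yields the total and
	 * the two most frequent burst sizes.
	 */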
721 	total_burst = 0;
722 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
723 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
724 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
725 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
726 		if (nb_burst == 0)
727 			continue;
728 		total_burst += nb_burst;
729 		if (nb_burst > burst_stats[0]) {
730 			burst_stats[1] = burst_stats[0]; pktnb_stats[1] = pktnb_stats[0];
731 			burst_stats[0] = nb_burst; pktnb_stats[0] = nb_pkt;
732 		} else if (nb_burst > burst_stats[1]) {
733 			burst_stats[1] = nb_burst; pktnb_stats[1] = nb_pkt;
734 		}
735 	}
736 	if (total_burst == 0)
737 		return;
738 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
739 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
740 	       burst_percent[0], (int) pktnb_stats[0]);
741 	if (burst_stats[0] == total_burst) {
742 		printf("]\n");
743 		return;
744 	}
745 	if (burst_stats[0] + burst_stats[1] == total_burst) {
746 		printf(" + %d%% of %d pkts]\n",
747 		       100 - burst_percent[0], pktnb_stats[1]);
748 		return;
749 	}
750 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
751 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
752 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
753 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
754 		return;
755 	}
756 	printf(" + %d%% of %d pkts + %d%% of others]\n",
757 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
758 }
759 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
760 
761 static void
762 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
763 {
764 	struct rte_port *port;
765 	uint8_t i;
766 
767 	static const char *fwd_stats_border = "----------------------";
768 
769 	port = &ports[port_id];
770 	printf("\n  %s Forward statistics for port %-2d %s\n",
771 	       fwd_stats_border, port_id, fwd_stats_border);
772 
773 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
774 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
775 		       "%-"PRIu64"\n",
776 		       stats->ipackets, stats->imissed,
777 		       (uint64_t) (stats->ipackets + stats->imissed));
778 
779 		if (cur_fwd_eng == &csum_fwd_engine)
780 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
781 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
782 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
783 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
784 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
785 		}
786 
787 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
788 		       "%-"PRIu64"\n",
789 		       stats->opackets, port->tx_dropped,
790 		       (uint64_t) (stats->opackets + port->tx_dropped));
791 	}
792 	else {
793 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
794 		       "%14"PRIu64"\n",
795 		       stats->ipackets, stats->imissed,
796 		       (uint64_t) (stats->ipackets + stats->imissed));
797 
798 		if (cur_fwd_eng == &csum_fwd_engine)
799 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
800 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
801 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
802 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
803 			printf("  RX-nombufs:             %14"PRIu64"\n",
804 			       stats->rx_nombuf);
805 		}
806 
807 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
808 		       "%14"PRIu64"\n",
809 		       stats->opackets, port->tx_dropped,
810 		       (uint64_t) (stats->opackets + port->tx_dropped));
811 	}
812 
813 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
814 	if (port->rx_stream)
815 		pkt_burst_stats_display("RX",
816 			&port->rx_stream->rx_burst_stats);
817 	if (port->tx_stream)
818 		pkt_burst_stats_display("TX",
819 			&port->tx_stream->tx_burst_stats);
820 #endif
821 
822 	if (port->rx_queue_stats_mapping_enabled) {
823 		printf("\n");
824 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
825 			printf("  Stats reg %2d RX-packets:%14"PRIu64
826 			       "     RX-errors:%14"PRIu64
827 			       "    RX-bytes:%14"PRIu64"\n",
828 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
829 		}
830 		printf("\n");
831 	}
832 	if (port->tx_queue_stats_mapping_enabled) {
833 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
834 			printf("  Stats reg %2d TX-packets:%14"PRIu64
835 			       "                                 TX-bytes:%14"PRIu64"\n",
836 			       i, stats->q_opackets[i], stats->q_obytes[i]);
837 		}
838 	}
839 
840 	printf("  %s--------------------------------%s\n",
841 	       fwd_stats_border, fwd_stats_border);
842 }
843 
844 static void
845 fwd_stream_stats_display(streamid_t stream_id)
846 {
847 	struct fwd_stream *fs;
848 	static const char *fwd_top_stats_border = "-------";
849 
850 	fs = fwd_streams[stream_id];
851 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
852 	    (fs->fwd_dropped == 0))
853 		return;
854 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
855 	       "TX Port=%2d/Queue=%2d %s\n",
856 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
857 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
858 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
859 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
860 
861 	/* if checksum mode */
862 	if (cur_fwd_eng == &csum_fwd_engine) {
863 		printf("  RX bad IP checksum: %-14u  RX bad L4 checksum: "
864 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
865 	}
866 
867 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
868 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
869 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
870 #endif
871 }
872 
873 static void
874 flush_fwd_rx_queues(void)
875 {
876 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
877 	portid_t  rxp;
878 	portid_t port_id;
879 	queueid_t rxq;
880 	uint16_t  nb_rx;
881 	uint16_t  i;
882 	uint8_t   j;
883 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
884 	uint64_t timer_cycles;
885 	/* convert to cycles once; do not clobber the global timer_period */
886 	timer_cycles = timer_period * rte_get_timer_hz();
887 
888 	for (j = 0; j < 2; j++) {
889 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
890 			for (rxq = 0; rxq < nb_rxq; rxq++) {
891 				port_id = fwd_ports_ids[rxp];
892 				/*
893 				 * testpmd can get stuck in the do/while loop
894 				 * below if rte_eth_rx_burst() keeps returning
895 				 * packets, so a timer is used to break out of
896 				 * the loop once the 1-second period expires.
897 				 */
898 				prev_tsc = rte_rdtsc();
899 				do {
900 					nb_rx = rte_eth_rx_burst(port_id, rxq,
901 						pkts_burst, MAX_PKT_BURST);
902 					for (i = 0; i < nb_rx; i++)
903 						rte_pktmbuf_free(pkts_burst[i]);
904 
905 					cur_tsc = rte_rdtsc();
906 					diff_tsc = cur_tsc - prev_tsc;
907 					timer_tsc += diff_tsc;
908 				} while ((nb_rx > 0) &&
909 					(timer_tsc < timer_cycles));
910 				prev_tsc = cur_tsc;
911 				timer_tsc = 0;
912 			}
913 		}
914 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
915 	}
916 }
917 
918 static void
919 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
920 {
921 	struct fwd_stream **fsm;
922 	streamid_t nb_fs;
923 	streamid_t sm_id;
924 
925 	fsm = &fwd_streams[fc->stream_idx];
926 	nb_fs = fc->stream_nb;
927 	do {
928 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
929 			(*pkt_fwd)(fsm[sm_id]);
930 	} while (!fc->stopped);
931 }
932 
933 static int
934 start_pkt_forward_on_core(void *fwd_arg)
935 {
936 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
937 			     cur_fwd_config.fwd_eng->packet_fwd);
938 	return 0;
939 }
940 
941 /*
942  * Run the TXONLY packet forwarding engine to send a single burst of packets.
943  * Used to start communication flows in network loopback test configurations.
944  */
945 static int
946 run_one_txonly_burst_on_core(void *fwd_arg)
947 {
948 	struct fwd_lcore *fwd_lc;
949 	struct fwd_lcore tmp_lcore;
950 
951 	fwd_lc = (struct fwd_lcore *) fwd_arg;
952 	tmp_lcore = *fwd_lc;
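	/*
	 * With stopped already set, the do/while loop in run_pkt_fwd_on_lcore()
	 * executes exactly one iteration, i.e. a single burst per stream.
	 */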
953 	tmp_lcore.stopped = 1;
954 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
955 	return 0;
956 }
957 
958 /*
959  * Launch packet forwarding:
960  *     - Setup per-port forwarding context.
961  *     - launch logical cores with their forwarding configuration.
962  */
963 static void
964 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
965 {
966 	port_fwd_begin_t port_fwd_begin;
967 	unsigned int i;
968 	unsigned int lc_id;
969 	int diag;
970 
971 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
972 	if (port_fwd_begin != NULL) {
973 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
974 			(*port_fwd_begin)(fwd_ports_ids[i]);
975 	}
976 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
977 		lc_id = fwd_lcores_cpuids[i];
978 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
979 			fwd_lcores[i]->stopped = 0;
980 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
981 						     fwd_lcores[i], lc_id);
982 			if (diag != 0)
983 				printf("launch lcore %u failed - diag=%d\n",
984 				       lc_id, diag);
985 		}
986 	}
987 }
988 
989 /*
990  * Launch packet forwarding configuration.
991  */
992 void
993 start_packet_forwarding(int with_tx_first)
994 {
995 	port_fwd_begin_t port_fwd_begin;
996 	port_fwd_end_t  port_fwd_end;
997 	struct rte_port *port;
998 	unsigned int i;
999 	portid_t   pt_id;
1000 	streamid_t sm_id;
1001 
1002 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1003 		rte_exit(EXIT_FAILURE, "nb_rxq is 0, cannot use rxonly fwd mode\n");
1004 
1005 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1006 		rte_exit(EXIT_FAILURE, "nb_txq is 0, cannot use txonly fwd mode\n");
1007 
1008 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1009 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1010 		(!nb_rxq || !nb_txq))
1011 		rte_exit(EXIT_FAILURE,
1012 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1013 			cur_fwd_eng->fwd_mode_name);
1014 
1015 	if (all_ports_started() == 0) {
1016 		printf("Not all ports were started\n");
1017 		return;
1018 	}
1019 	if (test_done == 0) {
1020 		printf("Packet forwarding already started\n");
1021 		return;
1022 	}
1023 
1024 	if (init_fwd_streams() < 0) {
1025 		printf("Fail from init_fwd_streams()\n");
1026 		return;
1027 	}
1028 
1029 	if (dcb_test) {
1030 		for (i = 0; i < nb_fwd_ports; i++) {
1031 			pt_id = fwd_ports_ids[i];
1032 			port = &ports[pt_id];
1033 			if (!port->dcb_flag) {
1034 				printf("In DCB mode, all forwarding ports must "
1035 				       "be configured in this mode.\n");
1036 				return;
1037 			}
1038 		}
1039 		if (nb_fwd_lcores == 1) {
1040 			printf("In DCB mode, the number of forwarding cores "
1041 			       "should be larger than 1.\n");
1042 			return;
1043 		}
1044 	}
1045 	test_done = 0;
1046 
1047 	if (!no_flush_rx)
1048 		flush_fwd_rx_queues();
1049 
1050 	fwd_config_setup();
1051 	pkt_fwd_config_display(&cur_fwd_config);
1052 	rxtx_config_display();
1053 
1054 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1055 		pt_id = fwd_ports_ids[i];
1056 		port = &ports[pt_id];
1057 		rte_eth_stats_get(pt_id, &port->stats);
1058 		port->tx_dropped = 0;
1059 
1060 		map_port_queue_stats_mapping_registers(pt_id, port);
1061 	}
1062 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1063 		fwd_streams[sm_id]->rx_packets = 0;
1064 		fwd_streams[sm_id]->tx_packets = 0;
1065 		fwd_streams[sm_id]->fwd_dropped = 0;
1066 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1067 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1068 
1069 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1070 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1071 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1072 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1073 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1074 #endif
1075 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1076 		fwd_streams[sm_id]->core_cycles = 0;
1077 #endif
1078 	}
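	/*
	 * "tx_first" support: before normal forwarding starts, run the
	 * txonly engine for one burst per requested iteration so that
	 * traffic exists in loopback topologies (see the comment at
	 * run_one_txonly_burst_on_core() above).
	 */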
1079 	if (with_tx_first) {
1080 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1081 		if (port_fwd_begin != NULL) {
1082 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1083 				(*port_fwd_begin)(fwd_ports_ids[i]);
1084 		}
1085 		while (with_tx_first--) {
1086 			launch_packet_forwarding(
1087 					run_one_txonly_burst_on_core);
1088 			rte_eal_mp_wait_lcore();
1089 		}
1090 		port_fwd_end = tx_only_engine.port_fwd_end;
1091 		if (port_fwd_end != NULL) {
1092 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1093 				(*port_fwd_end)(fwd_ports_ids[i]);
1094 		}
1095 	}
1096 	launch_packet_forwarding(start_pkt_forward_on_core);
1097 }
1098 
1099 void
1100 stop_packet_forwarding(void)
1101 {
1102 	struct rte_eth_stats stats;
1103 	struct rte_port *port;
1104 	port_fwd_end_t  port_fwd_end;
1105 	int i;
1106 	portid_t   pt_id;
1107 	streamid_t sm_id;
1108 	lcoreid_t  lc_id;
1109 	uint64_t total_recv;
1110 	uint64_t total_xmit;
1111 	uint64_t total_rx_dropped;
1112 	uint64_t total_tx_dropped;
1113 	uint64_t total_rx_nombuf;
1114 	uint64_t tx_dropped;
1115 	uint64_t rx_bad_ip_csum;
1116 	uint64_t rx_bad_l4_csum;
1117 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1118 	uint64_t fwd_cycles;
1119 #endif
1120 	static const char *acc_stats_border = "+++++++++++++++";
1121 
1122 	if (test_done) {
1123 		printf("Packet forwarding not started\n");
1124 		return;
1125 	}
1126 	printf("Telling cores to stop...");
1127 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1128 		fwd_lcores[lc_id]->stopped = 1;
1129 	printf("\nWaiting for lcores to finish...\n");
1130 	rte_eal_mp_wait_lcore();
1131 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1132 	if (port_fwd_end != NULL) {
1133 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1134 			pt_id = fwd_ports_ids[i];
1135 			(*port_fwd_end)(pt_id);
1136 		}
1137 	}
1138 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1139 	fwd_cycles = 0;
1140 #endif
1141 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1142 		if (cur_fwd_config.nb_fwd_streams >
1143 		    cur_fwd_config.nb_fwd_ports) {
1144 			fwd_stream_stats_display(sm_id);
1145 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1146 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1147 		} else {
1148 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1149 				fwd_streams[sm_id];
1150 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1151 				fwd_streams[sm_id];
1152 		}
1153 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1154 		tx_dropped = (uint64_t) (tx_dropped +
1155 					 fwd_streams[sm_id]->fwd_dropped);
1156 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1157 
1158 		rx_bad_ip_csum =
1159 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1160 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1161 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1162 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1163 							rx_bad_ip_csum;
1164 
1165 		rx_bad_l4_csum =
1166 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1167 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1168 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1169 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1170 							rx_bad_l4_csum;
1171 
1172 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1173 		fwd_cycles = (uint64_t) (fwd_cycles +
1174 					 fwd_streams[sm_id]->core_cycles);
1175 #endif
1176 	}
1177 	total_recv = 0;
1178 	total_xmit = 0;
1179 	total_rx_dropped = 0;
1180 	total_tx_dropped = 0;
1181 	total_rx_nombuf  = 0;
1182 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1183 		pt_id = fwd_ports_ids[i];
1184 
1185 		port = &ports[pt_id];
1186 		rte_eth_stats_get(pt_id, &stats);
1187 		stats.ipackets -= port->stats.ipackets;
1188 		port->stats.ipackets = 0;
1189 		stats.opackets -= port->stats.opackets;
1190 		port->stats.opackets = 0;
1191 		stats.ibytes   -= port->stats.ibytes;
1192 		port->stats.ibytes = 0;
1193 		stats.obytes   -= port->stats.obytes;
1194 		port->stats.obytes = 0;
1195 		stats.imissed  -= port->stats.imissed;
1196 		port->stats.imissed = 0;
1197 		stats.oerrors  -= port->stats.oerrors;
1198 		port->stats.oerrors = 0;
1199 		stats.rx_nombuf -= port->stats.rx_nombuf;
1200 		port->stats.rx_nombuf = 0;
1201 
1202 		total_recv += stats.ipackets;
1203 		total_xmit += stats.opackets;
1204 		total_rx_dropped += stats.imissed;
1205 		total_tx_dropped += port->tx_dropped;
1206 		total_rx_nombuf  += stats.rx_nombuf;
1207 
1208 		fwd_port_stats_display(pt_id, &stats);
1209 	}
1210 	printf("\n  %s Accumulated forward statistics for all ports"
1211 	       "%s\n",
1212 	       acc_stats_border, acc_stats_border);
1213 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1214 	       "%-"PRIu64"\n"
1215 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1216 	       "%-"PRIu64"\n",
1217 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1218 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1219 	if (total_rx_nombuf > 0)
1220 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1221 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1222 	       "%s\n",
1223 	       acc_stats_border, acc_stats_border);
1224 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1225 	if (total_recv > 0)
1226 		printf("\n  CPU cycles/packet=%u (total cycles="
1227 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1228 		       (unsigned int)(fwd_cycles / total_recv),
1229 		       fwd_cycles, total_recv);
1230 #endif
1231 	printf("\nDone.\n");
1232 	test_done = 1;
1233 }
1234 
1235 void
1236 dev_set_link_up(portid_t pid)
1237 {
1238 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1239 		printf("\nSet link up fail.\n");
1240 }
1241 
1242 void
1243 dev_set_link_down(portid_t pid)
1244 {
1245 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1246 		printf("\nSet link down fail.\n");
1247 }
1248 
1249 static int
1250 all_ports_started(void)
1251 {
1252 	portid_t pi;
1253 	struct rte_port *port;
1254 
1255 	FOREACH_PORT(pi, ports) {
1256 		port = &ports[pi];
1257 		/* Check if there is a port which is not started */
1258 		if ((port->port_status != RTE_PORT_STARTED) &&
1259 			(port->slave_flag == 0))
1260 			return 0;
1261 	}
1262 
1263 	/* All ports (excluding bonding slaves) are started. */
1264 	return 1;
1265 }
1266 
1267 int
1268 all_ports_stopped(void)
1269 {
1270 	portid_t pi;
1271 	struct rte_port *port;
1272 
1273 	FOREACH_PORT(pi, ports) {
1274 		port = &ports[pi];
1275 		if ((port->port_status != RTE_PORT_STOPPED) &&
1276 			(port->slave_flag == 0))
1277 			return 0;
1278 	}
1279 
1280 	return 1;
1281 }
1282 
1283 int
1284 port_is_started(portid_t port_id)
1285 {
1286 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1287 		return 0;
1288 
1289 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1290 		return 0;
1291 
1292 	return 1;
1293 }
1294 
1295 static int
1296 port_is_closed(portid_t port_id)
1297 {
1298 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1299 		return 0;
1300 
1301 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1302 		return 0;
1303 
1304 	return 1;
1305 }
1306 
1307 int
1308 start_port(portid_t pid)
1309 {
1310 	int diag, need_check_link_status = -1;
1311 	portid_t pi;
1312 	queueid_t qi;
1313 	struct rte_port *port;
1314 	struct ether_addr mac_addr;
1315 
1316 	if (port_id_is_invalid(pid, ENABLED_WARN))
1317 		return 0;
1318 
1319 	if (dcb_config)
1320 		dcb_test = 1;
1321 	FOREACH_PORT(pi, ports) {
1322 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1323 			continue;
1324 
1325 		need_check_link_status = 0;
1326 		port = &ports[pi];
1327 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1328 						 RTE_PORT_HANDLING) == 0) {
1329 			printf("Port %d is not stopped\n", pi);
1330 			continue;
1331 		}
1332 
1333 		if (port->need_reconfig > 0) {
1334 			port->need_reconfig = 0;
1335 
1336 			printf("Configuring Port %d (socket %u)\n", pi,
1337 					port->socket_id);
1338 			/* configure port */
1339 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1340 						&(port->dev_conf));
1341 			if (diag != 0) {
1342 				if (rte_atomic16_cmpset(&(port->port_status),
1343 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1344 					printf("Port %d can not be set back "
1345 							"to stopped\n", pi);
1346 				printf("Failed to configure port %d\n", pi);
1347 				/* try to reconfigure port next time */
1348 				port->need_reconfig = 1;
1349 				return -1;
1350 			}
1351 		}
1352 		if (port->need_reconfig_queues > 0) {
1353 			port->need_reconfig_queues = 0;
1354 			/* setup tx queues */
1355 			for (qi = 0; qi < nb_txq; qi++) {
1356 				if ((numa_support) &&
1357 					(txring_numa[pi] != NUMA_NO_CONFIG))
1358 					diag = rte_eth_tx_queue_setup(pi, qi,
1359 						nb_txd, txring_numa[pi],
1360 						&(port->tx_conf));
1361 				else
1362 					diag = rte_eth_tx_queue_setup(pi, qi,
1363 						nb_txd, port->socket_id,
1364 						&(port->tx_conf));
1365 
1366 				if (diag == 0)
1367 					continue;
1368 
1369 				/* Fail to setup tx queue, return */
1370 				if (rte_atomic16_cmpset(&(port->port_status),
1371 							RTE_PORT_HANDLING,
1372 							RTE_PORT_STOPPED) == 0)
1373 					printf("Port %d can not be set back "
1374 							"to stopped\n", pi);
1375 				printf("Failed to configure port %d TX queues\n", pi);
1376 				/* try to reconfigure queues next time */
1377 				port->need_reconfig_queues = 1;
1378 				return -1;
1379 			}
1380 			/* setup rx queues */
1381 			for (qi = 0; qi < nb_rxq; qi++) {
1382 				if ((numa_support) &&
1383 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1384 					struct rte_mempool * mp =
1385 						mbuf_pool_find(rxring_numa[pi]);
1386 					if (mp == NULL) {
1387 						printf("Failed to setup RX queue: "
1388 							"no mempool allocated "
1389 							"on socket %d\n",
1390 							rxring_numa[pi]);
1391 						return -1;
1392 					}
1393 
1394 					diag = rte_eth_rx_queue_setup(pi, qi,
1395 					     nb_rxd, rxring_numa[pi],
1396 					     &(port->rx_conf), mp);
1397 				} else {
1398 					struct rte_mempool *mp =
1399 						mbuf_pool_find(port->socket_id);
1400 					if (mp == NULL) {
1401 						printf("Failed to setup RX queue: "
1402 							"no mempool allocated "
1403 							"on socket %d\n",
1404 							port->socket_id);
1405 						return -1;
1406 					}
1407 					diag = rte_eth_rx_queue_setup(pi, qi,
1408 					     nb_rxd, port->socket_id,
1409 					     &(port->rx_conf), mp);
1410 				}
1411 				if (diag == 0)
1412 					continue;
1413 
1414 				/* Fail to setup rx queue, return */
1415 				if (rte_atomic16_cmpset(&(port->port_status),
1416 							RTE_PORT_HANDLING,
1417 							RTE_PORT_STOPPED) == 0)
1418 					printf("Port %d can not be set back "
1419 							"to stopped\n", pi);
1420 				printf("Failed to configure port %d RX queues\n", pi);
1421 				/* try to reconfigure queues next time */
1422 				port->need_reconfig_queues = 1;
1423 				return -1;
1424 			}
1425 		}
1426 		/* start port */
1427 		if (rte_eth_dev_start(pi) < 0) {
1428 			printf("Failed to start port %d\n", pi);
1429 
1430 			/* Failed to start the port; roll its status back. */
1431 			if (rte_atomic16_cmpset(&(port->port_status),
1432 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1433 				printf("Port %d can not be set back to "
1434 							"stopped\n", pi);
1435 			continue;
1436 		}
1437 
1438 		if (rte_atomic16_cmpset(&(port->port_status),
1439 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1440 			printf("Port %d can not be set into started\n", pi);
1441 
1442 		rte_eth_macaddr_get(pi, &mac_addr);
1443 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1444 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1445 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1446 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1447 
1448 		/* At least one port started; the link status needs checking. */
1449 		need_check_link_status = 1;
1450 	}
1451 
1452 	if (need_check_link_status == 1 && !no_link_check)
1453 		check_all_ports_link_status(RTE_PORT_ALL);
1454 	else if (need_check_link_status == 0)
1455 		printf("Please stop the ports first\n");
1456 
1457 	printf("Done\n");
1458 	return 0;
1459 }
1460 
1461 void
1462 stop_port(portid_t pid)
1463 {
1464 	portid_t pi;
1465 	struct rte_port *port;
1466 	int need_check_link_status = 0;
1467 
1468 	if (dcb_test) {
1469 		dcb_test = 0;
1470 		dcb_config = 0;
1471 	}
1472 
1473 	if (port_id_is_invalid(pid, ENABLED_WARN))
1474 		return;
1475 
1476 	printf("Stopping ports...\n");
1477 
1478 	FOREACH_PORT(pi, ports) {
1479 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1480 			continue;
1481 
1482 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1483 			printf("Please remove port %d from forwarding configuration.\n", pi);
1484 			continue;
1485 		}
1486 
1487 		if (port_is_bonding_slave(pi)) {
1488 			printf("Please remove port %d from bonded device.\n", pi);
1489 			continue;
1490 		}
1491 
1492 		port = &ports[pi];
1493 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1494 						RTE_PORT_HANDLING) == 0)
1495 			continue;
1496 
1497 		rte_eth_dev_stop(pi);
1498 
1499 		if (rte_atomic16_cmpset(&(port->port_status),
1500 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1501 			printf("Port %d can not be set into stopped\n", pi);
1502 		need_check_link_status = 1;
1503 	}
1504 	if (need_check_link_status && !no_link_check)
1505 		check_all_ports_link_status(RTE_PORT_ALL);
1506 
1507 	printf("Done\n");
1508 }
1509 
1510 void
1511 close_port(portid_t pid)
1512 {
1513 	portid_t pi;
1514 	struct rte_port *port;
1515 
1516 	if (port_id_is_invalid(pid, ENABLED_WARN))
1517 		return;
1518 
1519 	printf("Closing ports...\n");
1520 
1521 	FOREACH_PORT(pi, ports) {
1522 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1523 			continue;
1524 
1525 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1526 			printf("Please remove port %d from forwarding configuration.\n", pi);
1527 			continue;
1528 		}
1529 
1530 		if (port_is_bonding_slave(pi)) {
1531 			printf("Please remove port %d from bonded device.\n", pi);
1532 			continue;
1533 		}
1534 
1535 		port = &ports[pi];
1536 		if (rte_atomic16_cmpset(&(port->port_status),
1537 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1538 			printf("Port %d is already closed\n", pi);
1539 			continue;
1540 		}
1541 
1542 		if (rte_atomic16_cmpset(&(port->port_status),
1543 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1544 			printf("Port %d is not stopped\n", pi);
1545 			continue;
1546 		}
1547 
1548 		rte_eth_dev_close(pi);
1549 
1550 		if (rte_atomic16_cmpset(&(port->port_status),
1551 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1552 			printf("Port %d cannot be set to closed\n", pi);
1553 	}
1554 
1555 	printf("Done\n");
1556 }
1557 
1558 void
1559 attach_port(char *identifier)
1560 {
1561 	portid_t pi = 0;
1562 	unsigned int socket_id;
1563 
1564 	printf("Attaching a new port...\n");
1565 
1566 	if (identifier == NULL) {
1567 		printf("Invalid parameters are specified\n");
1568 		return;
1569 	}
1570 
1571 	if (rte_eth_dev_attach(identifier, &pi))
1572 		return;
1573 
1574 	ports[pi].enabled = 1;
1575 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1576 	/* if socket_id is invalid, set to 0 */
1577 	if (check_socket_id(socket_id) < 0)
1578 		socket_id = 0;
1579 	reconfig(pi, socket_id);
1580 	rte_eth_promiscuous_enable(pi);
1581 
1582 	nb_ports = rte_eth_dev_count();
1583 
1584 	ports[pi].port_status = RTE_PORT_STOPPED;
1585 
1586 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1587 	printf("Done\n");
1588 }
1589 
1590 void
1591 detach_port(uint8_t port_id)
1592 {
1593 	char name[RTE_ETH_NAME_MAX_LEN];
1594 
1595 	printf("Detaching a port...\n");
1596 
1597 	if (!port_is_closed(port_id)) {
1598 		printf("Please close port first\n");
1599 		return;
1600 	}
1601 
1602 	if (rte_eth_dev_detach(port_id, name))
1603 		return;
1604 
1605 	ports[port_id].enabled = 0;
1606 	nb_ports = rte_eth_dev_count();
1607 
1608 	printf("Port '%s' is detached. Now total ports is %d\n",
1609 			name, nb_ports);
1610 	printf("Done\n");
1611 	return;
1612 }
1613 
1614 void
1615 pmd_test_exit(void)
1616 {
1617 	portid_t pt_id;
1618 
1619 	if (test_done == 0)
1620 		stop_packet_forwarding();
1621 
1622 	if (ports != NULL) {
1623 		no_link_check = 1;
1624 		FOREACH_PORT(pt_id, ports) {
1625 			printf("\nShutting down port %d...\n", pt_id);
1626 			fflush(stdout);
1627 			stop_port(pt_id);
1628 			close_port(pt_id);
1629 		}
1630 	}
1631 	printf("\nBye...\n");
1632 }
1633 
1634 typedef void (*cmd_func_t)(void);
1635 struct pmd_test_command {
1636 	const char *cmd_name;
1637 	cmd_func_t cmd_func;
1638 };
1639 
1640 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1641 
1642 /* Check the link status of all ports in up to 9 s, and print the final result. */
1643 static void
1644 check_all_ports_link_status(uint32_t port_mask)
1645 {
1646 #define CHECK_INTERVAL 100 /* 100ms */
1647 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1648 	uint8_t portid, count, all_ports_up, print_flag = 0;
1649 	struct rte_eth_link link;
1650 
1651 	printf("Checking link statuses...\n");
1652 	fflush(stdout);
1653 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1654 		all_ports_up = 1;
1655 		FOREACH_PORT(portid, ports) {
1656 			if ((port_mask & (1 << portid)) == 0)
1657 				continue;
1658 			memset(&link, 0, sizeof(link));
1659 			rte_eth_link_get_nowait(portid, &link);
1660 			/* print link status if flag set */
1661 			if (print_flag == 1) {
1662 				if (link.link_status)
1663 					printf("Port %d Link Up - speed %u "
1664 						"Mbps - %s\n", (uint8_t)portid,
1665 						(unsigned)link.link_speed,
1666 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1667 					("full-duplex") : ("half-duplex"));
1668 				else
1669 					printf("Port %d Link Down\n",
1670 						(uint8_t)portid);
1671 				continue;
1672 			}
1673 			/* clear all_ports_up flag if any link down */
1674 			if (link.link_status == ETH_LINK_DOWN) {
1675 				all_ports_up = 0;
1676 				break;
1677 			}
1678 		}
1679 		/* after finally printing all link status, get out */
1680 		if (print_flag == 1)
1681 			break;
1682 
1683 		if (all_ports_up == 0) {
1684 			fflush(stdout);
1685 			rte_delay_ms(CHECK_INTERVAL);
1686 		}
1687 
1688 		/* set the print_flag if all ports up or timeout */
1689 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1690 			print_flag = 1;
1691 		}
1692 	}
1693 }
1694 
1695 static int
1696 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1697 {
1698 	uint16_t i;
1699 	int diag;
1700 	uint8_t mapping_found = 0;
1701 
1702 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1703 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1704 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1705 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1706 					tx_queue_stats_mappings[i].queue_id,
1707 					tx_queue_stats_mappings[i].stats_counter_id);
1708 			if (diag != 0)
1709 				return diag;
1710 			mapping_found = 1;
1711 		}
1712 	}
1713 	if (mapping_found)
1714 		port->tx_queue_stats_mapping_enabled = 1;
1715 	return 0;
1716 }
1717 
1718 static int
1719 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1720 {
1721 	uint16_t i;
1722 	int diag;
1723 	uint8_t mapping_found = 0;
1724 
1725 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1726 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1727 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1728 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1729 					rx_queue_stats_mappings[i].queue_id,
1730 					rx_queue_stats_mappings[i].stats_counter_id);
1731 			if (diag != 0)
1732 				return diag;
1733 			mapping_found = 1;
1734 		}
1735 	}
1736 	if (mapping_found)
1737 		port->rx_queue_stats_mapping_enabled = 1;
1738 	return 0;
1739 }
1740 
1741 static void
1742 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1743 {
1744 	int diag = 0;
1745 
1746 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1747 	if (diag != 0) {
1748 		if (diag == -ENOTSUP) {
1749 			port->tx_queue_stats_mapping_enabled = 0;
1750 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1751 		}
1752 		else
1753 			rte_exit(EXIT_FAILURE,
1754 					"set_tx_queue_stats_mapping_registers "
1755 					"failed for port id=%d diag=%d\n",
1756 					pi, diag);
1757 	}
1758 
1759 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1760 	if (diag != 0) {
1761 		if (diag == -ENOTSUP) {
1762 			port->rx_queue_stats_mapping_enabled = 0;
1763 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
1764 		}
1765 		else
1766 			rte_exit(EXIT_FAILURE,
1767 					"set_rx_queue_stats_mapping_registers "
1768 					"failed for port id=%d diag=%d\n",
1769 					pi, diag);
1770 	}
1771 }
1772 
1773 static void
1774 rxtx_port_config(struct rte_port *port)
1775 {
1776 	port->rx_conf = port->dev_info.default_rxconf;
1777 	port->tx_conf = port->dev_info.default_txconf;
1778 
1779 	/* Check if any RX/TX parameters have been passed */
1780 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1781 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1782 
1783 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1784 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1785 
1786 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1787 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1788 
1789 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1790 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1791 
1792 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1793 		port->rx_conf.rx_drop_en = rx_drop_en;
1794 
1795 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1796 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1797 
1798 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1799 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1800 
1801 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1802 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1803 
1804 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1805 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1806 
1807 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1808 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1809 
1810 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1811 		port->tx_conf.txq_flags = txq_flags;
1812 }
1813 
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		if (port->dev_info.max_vfs != 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_VMDQ_RSS;
			else
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_NONE;

			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
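
/*
 * Bonding support helpers: a port that has been enslaved to a bonded device
 * is flagged here so the rest of testpmd can avoid operating on it directly.
 */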
void
set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void
clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t
port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}
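
/*
 * VLAN IDs used to populate the VMDq+DCB pool map and, in DCB mode, the
 * port's VLAN filter table.
 */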
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
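
/*
 * Fill an rte_eth_conf for DCB operation. With DCB_VT_ENABLED the VMDq+DCB
 * path is taken (pools selected by VLAN ID); otherwise plain DCB with RSS
 * is configured. pfc_en additionally advertises priority flow control
 * support.
 */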
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
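
		/*
		 * Example: with 4 TCs the device is divided into 32 pools
		 * and all 32 vlan_tags entries are mapped, VLAN i to pool i;
		 * with 8 TCs only 16 pools exist, so just the first 16 tags
		 * are mapped.
		 */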
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < num_tcs; i++) {
			rx_conf->dcb_tc[i] = i;
			tx_conf->dcb_tc[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
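
/*
 * Reconfigure a port for DCB operation: recompute the queue counts, build
 * the DCB configuration via get_eth_dcb_conf() and install the VLAN
 * filters. In the interactive shell this is typically reached through a
 * command such as (syntax per this testpmd version, shown for
 * illustration):
 *
 *   testpmd> port config 0 dcb vt off 4 pfc on
 *
 * which requests 4 traffic classes with priority flow control and no
 * virtualization pools on port 0.
 */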
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_eth_dev_info_get(pid, &dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue IDs of the VMDq pools begin after the PF queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
		return -1;
	}

	/* Assume all ports in testpmd have the same DCB capability
	 * and the same number of RX/TX queues in DCB mode.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		nb_rxq = dev_info.max_rx_queues;
		nb_txq = dev_info.max_tx_queues;
	} else {
		/* if VT is disabled, use all PF queues */
		if (dev_info.vmdq_pool_base == 0) {
			nb_rxq = dev_info.max_rx_queues;
			nb_txq = dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in VT mode and DCB in non-VT mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
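
/*
 * Allocate the global ports[] array (one zeroed rte_port per possible port)
 * and mark the first nb_ports entries, i.e. the ports probed by the EAL,
 * as enabled.
 */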
static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* enable allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
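
/*
 * SIGINT/SIGTERM handler: tear down the ports and the prompt, then restore
 * the default disposition and re-raise the signal so the process reports
 * the conventional "killed by signal" exit status to its parent.
 */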
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
		force_quit();
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
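
/*
 * Entry point: initialize the EAL, allocate and configure all ports, start
 * them, and hand control either to the interactive prompt or to a
 * non-interactive forwarding run.
 */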
int
main(int argc, char **argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");
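
	/* Skip past the arguments consumed by the EAL (rte_eal_init()
	 * returns how many it parsed) before parsing testpmd's own options.
	 */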
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("Interactive command line not enabled, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}