xref: /dpdk/app/test-pmd/testpmd.c (revision 1e1d6bdd6d0f7bbe29b283c25acd65381fc5a046)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 
80 #include "testpmd.h"
81 
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87 
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No NUMA support by default. */
96 
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102 
103 /*
104  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107 
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of the peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116 
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;	       /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124 
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134 
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137 
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
140 
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145 	&io_fwd_engine,
146 	&mac_fwd_engine,
147 	&mac_retry_fwd_engine,
148 	&mac_swap_engine,
149 	&flow_gen_engine,
150 	&rx_only_engine,
151 	&tx_only_engine,
152 	&csum_fwd_engine,
153 	&icmp_echo_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 	&ieee1588_fwd_engine,
156 #endif
157 	NULL,
158 };
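
/*
 * The table above is NULL-terminated so it can be scanned by name. A
 * minimal lookup sketch (the real selection logic lives in the
 * command-line handling code, not here):
 *
 *	struct fwd_engine *e;
 *	unsigned int i;
 *
 *	for (i = 0; (e = fwd_engines[i]) != NULL; i++)
 *		if (strcmp(e->fwd_mode_name, "csum") == 0) {
 *			cur_fwd_eng = e;
 *			break;
 *		}
 */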
159 
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
162 
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools, if
165                                       * specified on the command line. */
166 
167 /*
168  * Configuration of packet segments used by the "txonly" processing engine.
169  */
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 	TXONLY_DEF_PACKET_LEN,
173 };
174 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175 
176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
177 /**< Split policy for packets to TX. */
178 
179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
181 
182 /* Whether the current configuration is in DCB mode; 0 means it is not. */
183 uint8_t dcb_config = 0;
184 
185 /* Whether DCB is in a testing state */
186 uint8_t dcb_test = 0;
187 
188 /*
189  * Configurable number of RX/TX queues.
190  */
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193 
194 /*
195  * Configurable number of RX/TX ring descriptors.
196  */
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201 
202 #define RTE_PMD_PARAM_UNSET -1
203 /*
204  * Configurable values of RX and TX ring threshold registers.
205  */
206 
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210 
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214 
215 /*
216  * Configurable value of RX free threshold.
217  */
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219 
220 /*
221  * Configurable value of RX drop enable.
222  */
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224 
225 /*
226  * Configurable value of TX free threshold.
227  */
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229 
230 /*
231  * Configurable value of TX RS bit threshold.
232  */
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234 
235 /*
236  * Configurable value of TX queue flags.
237  */
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239 
240 /*
241  * Receive Side Scaling (RSS) configuration.
242  */
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244 
245 /*
246  * Port topology configuration
247  */
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249 
250 /*
251  * When set, avoid flushing all the RX queues before starting forwarding.
252  */
253 uint8_t no_flush_rx = 0; /* flush by default */
254 
255 /*
256  * When set, avoid checking the link status when starting/stopping a port.
257  */
258 uint8_t no_link_check = 0; /* check by default */
259 
260 /*
261  * NIC bypass mode configuration options.
262  */
263 #ifdef RTE_NIC_BYPASS
264 
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
267 
268 #endif
269 
270 /*
271  * Ethernet device configuration.
272  */
273 struct rte_eth_rxmode rx_mode = {
274 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
275 	.split_hdr_size = 0,
276 	.header_split   = 0, /**< Header Split disabled. */
277 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
280 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
282 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
283 };
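
/*
 * These defaults apply to every port via init_port_config(). They can be
 * overridden on the testpmd command line before the ports are configured;
 * an illustrative invocation (option names as handled by this
 * application's parameter parsing, shown here only as an example):
 *
 *	testpmd -c 0x3 -n 4 -- --disable-hw-vlan --max-pkt-len=9000
 *
 * which clears the two VLAN offload flags above and raises max_rx_pkt_len.
 */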
284 
285 struct rte_fdir_conf fdir_conf = {
286 	.mode = RTE_FDIR_MODE_NONE,
287 	.pballoc = RTE_FDIR_PBALLOC_64K,
288 	.status = RTE_FDIR_REPORT_STATUS,
289 	.mask = {
290 		.vlan_tci_mask = 0x0,
291 		.ipv4_mask     = {
292 			.src_ip = 0xFFFFFFFF,
293 			.dst_ip = 0xFFFFFFFF,
294 		},
295 		.ipv6_mask     = {
296 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
298 		},
299 		.src_port_mask = 0xFFFF,
300 		.dst_port_mask = 0xFFFF,
301 		.mac_addr_byte_mask = 0xFF,
302 		.tunnel_type_mask = 1,
303 		.tunnel_id_mask = 0xFFFFFFFF,
304 	},
305 	.drop_queue = 127,
306 };
307 
308 volatile int test_done = 1; /* nonzero while packet forwarding is stopped. */
309 
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
312 
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
315 
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
318 
319 unsigned max_socket = 0;
320 
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
324 
325 /*
326  * Check if all the ports are started.
327  * If yes, return positive value. If not, return zero.
328  */
329 static int all_ports_started(void);
330 
331 /*
332  * Find next enabled port
333  */
334 portid_t
335 find_next_port(portid_t p, struct rte_port *ports, int size)
336 {
337 	if (ports == NULL)
338 		rte_exit(-EINVAL, "failed to find a next port id\n");
339 
340 	while ((p < size) && (ports[p].enabled == 0))
341 		p++;
342 	return p;
343 }
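
/*
 * find_next_port() is the building block for iterating over enabled ports.
 * A sketch of such an iterator (the FOREACH_PORT() macro used throughout
 * this file is defined in testpmd.h and follows this shape):
 *
 *	for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS);
 *	     p < RTE_MAX_ETHPORTS;
 *	     p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
 *		... use ports[p] ...
 */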
344 
345 /*
346  * Setup default configuration.
347  */
348 static void
349 set_default_fwd_lcores_config(void)
350 {
351 	unsigned int i;
352 	unsigned int nb_lc;
353 	unsigned int sock_num;
354 
355 	nb_lc = 0;
356 	for (i = 0; i < RTE_MAX_LCORE; i++) {
357 		sock_num = rte_lcore_to_socket_id(i) + 1;
358 		if (sock_num > max_socket) {
359 			if (sock_num > RTE_MAX_NUMA_NODES)
360 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361 			max_socket = sock_num;
362 		}
363 		if (!rte_lcore_is_enabled(i))
364 			continue;
365 		if (i == rte_get_master_lcore())
366 			continue;
367 		fwd_lcores_cpuids[nb_lc++] = i;
368 	}
369 	nb_lcores = (lcoreid_t) nb_lc;
370 	nb_cfg_lcores = nb_lcores;
371 	nb_fwd_lcores = 1;
372 }
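
/*
 * Example: with an EAL core mask of 0xf (lcores 0-3 enabled, lcore 0 being
 * the master), the loop above yields fwd_lcores_cpuids = {1, 2, 3},
 * nb_lcores = nb_cfg_lcores = 3 and nb_fwd_lcores = 1. Note that sock_num
 * is the socket id plus one, so after the loop max_socket holds the highest
 * socket id plus one, i.e. the number of CPU sockets when ids are
 * contiguous.
 */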
373 
374 static void
375 set_def_peer_eth_addrs(void)
376 {
377 	portid_t i;
378 
379 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381 		peer_eth_addrs[i].addr_bytes[5] = i;
382 	}
383 }
384 
385 static void
386 set_default_fwd_ports_config(void)
387 {
388 	portid_t pt_id;
389 
390 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
391 		fwd_ports_ids[pt_id] = pt_id;
392 
393 	nb_cfg_ports = nb_ports;
394 	nb_fwd_ports = nb_ports;
395 }
396 
397 void
398 set_def_fwd_config(void)
399 {
400 	set_default_fwd_lcores_config();
401 	set_def_peer_eth_addrs();
402 	set_default_fwd_ports_config();
403 }
404 
405 /*
406  * Configuration initialisation done once at init time.
407  */
408 static void
409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
410 		 unsigned int socket_id)
411 {
412 	char pool_name[RTE_MEMPOOL_NAMESIZE];
413 	struct rte_mempool *rte_mp = NULL;
414 	uint32_t mb_size;
415 
416 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
417 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
418 
419 	RTE_LOG(INFO, USER1,
420 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
421 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
422 
423 #ifdef RTE_LIBRTE_PMD_XENVIRT
424 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
425 		(unsigned) mb_mempool_cache,
426 		sizeof(struct rte_pktmbuf_pool_private),
427 		rte_pktmbuf_pool_init, NULL,
428 		rte_pktmbuf_init, NULL,
429 		socket_id, 0);
430 #endif
431 
432 	/* If the former XEN allocation failed, fall back to normal allocation. */
433 	if (rte_mp == NULL) {
434 		if (mp_anon != 0) {
435 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
436 				mb_size, (unsigned) mb_mempool_cache,
437 				sizeof(struct rte_pktmbuf_pool_private),
438 				socket_id, 0);
439 
440 			/* Skip initialization if creation/population failed;
441 			 * rte_mempool_free(NULL) is a no-op. */
442 			if (rte_mp == NULL ||
443 			    rte_mempool_populate_anon(rte_mp) == 0) {
444 				rte_mempool_free(rte_mp);
445 				rte_mp = NULL;
446 			} else {
447 				rte_pktmbuf_pool_init(rte_mp, NULL);
448 				rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
449 			}
446 		} else {
447 			/* wrapper to rte_mempool_create() */
448 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
449 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
450 		}
451 	}
452 
453 	if (rte_mp == NULL) {
454 		rte_exit(EXIT_FAILURE,
455 			"Creation of mbuf pool for socket %u failed: %s\n",
456 			socket_id, rte_strerror(rte_errno));
457 	} else if (verbose_level > 0) {
458 		rte_mempool_dump(stdout, rte_mp);
459 	}
460 }
461 
462 /*
463  * Check whether the given socket id is valid in NUMA mode;
464  * return 0 if valid, -1 otherwise.
465  */
466 static int
467 check_socket_id(const unsigned int socket_id)
468 {
469 	static int warning_once = 0;
470 
471 	if (socket_id >= max_socket) {
472 		if (!warning_once && numa_support)
473 			printf("Warning: NUMA should be configured manually by"
474 			       " using --port-numa-config and"
475 			       " --ring-numa-config parameters along with"
476 			       " --numa.\n");
477 		warning_once = 1;
478 		return -1;
479 	}
480 	return 0;
481 }
482 
483 static void
484 init_config(void)
485 {
486 	portid_t pid;
487 	struct rte_port *port;
488 	struct rte_mempool *mbp;
489 	unsigned int nb_mbuf_per_pool;
490 	lcoreid_t  lc_id;
491 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
492 
493 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
494 	/* Configuration of logical cores. */
495 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
496 				sizeof(struct fwd_lcore *) * nb_lcores,
497 				RTE_CACHE_LINE_SIZE);
498 	if (fwd_lcores == NULL) {
499 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
500 							"failed\n", nb_lcores);
501 	}
502 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
503 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
504 					       sizeof(struct fwd_lcore),
505 					       RTE_CACHE_LINE_SIZE);
506 		if (fwd_lcores[lc_id] == NULL) {
507 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
508 								"failed\n");
509 		}
510 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
511 	}
512 
513 	/*
514 	 * Create mbuf pools.
515 	 * If NUMA support is disabled, create a single pool of mbufs in
516 	 * socket 0 memory by default.
517 	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
518 	 *
519 	 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
520 	 * nb_txd can be configured at run time.
521 	 */
522 	if (param_total_num_mbufs)
523 		nb_mbuf_per_pool = param_total_num_mbufs;
524 	else {
525 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
526 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
527 
528 		if (!numa_support)
529 			nb_mbuf_per_pool =
530 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
531 	}
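
	/*
	 * Worked example of the default sizing above, assuming the values
	 * from testpmd.h: RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512 and mb_mempool_cache = DEF_MBUF_CACHE = 250.
	 * With 4 forwarding lcores this gives
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs per pool; without NUMA
	 * support this is then multiplied by RTE_MAX_ETHPORTS to size the
	 * single shared pool.
	 */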
532 
533 	if (!numa_support) {
534 		if (socket_num == UMA_NO_CONFIG)
535 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
536 		else
537 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
538 						 socket_num);
539 	}
540 
541 	FOREACH_PORT(pid, ports) {
542 		port = &ports[pid];
543 		rte_eth_dev_info_get(pid, &port->dev_info);
544 
545 		if (numa_support) {
546 			if (port_numa[pid] != NUMA_NO_CONFIG)
547 				port_per_socket[port_numa[pid]]++;
548 			else {
549 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
550 
551 				/* if socket_id is invalid, set to 0 */
552 				if (check_socket_id(socket_id) < 0)
553 					socket_id = 0;
554 				port_per_socket[socket_id]++;
555 			}
556 		}
557 
558 		/* set flag to initialize port/queue */
559 		port->need_reconfig = 1;
560 		port->need_reconfig_queues = 1;
561 	}
562 
563 	if (numa_support) {
564 		uint8_t i;
565 		unsigned int nb_mbuf;
566 
567 		if (param_total_num_mbufs)
568 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
569 
570 		for (i = 0; i < max_socket; i++) {
571 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
572 			if (nb_mbuf)
573 				mbuf_pool_create(mbuf_data_size,
574 						nb_mbuf, i);
575 		}
576 	}
577 	init_port_config();
578 
579 	/*
580 	 * Record which mbuf pool each logical core uses, if needed.
581 	 */
582 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
583 		mbp = mbuf_pool_find(
584 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
585 
586 		if (mbp == NULL)
587 			mbp = mbuf_pool_find(0);
588 		fwd_lcores[lc_id]->mbp = mbp;
589 	}
590 
591 	/* Configuration of packet forwarding streams. */
592 	if (init_fwd_streams() < 0)
593 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
594 
595 	fwd_config_setup();
596 }
597 
598 
599 void
600 reconfig(portid_t new_port_id, unsigned socket_id)
601 {
602 	struct rte_port *port;
603 
604 	/* Reconfiguration of Ethernet ports. */
605 	port = &ports[new_port_id];
606 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
607 
608 	/* set flag to initialize port/queue */
609 	port->need_reconfig = 1;
610 	port->need_reconfig_queues = 1;
611 	port->socket_id = socket_id;
612 
613 	init_port_config();
614 }
615 
616 
617 int
618 init_fwd_streams(void)
619 {
620 	portid_t pid;
621 	struct rte_port *port;
622 	streamid_t sm_id, nb_fwd_streams_new;
623 	queueid_t q;
624 
625 	/* Set the socket id according to whether NUMA is used or not. */
626 	FOREACH_PORT(pid, ports) {
627 		port = &ports[pid];
628 		if (nb_rxq > port->dev_info.max_rx_queues) {
629 			printf("Fail: nb_rxq(%d) is greater than "
630 				"max_rx_queues(%d)\n", nb_rxq,
631 				port->dev_info.max_rx_queues);
632 			return -1;
633 		}
634 		if (nb_txq > port->dev_info.max_tx_queues) {
635 			printf("Fail: nb_txq(%d) is greater than "
636 				"max_tx_queues(%d)\n", nb_txq,
637 				port->dev_info.max_tx_queues);
638 			return -1;
639 		}
640 		if (numa_support) {
641 			if (port_numa[pid] != NUMA_NO_CONFIG)
642 				port->socket_id = port_numa[pid];
643 			else {
644 				port->socket_id = rte_eth_dev_socket_id(pid);
645 
646 				/* if socket_id is invalid, set to 0 */
647 				if (check_socket_id(port->socket_id) < 0)
648 					port->socket_id = 0;
649 			}
650 		}
651 		else {
652 			if (socket_num == UMA_NO_CONFIG)
653 				port->socket_id = 0;
654 			else
655 				port->socket_id = socket_num;
656 		}
657 	}
658 
659 	q = RTE_MAX(nb_rxq, nb_txq);
660 	if (q == 0) {
661 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
662 		return -1;
663 	}
664 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
665 	if (nb_fwd_streams_new == nb_fwd_streams)
666 		return 0;
667 	/* clear the old */
668 	if (fwd_streams != NULL) {
669 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
670 			if (fwd_streams[sm_id] == NULL)
671 				continue;
672 			rte_free(fwd_streams[sm_id]);
673 			fwd_streams[sm_id] = NULL;
674 		}
675 		rte_free(fwd_streams);
676 		fwd_streams = NULL;
677 	}
678 
679 	/* init new */
680 	nb_fwd_streams = nb_fwd_streams_new;
681 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
682 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
683 	if (fwd_streams == NULL)
684 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
685 						"failed\n", nb_fwd_streams);
686 
687 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
688 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
689 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
690 		if (fwd_streams[sm_id] == NULL)
691 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
692 								" failed\n");
693 	}
694 
695 	return 0;
696 }
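
/*
 * Example: with 2 ports, nb_rxq = 2 and nb_txq = 1, q = RTE_MAX(2, 1) = 2,
 * so nb_fwd_streams = 2 * 2 = 4, i.e. one potential stream per queue of
 * each port. The binding of streams to (port, queue, lcore) tuples is done
 * later by fwd_config_setup().
 */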
697 
698 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
699 static void
700 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
701 {
702 	unsigned int total_burst;
703 	unsigned int nb_burst;
704 	unsigned int burst_stats[3];
705 	uint16_t pktnb_stats[3];
706 	uint16_t nb_pkt;
707 	int burst_percent[3];
708 
709 	/*
710 	 * First compute the total number of packet bursts and the
711 	 * two highest numbers of bursts of the same number of packets.
712 	 */
713 	total_burst = 0;
714 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
715 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
716 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
717 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
718 		if (nb_burst == 0)
719 			continue;
720 		total_burst += nb_burst;
721 		if (nb_burst > burst_stats[0]) {
722 			burst_stats[1] = burst_stats[0];
723 			pktnb_stats[1] = pktnb_stats[0];
724 			burst_stats[0] = nb_burst;
725 			pktnb_stats[0] = nb_pkt;
726 		} else if (nb_burst > burst_stats[1]) {
727 			burst_stats[1] = nb_burst;
728 			pktnb_stats[1] = nb_pkt;
729 		}
727 	}
728 	if (total_burst == 0)
729 		return;
730 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
731 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
732 	       burst_percent[0], (int) pktnb_stats[0]);
733 	if (burst_stats[0] == total_burst) {
734 		printf("]\n");
735 		return;
736 	}
737 	if (burst_stats[0] + burst_stats[1] == total_burst) {
738 		printf(" + %d%% of %d pkts]\n",
739 		       100 - burst_percent[0], pktnb_stats[1]);
740 		return;
741 	}
742 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
743 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
744 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
745 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
746 		return;
747 	}
748 	printf(" + %d%% of %d pkts + %d%% of others]\n",
749 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
750 }
751 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
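
/*
 * Illustrative output of pkt_burst_stats_display() for a run where most
 * bursts were full (32 packets) and the remainder carried a single packet:
 *
 *	RX-bursts : 10000 [90% of 32 pkts + 10% of 1 pkts]
 *
 * i.e. the total number of bursts followed by the shares of the two most
 * frequent burst sizes.
 */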
752 
753 static void
754 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
755 {
756 	struct rte_port *port;
757 	uint8_t i;
758 
759 	static const char *fwd_stats_border = "----------------------";
760 
761 	port = &ports[port_id];
762 	printf("\n  %s Forward statistics for port %-2d %s\n",
763 	       fwd_stats_border, port_id, fwd_stats_border);
764 
765 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
766 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
767 		       "%-"PRIu64"\n",
768 		       stats->ipackets, stats->imissed,
769 		       (uint64_t) (stats->ipackets + stats->imissed));
770 
771 		if (cur_fwd_eng == &csum_fwd_engine)
772 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
773 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
774 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
775 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
776 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
777 		}
778 
779 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
780 		       "%-"PRIu64"\n",
781 		       stats->opackets, port->tx_dropped,
782 		       (uint64_t) (stats->opackets + port->tx_dropped));
783 	}
784 	else {
785 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
786 		       "%14"PRIu64"\n",
787 		       stats->ipackets, stats->imissed,
788 		       (uint64_t) (stats->ipackets + stats->imissed));
789 
790 		if (cur_fwd_eng == &csum_fwd_engine)
791 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
792 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
793 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
794 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
795 			printf("  RX-nombufs:             %14"PRIu64"\n",
796 			       stats->rx_nombuf);
797 		}
798 
799 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
800 		       "%14"PRIu64"\n",
801 		       stats->opackets, port->tx_dropped,
802 		       (uint64_t) (stats->opackets + port->tx_dropped));
803 	}
804 
805 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
806 	if (port->rx_stream)
807 		pkt_burst_stats_display("RX",
808 			&port->rx_stream->rx_burst_stats);
809 	if (port->tx_stream)
810 		pkt_burst_stats_display("TX",
811 			&port->tx_stream->tx_burst_stats);
812 #endif
813 
814 	if (port->rx_queue_stats_mapping_enabled) {
815 		printf("\n");
816 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
817 			printf("  Stats reg %2d RX-packets:%14"PRIu64
818 			       "     RX-errors:%14"PRIu64
819 			       "    RX-bytes:%14"PRIu64"\n",
820 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
821 		}
822 		printf("\n");
823 	}
824 	if (port->tx_queue_stats_mapping_enabled) {
825 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
826 			printf("  Stats reg %2d TX-packets:%14"PRIu64
827 			       "                                 TX-bytes:%14"PRIu64"\n",
828 			       i, stats->q_opackets[i], stats->q_obytes[i]);
829 		}
830 	}
831 
832 	printf("  %s--------------------------------%s\n",
833 	       fwd_stats_border, fwd_stats_border);
834 }
835 
836 static void
837 fwd_stream_stats_display(streamid_t stream_id)
838 {
839 	struct fwd_stream *fs;
840 	static const char *fwd_top_stats_border = "-------";
841 
842 	fs = fwd_streams[stream_id];
843 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
844 	    (fs->fwd_dropped == 0))
845 		return;
846 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
847 	       "TX Port=%2d/Queue=%2d %s\n",
848 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
849 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
850 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
851 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
852 
853 	/* if in checksum forwarding mode */
854 	if (cur_fwd_eng == &csum_fwd_engine) {
855 		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
856 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
857 	}
858 
859 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
860 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
861 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
862 #endif
863 }
864 
865 static void
866 flush_fwd_rx_queues(void)
867 {
868 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
869 	portid_t  rxp;
870 	portid_t port_id;
871 	queueid_t rxq;
872 	uint16_t  nb_rx;
873 	uint16_t  i;
874 	uint8_t   j;
875 
876 	for (j = 0; j < 2; j++) {
877 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
878 			for (rxq = 0; rxq < nb_rxq; rxq++) {
879 				port_id = fwd_ports_ids[rxp];
880 				do {
881 					nb_rx = rte_eth_rx_burst(port_id, rxq,
882 						pkts_burst, MAX_PKT_BURST);
883 					for (i = 0; i < nb_rx; i++)
884 						rte_pktmbuf_free(pkts_burst[i]);
885 				} while (nb_rx > 0);
886 			}
887 		}
888 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
889 	}
890 }
891 
892 static void
893 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
894 {
895 	struct fwd_stream **fsm;
896 	streamid_t nb_fs;
897 	streamid_t sm_id;
898 
899 	fsm = &fwd_streams[fc->stream_idx];
900 	nb_fs = fc->stream_nb;
901 	do {
902 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
903 			(*pkt_fwd)(fsm[sm_id]);
904 	} while (! fc->stopped);
905 }
906 
907 static int
908 start_pkt_forward_on_core(void *fwd_arg)
909 {
910 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
911 			     cur_fwd_config.fwd_eng->packet_fwd);
912 	return 0;
913 }
914 
915 /*
916  * Run the TXONLY packet forwarding engine to send a single burst of packets.
917  * Used to start communication flows in network loopback test configurations.
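 * Setting tmp_lcore.stopped = 1 before launching means the do/while loop in
 * run_pkt_fwd_on_lcore() observes the stop flag right after its first pass,
 * so exactly one burst is transmitted per stream.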
918  */
919 static int
920 run_one_txonly_burst_on_core(void *fwd_arg)
921 {
922 	struct fwd_lcore *fwd_lc;
923 	struct fwd_lcore tmp_lcore;
924 
925 	fwd_lc = (struct fwd_lcore *) fwd_arg;
926 	tmp_lcore = *fwd_lc;
927 	tmp_lcore.stopped = 1;
928 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
929 	return 0;
930 }
931 
932 /*
933  * Launch packet forwarding:
934  *     - Set up the per-port forwarding context.
935  *     - Launch logical cores with their forwarding configuration.
936  */
937 static void
938 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
939 {
940 	port_fwd_begin_t port_fwd_begin;
941 	unsigned int i;
942 	unsigned int lc_id;
943 	int diag;
944 
945 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
946 	if (port_fwd_begin != NULL) {
947 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
948 			(*port_fwd_begin)(fwd_ports_ids[i]);
949 	}
950 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
951 		lc_id = fwd_lcores_cpuids[i];
952 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
953 			fwd_lcores[i]->stopped = 0;
954 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
955 						     fwd_lcores[i], lc_id);
956 			if (diag != 0)
957 				printf("launch lcore %u failed - diag=%d\n",
958 				       lc_id, diag);
959 		}
960 	}
961 }
962 
963 /*
964  * Launch packet forwarding configuration.
965  */
966 void
967 start_packet_forwarding(int with_tx_first)
968 {
969 	port_fwd_begin_t port_fwd_begin;
970 	port_fwd_end_t  port_fwd_end;
971 	struct rte_port *port;
972 	unsigned int i;
973 	portid_t   pt_id;
974 	streamid_t sm_id;
975 
976 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
977 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
978 
979 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
980 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
981 
982 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
983 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
984 		(!nb_rxq || !nb_txq))
985 		rte_exit(EXIT_FAILURE,
986 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
987 			cur_fwd_eng->fwd_mode_name);
988 
989 	if (all_ports_started() == 0) {
990 		printf("Not all ports were started\n");
991 		return;
992 	}
993 	if (test_done == 0) {
994 		printf("Packet forwarding already started\n");
995 		return;
996 	}
997 
998 	if (init_fwd_streams() < 0) {
999 		printf("Fail from init_fwd_streams()\n");
1000 		return;
1001 	}
1002 
1003 	if (dcb_test) {
1004 		for (i = 0; i < nb_fwd_ports; i++) {
1005 			pt_id = fwd_ports_ids[i];
1006 			port = &ports[pt_id];
1007 			if (!port->dcb_flag) {
1008 				printf("In DCB mode, all forwarding ports must "
1009                                        "be configured in this mode.\n");
1010 				return;
1011 			}
1012 		}
1013 		if (nb_fwd_lcores == 1) {
1014 			printf("In DCB mode,the nb forwarding cores "
1015                                "should be larger than 1.\n");
1016 			return;
1017 		}
1018 	}
1019 	test_done = 0;
1020 
1021 	if (!no_flush_rx)
1022 		flush_fwd_rx_queues();
1023 
1024 	fwd_config_setup();
1025 	rxtx_config_display();
1026 
1027 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1028 		pt_id = fwd_ports_ids[i];
1029 		port = &ports[pt_id];
1030 		rte_eth_stats_get(pt_id, &port->stats);
1031 		port->tx_dropped = 0;
1032 
1033 		map_port_queue_stats_mapping_registers(pt_id, port);
1034 	}
1035 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1036 		fwd_streams[sm_id]->rx_packets = 0;
1037 		fwd_streams[sm_id]->tx_packets = 0;
1038 		fwd_streams[sm_id]->fwd_dropped = 0;
1039 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1040 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1041 
1042 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1043 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1044 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1045 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1046 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1047 #endif
1048 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1049 		fwd_streams[sm_id]->core_cycles = 0;
1050 #endif
1051 	}
1052 	if (with_tx_first) {
1053 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1054 		if (port_fwd_begin != NULL) {
1055 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1056 				(*port_fwd_begin)(fwd_ports_ids[i]);
1057 		}
1058 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1059 		rte_eal_mp_wait_lcore();
1060 		port_fwd_end = tx_only_engine.port_fwd_end;
1061 		if (port_fwd_end != NULL) {
1062 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1063 				(*port_fwd_end)(fwd_ports_ids[i]);
1064 		}
1065 	}
1066 	launch_packet_forwarding(start_pkt_forward_on_core);
1067 }
1068 
1069 void
1070 stop_packet_forwarding(void)
1071 {
1072 	struct rte_eth_stats stats;
1073 	struct rte_port *port;
1074 	port_fwd_end_t  port_fwd_end;
1075 	int i;
1076 	portid_t   pt_id;
1077 	streamid_t sm_id;
1078 	lcoreid_t  lc_id;
1079 	uint64_t total_recv;
1080 	uint64_t total_xmit;
1081 	uint64_t total_rx_dropped;
1082 	uint64_t total_tx_dropped;
1083 	uint64_t total_rx_nombuf;
1084 	uint64_t tx_dropped;
1085 	uint64_t rx_bad_ip_csum;
1086 	uint64_t rx_bad_l4_csum;
1087 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1088 	uint64_t fwd_cycles;
1089 #endif
1090 	static const char *acc_stats_border = "+++++++++++++++";
1091 
1092 	if (all_ports_started() == 0) {
1093 		printf("Not all ports were started\n");
1094 		return;
1095 	}
1096 	if (test_done) {
1097 		printf("Packet forwarding not started\n");
1098 		return;
1099 	}
1100 	printf("Telling cores to stop...");
1101 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1102 		fwd_lcores[lc_id]->stopped = 1;
1103 	printf("\nWaiting for lcores to finish...\n");
1104 	rte_eal_mp_wait_lcore();
1105 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1106 	if (port_fwd_end != NULL) {
1107 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1108 			pt_id = fwd_ports_ids[i];
1109 			(*port_fwd_end)(pt_id);
1110 		}
1111 	}
1112 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1113 	fwd_cycles = 0;
1114 #endif
1115 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1116 		if (cur_fwd_config.nb_fwd_streams >
1117 		    cur_fwd_config.nb_fwd_ports) {
1118 			fwd_stream_stats_display(sm_id);
1119 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1120 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1121 		} else {
1122 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1123 				fwd_streams[sm_id];
1124 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1125 				fwd_streams[sm_id];
1126 		}
1127 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1128 		tx_dropped = (uint64_t) (tx_dropped +
1129 					 fwd_streams[sm_id]->fwd_dropped);
1130 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1131 
1132 		rx_bad_ip_csum =
1133 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1134 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1135 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1136 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1137 							rx_bad_ip_csum;
1138 
1139 		rx_bad_l4_csum =
1140 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1141 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1142 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1143 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1144 							rx_bad_l4_csum;
1145 
1146 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1147 		fwd_cycles = (uint64_t) (fwd_cycles +
1148 					 fwd_streams[sm_id]->core_cycles);
1149 #endif
1150 	}
1151 	total_recv = 0;
1152 	total_xmit = 0;
1153 	total_rx_dropped = 0;
1154 	total_tx_dropped = 0;
1155 	total_rx_nombuf  = 0;
1156 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1157 		pt_id = fwd_ports_ids[i];
1158 
1159 		port = &ports[pt_id];
1160 		rte_eth_stats_get(pt_id, &stats);
1161 		stats.ipackets -= port->stats.ipackets;
1162 		port->stats.ipackets = 0;
1163 		stats.opackets -= port->stats.opackets;
1164 		port->stats.opackets = 0;
1165 		stats.ibytes   -= port->stats.ibytes;
1166 		port->stats.ibytes = 0;
1167 		stats.obytes   -= port->stats.obytes;
1168 		port->stats.obytes = 0;
1169 		stats.imissed  -= port->stats.imissed;
1170 		port->stats.imissed = 0;
1171 		stats.oerrors  -= port->stats.oerrors;
1172 		port->stats.oerrors = 0;
1173 		stats.rx_nombuf -= port->stats.rx_nombuf;
1174 		port->stats.rx_nombuf = 0;
1175 
1176 		total_recv += stats.ipackets;
1177 		total_xmit += stats.opackets;
1178 		total_rx_dropped += stats.imissed;
1179 		total_tx_dropped += port->tx_dropped;
1180 		total_rx_nombuf  += stats.rx_nombuf;
1181 
1182 		fwd_port_stats_display(pt_id, &stats);
1183 	}
1184 	printf("\n  %s Accumulated forward statistics for all ports"
1185 	       "%s\n",
1186 	       acc_stats_border, acc_stats_border);
1187 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1188 	       "%-"PRIu64"\n"
1189 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1190 	       "%-"PRIu64"\n",
1191 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1192 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1193 	if (total_rx_nombuf > 0)
1194 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1195 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1196 	       "%s\n",
1197 	       acc_stats_border, acc_stats_border);
1198 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1199 	if (total_recv > 0)
1200 		printf("\n  CPU cycles/packet=%u (total cycles="
1201 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1202 		       (unsigned int)(fwd_cycles / total_recv),
1203 		       fwd_cycles, total_recv);
1204 #endif
1205 	printf("\nDone.\n");
1206 	test_done = 1;
1207 }
1208 
1209 void
1210 dev_set_link_up(portid_t pid)
1211 {
1212 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1213 		printf("\nSet link up fail.\n");
1214 }
1215 
1216 void
1217 dev_set_link_down(portid_t pid)
1218 {
1219 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1220 		printf("\nSet link down fail.\n");
1221 }
1222 
1223 static int
1224 all_ports_started(void)
1225 {
1226 	portid_t pi;
1227 	struct rte_port *port;
1228 
1229 	FOREACH_PORT(pi, ports) {
1230 		port = &ports[pi];
1231 		/* Check if there is a port which is not started */
1232 		if ((port->port_status != RTE_PORT_STARTED) &&
1233 			(port->slave_flag == 0))
1234 			return 0;
1235 	}
1236 
1237 	/* All (non-slave) ports are started */
1238 	return 1;
1239 }
1240 
1241 int
1242 all_ports_stopped(void)
1243 {
1244 	portid_t pi;
1245 	struct rte_port *port;
1246 
1247 	FOREACH_PORT(pi, ports) {
1248 		port = &ports[pi];
1249 		if ((port->port_status != RTE_PORT_STOPPED) &&
1250 			(port->slave_flag == 0))
1251 			return 0;
1252 	}
1253 
1254 	return 1;
1255 }
1256 
1257 int
1258 port_is_started(portid_t port_id)
1259 {
1260 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1261 		return 0;
1262 
1263 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1264 		return 0;
1265 
1266 	return 1;
1267 }
1268 
1269 static int
1270 port_is_closed(portid_t port_id)
1271 {
1272 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1273 		return 0;
1274 
1275 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1276 		return 0;
1277 
1278 	return 1;
1279 }
1280 
1281 int
1282 start_port(portid_t pid)
1283 {
1284 	int diag, need_check_link_status = -1;
1285 	portid_t pi;
1286 	queueid_t qi;
1287 	struct rte_port *port;
1288 	struct ether_addr mac_addr;
1289 
1290 	if (port_id_is_invalid(pid, ENABLED_WARN))
1291 		return 0;
1292 
1293 	if (dcb_config)
1294 		dcb_test = 1;
1295 	FOREACH_PORT(pi, ports) {
1296 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1297 			continue;
1298 
1299 		need_check_link_status = 0;
1300 		port = &ports[pi];
1301 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1302 						 RTE_PORT_HANDLING) == 0) {
1303 			printf("Port %d is now not stopped\n", pi);
1304 			continue;
1305 		}
1306 
1307 		if (port->need_reconfig > 0) {
1308 			port->need_reconfig = 0;
1309 
1310 			printf("Configuring Port %d (socket %u)\n", pi,
1311 					port->socket_id);
1312 			/* configure port */
1313 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1314 						&(port->dev_conf));
1315 			if (diag != 0) {
1316 				if (rte_atomic16_cmpset(&(port->port_status),
1317 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1318 					printf("Port %d can not be set back "
1319 							"to stopped\n", pi);
1320 				printf("Fail to configure port %d\n", pi);
1321 				/* try to reconfigure port next time */
1322 				port->need_reconfig = 1;
1323 				return -1;
1324 			}
1325 		}
1326 		if (port->need_reconfig_queues > 0) {
1327 			port->need_reconfig_queues = 0;
1328 			/* setup tx queues */
1329 			for (qi = 0; qi < nb_txq; qi++) {
1330 				if ((numa_support) &&
1331 					(txring_numa[pi] != NUMA_NO_CONFIG))
1332 					diag = rte_eth_tx_queue_setup(pi, qi,
1333 						nb_txd, txring_numa[pi],
1334 						&(port->tx_conf));
1335 				else
1336 					diag = rte_eth_tx_queue_setup(pi, qi,
1337 						nb_txd,port->socket_id,
1338 						&(port->tx_conf));
1339 
1340 				if (diag == 0)
1341 					continue;
1342 
1343 				/* Failed to set up a TX queue; return. */
1344 				if (rte_atomic16_cmpset(&(port->port_status),
1345 							RTE_PORT_HANDLING,
1346 							RTE_PORT_STOPPED) == 0)
1347 					printf("Port %d cannot be set back "
1348 							"to stopped\n", pi);
1349 				printf("Failed to configure port %d TX queues\n", pi);
1350 				/* try to reconfigure queues next time */
1351 				port->need_reconfig_queues = 1;
1352 				return -1;
1353 			}
1354 			/* setup rx queues */
1355 			for (qi = 0; qi < nb_rxq; qi++) {
1356 				if ((numa_support) &&
1357 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1358 					struct rte_mempool * mp =
1359 						mbuf_pool_find(rxring_numa[pi]);
1360 					if (mp == NULL) {
1361 						printf("Failed to setup RX queue:"
1362 							"No mempool allocation"
1363 							" on the socket %d\n",
1364 							rxring_numa[pi]);
1365 						return -1;
1366 					}
1367 
1368 					diag = rte_eth_rx_queue_setup(pi, qi,
1369 					     nb_rxd, rxring_numa[pi],
1370 					     &(port->rx_conf), mp);
1371 				} else {
1372 					struct rte_mempool *mp =
1373 						mbuf_pool_find(port->socket_id);
1374 					if (mp == NULL) {
1375 						printf("Failed to setup RX queue:"
1376 							"No mempool allocation"
1377 							" on the socket %d\n",
1378 							port->socket_id);
1379 						return -1;
1380 					}
1381 					diag = rte_eth_rx_queue_setup(pi, qi,
1382 					     nb_rxd,port->socket_id,
1383 					     &(port->rx_conf), mp);
1384 				}
1385 				if (diag == 0)
1386 					continue;
1387 
1388 				/* Failed to set up an RX queue; return. */
1389 				if (rte_atomic16_cmpset(&(port->port_status),
1390 							RTE_PORT_HANDLING,
1391 							RTE_PORT_STOPPED) == 0)
1392 					printf("Port %d cannot be set back "
1393 							"to stopped\n", pi);
1394 				printf("Failed to configure port %d RX queues\n", pi);
1395 				/* try to reconfigure queues next time */
1396 				port->need_reconfig_queues = 1;
1397 				return -1;
1398 			}
1399 		}
1400 		/* start port */
1401 		if (rte_eth_dev_start(pi) < 0) {
1402 			printf("Fail to start port %d\n", pi);
1403 
1404 			/* Fail to setup rx queue, return */
1405 			if (rte_atomic16_cmpset(&(port->port_status),
1406 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1407 				printf("Port %d can not be set back to "
1408 							"stopped\n", pi);
1409 			continue;
1410 		}
1411 
1412 		if (rte_atomic16_cmpset(&(port->port_status),
1413 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1414 			printf("Port %d can not be set into started\n", pi);
1415 
1416 		rte_eth_macaddr_get(pi, &mac_addr);
1417 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1418 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1419 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1420 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1421 
1422 		/* At least one port was started; the link status needs checking. */
1423 		need_check_link_status = 1;
1424 	}
1425 
1426 	if (need_check_link_status == 1 && !no_link_check)
1427 		check_all_ports_link_status(RTE_PORT_ALL);
1428 	else if (need_check_link_status == 0)
1429 		printf("Please stop the ports first\n");
1430 
1431 	printf("Done\n");
1432 	return 0;
1433 }
1434 
1435 void
1436 stop_port(portid_t pid)
1437 {
1438 	portid_t pi;
1439 	struct rte_port *port;
1440 	int need_check_link_status = 0;
1441 
1442 	if (dcb_test) {
1443 		dcb_test = 0;
1444 		dcb_config = 0;
1445 	}
1446 
1447 	if (port_id_is_invalid(pid, ENABLED_WARN))
1448 		return;
1449 
1450 	printf("Stopping ports...\n");
1451 
1452 	FOREACH_PORT(pi, ports) {
1453 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1454 			continue;
1455 
1456 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1457 			printf("Please remove port %d from forwarding configuration.\n", pi);
1458 			continue;
1459 		}
1460 
1461 		if (port_is_bonding_slave(pi)) {
1462 			printf("Please remove port %d from bonded device.\n", pi);
1463 			continue;
1464 		}
1465 
1466 		port = &ports[pi];
1467 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1468 						RTE_PORT_HANDLING) == 0)
1469 			continue;
1470 
1471 		rte_eth_dev_stop(pi);
1472 
1473 		if (rte_atomic16_cmpset(&(port->port_status),
1474 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1475 			printf("Port %d can not be set into stopped\n", pi);
1476 		need_check_link_status = 1;
1477 	}
1478 	if (need_check_link_status && !no_link_check)
1479 		check_all_ports_link_status(RTE_PORT_ALL);
1480 
1481 	printf("Done\n");
1482 }
1483 
1484 void
1485 close_port(portid_t pid)
1486 {
1487 	portid_t pi;
1488 	struct rte_port *port;
1489 
1490 	if (port_id_is_invalid(pid, ENABLED_WARN))
1491 		return;
1492 
1493 	printf("Closing ports...\n");
1494 
1495 	FOREACH_PORT(pi, ports) {
1496 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1497 			continue;
1498 
1499 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1500 			printf("Please remove port %d from forwarding configuration.\n", pi);
1501 			continue;
1502 		}
1503 
1504 		if (port_is_bonding_slave(pi)) {
1505 			printf("Please remove port %d from bonded device.\n", pi);
1506 			continue;
1507 		}
1508 
1509 		port = &ports[pi];
1510 		if (rte_atomic16_cmpset(&(port->port_status),
1511 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1512 			printf("Port %d is already closed\n", pi);
1513 			continue;
1514 		}
1515 
1516 		if (rte_atomic16_cmpset(&(port->port_status),
1517 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1518 			printf("Port %d is now not stopped\n", pi);
1519 			continue;
1520 		}
1521 
1522 		rte_eth_dev_close(pi);
1523 
1524 		if (rte_atomic16_cmpset(&(port->port_status),
1525 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1526 			printf("Port %d cannot be set to closed\n", pi);
1527 	}
1528 
1529 	printf("Done\n");
1530 }
1531 
1532 void
1533 attach_port(char *identifier)
1534 {
1535 	portid_t pi = 0;
1536 	unsigned int socket_id;
1537 
1538 	printf("Attaching a new port...\n");
1539 
1540 	if (identifier == NULL) {
1541 		printf("Invalid parameters are specified\n");
1542 		return;
1543 	}
1544 
1545 	if (rte_eth_dev_attach(identifier, &pi))
1546 		return;
1547 
1548 	ports[pi].enabled = 1;
1549 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1550 	/* if socket_id is invalid, set to 0 */
1551 	if (check_socket_id(socket_id) < 0)
1552 		socket_id = 0;
1553 	reconfig(pi, socket_id);
1554 	rte_eth_promiscuous_enable(pi);
1555 
1556 	nb_ports = rte_eth_dev_count();
1557 
1558 	ports[pi].port_status = RTE_PORT_STOPPED;
1559 
1560 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1561 	printf("Done\n");
1562 }
1563 
1564 void
1565 detach_port(uint8_t port_id)
1566 {
1567 	char name[RTE_ETH_NAME_MAX_LEN];
1568 
1569 	printf("Detaching a port...\n");
1570 
1571 	if (!port_is_closed(port_id)) {
1572 		printf("Please close port first\n");
1573 		return;
1574 	}
1575 
1576 	if (rte_eth_dev_detach(port_id, name))
1577 		return;
1578 
1579 	ports[port_id].enabled = 0;
1580 	nb_ports = rte_eth_dev_count();
1581 
1582 	printf("Port '%s' is detached. Now total ports is %d\n",
1583 			name, nb_ports);
1584 	printf("Done\n");
1585 	return;
1586 }
1587 
1588 void
1589 pmd_test_exit(void)
1590 {
1591 	portid_t pt_id;
1592 
1593 	if (test_done == 0)
1594 		stop_packet_forwarding();
1595 
1596 	if (ports != NULL) {
1597 		no_link_check = 1;
1598 		FOREACH_PORT(pt_id, ports) {
1599 			printf("\nShutting down port %d...\n", pt_id);
1600 			fflush(stdout);
1601 			stop_port(pt_id);
1602 			close_port(pt_id);
1603 		}
1604 	}
1605 	printf("\nBye...\n");
1606 }
1607 
1608 typedef void (*cmd_func_t)(void);
1609 struct pmd_test_command {
1610 	const char *cmd_name;
1611 	cmd_func_t cmd_func;
1612 };
1613 
1614 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1615 
1616 /* Check the link status of all ports for up to 9s, then print the final status. */
1617 static void
1618 check_all_ports_link_status(uint32_t port_mask)
1619 {
1620 #define CHECK_INTERVAL 100 /* 100ms */
1621 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1622 	uint8_t portid, count, all_ports_up, print_flag = 0;
1623 	struct rte_eth_link link;
1624 
1625 	printf("Checking link statuses...\n");
1626 	fflush(stdout);
1627 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1628 		all_ports_up = 1;
1629 		FOREACH_PORT(portid, ports) {
1630 			if ((port_mask & (1 << portid)) == 0)
1631 				continue;
1632 			memset(&link, 0, sizeof(link));
1633 			rte_eth_link_get_nowait(portid, &link);
1634 			/* print link status if flag set */
1635 			if (print_flag == 1) {
1636 				if (link.link_status)
1637 					printf("Port %d Link Up - speed %u "
1638 						"Mbps - %s\n", (uint8_t)portid,
1639 						(unsigned)link.link_speed,
1640 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1641 					("full-duplex") : ("half-duplex\n"));
1642 				else
1643 					printf("Port %d Link Down\n",
1644 						(uint8_t)portid);
1645 				continue;
1646 			}
1647 			/* clear all_ports_up flag if any link down */
1648 			if (link.link_status == ETH_LINK_DOWN) {
1649 				all_ports_up = 0;
1650 				break;
1651 			}
1652 		}
1653 		/* after finally printing all link status, get out */
1654 		if (print_flag == 1)
1655 			break;
1656 
1657 		if (all_ports_up == 0) {
1658 			fflush(stdout);
1659 			rte_delay_ms(CHECK_INTERVAL);
1660 		}
1661 
1662 		/* set the print_flag if all ports up or timeout */
1663 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1664 			print_flag = 1;
1665 		}
1666 	}
1667 }
1668 
1669 static int
1670 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1671 {
1672 	uint16_t i;
1673 	int diag;
1674 	uint8_t mapping_found = 0;
1675 
1676 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1677 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1678 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1679 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1680 					tx_queue_stats_mappings[i].queue_id,
1681 					tx_queue_stats_mappings[i].stats_counter_id);
1682 			if (diag != 0)
1683 				return diag;
1684 			mapping_found = 1;
1685 		}
1686 	}
1687 	if (mapping_found)
1688 		port->tx_queue_stats_mapping_enabled = 1;
1689 	return 0;
1690 }
1691 
1692 static int
1693 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1694 {
1695 	uint16_t i;
1696 	int diag;
1697 	uint8_t mapping_found = 0;
1698 
1699 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1700 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1701 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1702 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1703 					rx_queue_stats_mappings[i].queue_id,
1704 					rx_queue_stats_mappings[i].stats_counter_id);
1705 			if (diag != 0)
1706 				return diag;
1707 			mapping_found = 1;
1708 		}
1709 	}
1710 	if (mapping_found)
1711 		port->rx_queue_stats_mapping_enabled = 1;
1712 	return 0;
1713 }
1714 
1715 static void
1716 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1717 {
1718 	int diag = 0;
1719 
1720 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1721 	if (diag != 0) {
1722 		if (diag == -ENOTSUP) {
1723 			port->tx_queue_stats_mapping_enabled = 0;
1724 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1725 		}
1726 		else
1727 			rte_exit(EXIT_FAILURE,
1728 					"set_tx_queue_stats_mapping_registers "
1729 					"failed for port id=%d diag=%d\n",
1730 					pi, diag);
1731 	}
1732 
1733 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1734 	if (diag != 0) {
1735 		if (diag == -ENOTSUP) {
1736 			port->rx_queue_stats_mapping_enabled = 0;
1737 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1738 		}
1739 		else
1740 			rte_exit(EXIT_FAILURE,
1741 					"set_rx_queue_stats_mapping_registers "
1742 					"failed for port id=%d diag=%d\n",
1743 					pi, diag);
1744 	}
1745 }
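
/*
 * The mapping arrays consumed above are filled from the command line; as an
 * illustration (option syntax as handled by this application's parameter
 * parsing, shown here only as an example):
 *
 *	testpmd ... -- --rx-queue-stats-mapping="(0,0,0),(0,1,1)"
 *
 * maps RX queues 0 and 1 of port 0 onto stats registers 0 and 1, which
 * enables the per-queue counters printed by fwd_port_stats_display().
 */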
1746 
1747 static void
1748 rxtx_port_config(struct rte_port *port)
1749 {
1750 	port->rx_conf = port->dev_info.default_rxconf;
1751 	port->tx_conf = port->dev_info.default_txconf;
1752 
1753 	/* Check if any RX/TX parameters have been passed */
1754 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1755 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1756 
1757 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1758 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1759 
1760 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1761 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1762 
1763 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1764 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1765 
1766 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1767 		port->rx_conf.rx_drop_en = rx_drop_en;
1768 
1769 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1770 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1771 
1772 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1773 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1774 
1775 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1776 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1777 
1778 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1779 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1780 
1781 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1782 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1783 
1784 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1785 		port->tx_conf.txq_flags = txq_flags;
1786 }
1787 
1788 void
1789 init_port_config(void)
1790 {
1791 	portid_t pid;
1792 	struct rte_port *port;
1793 
1794 	FOREACH_PORT(pid, ports) {
1795 		port = &ports[pid];
1796 		port->dev_conf.rxmode = rx_mode;
1797 		port->dev_conf.fdir_conf = fdir_conf;
1798 		if (nb_rxq > 1) {
1799 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1800 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1801 		} else {
1802 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1803 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1804 		}
1805 
1806 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1807 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1808 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1809 			else
1810 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1811 		}
1812 
1813 		if (port->dev_info.max_vfs != 0) {
1814 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1815 				port->dev_conf.rxmode.mq_mode =
1816 					ETH_MQ_RX_VMDQ_RSS;
1817 			else
1818 				port->dev_conf.rxmode.mq_mode =
1819 					ETH_MQ_RX_NONE;
1820 
1821 			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1822 		}
1823 
1824 		rxtx_port_config(port);
1825 
1826 		rte_eth_macaddr_get(pid, &port->eth_addr);
1827 
1828 		map_port_queue_stats_mapping_registers(pid, port);
1829 #ifdef RTE_NIC_BYPASS
1830 		rte_eth_dev_bypass_init(pid);
1831 #endif
1832 	}
1833 }
1834 
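/*
 * Bonding helpers: a port enslaved to a bonded device is flagged here
 * so that the rest of testpmd can treat it specially (e.g. skip
 * configuring it directly while it is a slave).
 */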
1835 void set_port_slave_flag(portid_t slave_pid)
1836 {
1837 	struct rte_port *port;
1838 
1839 	port = &ports[slave_pid];
1840 	port->slave_flag = 1;
1841 }
1842 
1843 void clear_port_slave_flag(portid_t slave_pid)
1844 {
1845 	struct rte_port *port;
1846 
1847 	port = &ports[slave_pid];
1848 	port->slave_flag = 0;
1849 }
1850 
1851 uint8_t port_is_bonding_slave(portid_t slave_pid)
1852 {
1853 	struct rte_port *port;
1854 
1855 	port = &ports[slave_pid];
1856 	return port->slave_flag;
1857 }
1858 
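/* VLAN tags used to build the VMDQ+DCB pool map in get_eth_dcb_conf()
 * and to populate the VLAN filter in init_port_dcb_config() below.
 */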
1859 const uint16_t vlan_tags[] = {
1860 		0,  1,  2,  3,  4,  5,  6,  7,
1861 		8,  9, 10, 11,  12, 13, 14, 15,
1862 		16, 17, 18, 19, 20, 21, 22, 23,
1863 		24, 25, 26, 27, 28, 29, 30, 31
1864 };
1865 
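/*
 * Fill *eth_conf for DCB operation. With DCB_VT_ENABLED the port runs
 * VMDQ+DCB: 4 TCs split the device into 32 pools, 8 TCs into 16, and
 * each entry of vlan_tags[] is mapped round-robin onto a pool, so with
 * 4 TCs vlan_tags[0] selects pool 0, vlan_tags[1] pool 1, and so on up
 * to vlan_tags[31] and pool 31. Without VT, plain DCB is configured
 * with a 1:1 user-priority to TC mapping, RSS on the RX side and DCB on
 * the TX side. pfc_en additionally advertises priority flow control.
 */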
1866 static int
1867 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1868 		 enum dcb_mode_enable dcb_mode,
1869 		 enum rte_eth_nb_tcs num_tcs,
1870 		 uint8_t pfc_en)
1871 {
1872 	uint8_t i;
1873 
1874 	/*
1875 	 * Build the configuration for DCB+VT based on the vlan_tags array
1876 	 * given above and the number of traffic classes available for use.
1877 	 */
1878 	if (dcb_mode == DCB_VT_ENABLED) {
1879 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1880 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
1881 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1882 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1883 
1884 		/* VMDQ+DCB RX and TX configurations */
1885 		vmdq_rx_conf->enable_default_pool = 0;
1886 		vmdq_rx_conf->default_pool = 0;
1887 		vmdq_rx_conf->nb_queue_pools =
1888 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1889 		vmdq_tx_conf->nb_queue_pools =
1890 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1891 
1892 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1893 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1894 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1895 			vmdq_rx_conf->pool_map[i].pools =
1896 				1 << (i % vmdq_rx_conf->nb_queue_pools);
1897 		}
1898 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1899 			vmdq_rx_conf->dcb_tc[i] = i;
1900 			vmdq_tx_conf->dcb_tc[i] = i;
1901 		}
1902 
1903 		/* set the DCB multi-queue mode for RX and TX */
1904 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1905 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1906 	} else {
1907 		struct rte_eth_dcb_rx_conf *rx_conf =
1908 				&eth_conf->rx_adv_conf.dcb_rx_conf;
1909 		struct rte_eth_dcb_tx_conf *tx_conf =
1910 				&eth_conf->tx_adv_conf.dcb_tx_conf;
1911 
1912 		rx_conf->nb_tcs = num_tcs;
1913 		tx_conf->nb_tcs = num_tcs;
1914 
1915 		for (i = 0; i < num_tcs; i++) {
1916 			rx_conf->dcb_tc[i] = i;
1917 			tx_conf->dcb_tc[i] = i;
1918 		}
1919 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1920 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1921 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1922 	}
1923 
1924 	if (pfc_en)
1925 		eth_conf->dcb_capability_en =
1926 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1927 	else
1928 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1929 
1930 	return 0;
1931 }
1932 
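/*
 * Switch a port to DCB operation: resize the global RX/TX queue counts
 * to match the DCB layout, install the configuration built by
 * get_eth_dcb_conf(), and add every entry of vlan_tags[] to the VLAN
 * filter so that the VMDQ+DCB pool mapping can actually be exercised.
 */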
1933 int
1934 init_port_dcb_config(portid_t pid,
1935 		     enum dcb_mode_enable dcb_mode,
1936 		     enum rte_eth_nb_tcs num_tcs,
1937 		     uint8_t pfc_en)
1938 {
1939 	struct rte_eth_conf port_conf;
1940 	struct rte_eth_dev_info dev_info;
1941 	struct rte_port *rte_port;
1942 	int retval;
1943 	uint16_t i;
1944 
1945 	rte_eth_dev_info_get(pid, &dev_info);
1946 
1947 	/* If dev_info.vmdq_pool_base is greater than 0,
1948 	 * the queue IDs of the VMDq pools start after the PF queues.
1949 	 */
1950 	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1951 		printf("VMDQ_DCB multi-queue mode is nonsensical"
1952 			" for port %d.\n", pid);
1953 		return -1;
1954 	}
1955 
1956 	/* Assume all ports in testpmd have the same DCB capability
1957 	 * and the same number of RX and TX queues in DCB mode.
1958 	 */
1959 	if (dcb_mode == DCB_VT_ENABLED) {
1960 		nb_rxq = dev_info.max_rx_queues;
1961 		nb_txq = dev_info.max_tx_queues;
1962 	} else {
1963 		/* if VT is disabled, use all PF queues */
1964 		if (dev_info.vmdq_pool_base == 0) {
1965 			nb_rxq = dev_info.max_rx_queues;
1966 			nb_txq = dev_info.max_tx_queues;
1967 		} else {
1968 			nb_rxq = (queueid_t)num_tcs;
1969 			nb_txq = (queueid_t)num_tcs;
1970 
1971 		}
1972 	}
1973 	rx_free_thresh = 64;
1974 
1975 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1976 	/* Enter DCB configuration status */
1977 	dcb_config = 1;
1978 
1979 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
1980 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1981 	if (retval < 0)
1982 		return retval;
1983 
1984 	rte_port = &ports[pid];
1985 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1986 
1987 	rxtx_port_config(rte_port);
1988 	/* VLAN filter */
1989 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1990 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
1991 		rx_vft_set(pid, vlan_tags[i], 1);
1992 
1993 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1994 	map_port_queue_stats_mapping_registers(pid, rte_port);
1995 
1996 	rte_port->dcb_flag = 1;
1997 
1998 	return 0;
1999 }
2000 
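/*
 * Allocate the ports[] array for the maximum number of Ethernet ports
 * the EAL can handle and mark the probed ones as enabled.
 */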
2001 static void
2002 init_port(void)
2003 {
2004 	portid_t pid;
2005 
2006 	/* Configuration of Ethernet ports. */
2007 	ports = rte_zmalloc("testpmd: ports",
2008 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2009 			    RTE_CACHE_LINE_SIZE);
2010 	if (ports == NULL) {
2011 		rte_exit(EXIT_FAILURE,
2012 				"rte_zmalloc(%d struct rte_port) failed\n",
2013 				RTE_MAX_ETHPORTS);
2014 	}
2015 
2016 	/* enable allocated ports */
2017 	for (pid = 0; pid < nb_ports; pid++)
2018 		ports[pid].enabled = 1;
2019 }
2020 
2021 static void
2022 force_quit(void)
2023 {
2024 	pmd_test_exit();
2025 	prompt_exit();
2026 }
2027 
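/*
 * On SIGINT/SIGTERM: shut the ports down cleanly, then restore the
 * default handler and re-raise the signal so the process terminates
 * with the conventional signal exit status.
 */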
2028 static void
2029 signal_handler(int signum)
2030 {
2031 	if (signum == SIGINT || signum == SIGTERM) {
2032 		printf("\nSignal %d received, preparing to exit...\n",
2033 				signum);
2034 		force_quit();
2035 		/* exit with the expected status */
2036 		signal(signum, SIG_DFL);
2037 		kill(getpid(), signum);
2038 	}
2039 }
2040 
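/*
 * Entry point. EAL arguments come first and are consumed by
 * rte_eal_init(); everything after the "--" separator is handed to
 * launch_args_parse(). An illustrative invocation (core mask, channel
 * count and option values depend on the target machine) could be:
 *
 *   ./testpmd -c 0xf -n 4 -- -i --rxq=2 --txq=2
 */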
2041 int
2042 main(int argc, char **argv)
2043 {
2044 	int  diag;
2045 	uint8_t port_id;
2046 
2047 	signal(SIGINT, signal_handler);
2048 	signal(SIGTERM, signal_handler);
2049 
2050 	diag = rte_eal_init(argc, argv);
2051 	if (diag < 0)
2052 		rte_panic("Cannot init EAL\n");
2053 
2054 	nb_ports = (portid_t) rte_eth_dev_count();
2055 	if (nb_ports == 0)
2056 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2057 
2058 	/* allocate port structures, and init them */
2059 	init_port();
2060 
2061 	set_def_fwd_config();
2062 	if (nb_lcores == 0)
2063 		rte_panic("Empty set of forwarding logical cores - check the "
2064 			  "core mask supplied in the command parameters\n");
2065 
2066 	argc -= diag;
2067 	argv += diag;
2068 	if (argc > 1)
2069 		launch_args_parse(argc, argv);
2070 
2071 	if (!nb_rxq && !nb_txq)
2072 		printf("Warning: either the RX or the TX queue count should be non-zero\n");
2073 
2074 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2075 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2076 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2077 		       nb_rxq, nb_txq);
2078 
2079 	init_config();
2080 	if (start_port(RTE_PORT_ALL) != 0)
2081 		rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2082 
2083 	/* set all ports to promiscuous mode by default */
2084 	FOREACH_PORT(port_id, ports)
2085 		rte_eth_promiscuous_enable(port_id);
2086 
2087 #ifdef RTE_LIBRTE_CMDLINE
2088 	if (interactive == 1) {
2089 		if (auto_start) {
2090 			printf("Start automatic packet forwarding\n");
2091 			start_packet_forwarding(0);
2092 		}
2093 		prompt();
2094 	} else
2095 #endif
2096 	{
2097 		char c;
2098 		int rc;
2099 
2100 		printf("Interactive command line not enabled, starting packet forwarding\n");
2101 		start_packet_forwarding(0);
2102 		printf("Press enter to exit\n");
2103 		rc = read(0, &c, 1);
2104 		pmd_test_exit();
2105 		if (rc < 0)
2106 			return 1;
2107 	}
2108 
2109 	return 0;
2110 }
2111