xref: /dpdk/app/test-pmd/testpmd.c (revision 869fede8d6795ea2b1e528f79b93a363eeff3ad0)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90 
91 #include "testpmd.h"
92 
93 uint16_t verbose_level = 0; /**< Silent by default. */
94 
95 /* Use the master core for the command line? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 
99 /*
100  * NUMA support configuration.
101  * When set, NUMA support dispatches the allocation of the RX and TX
102  * memory rings, and of the DMA memory buffers (mbufs) for the probed
103  * ports, among CPU sockets 0 and 1.
104  * Otherwise, all memory is allocated from CPU socket 0.
105  */
106 uint8_t numa_support = 1; /**< numa enabled by default */
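
/*
 * Illustrative invocation, assuming the standard testpmd command-line
 * options: disabling NUMA and forcing all allocations onto socket 1
 * would look roughly like
 *
 *   testpmd -l 0-3 -n 4 -- --no-numa --socket-num=1
 *
 * while --port-numa-config and --ring-numa-config give per-port control
 * when NUMA support stays enabled.
 */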
107 
108 /*
109  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
110  * not configured.
111  */
112 uint8_t socket_num = UMA_NO_CONFIG;
113 
114 /*
115  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
116  */
117 uint8_t mp_anon = 0;
118 
119 /*
120  * Record the Ethernet address of peer target ports to which packets are
121  * forwarded.
122  * Must be instantiated with the ethernet addresses of peer traffic generator
123  * ports.
124  */
125 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
126 portid_t nb_peer_eth_addrs = 0;
127 
128 /*
129  * Probed Target Environment.
130  */
131 struct rte_port *ports;	       /**< For all probed ethernet ports. */
132 portid_t nb_ports;             /**< Number of probed ethernet ports. */
133 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
134 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
135 
136 /*
137  * Test Forwarding Configuration.
138  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
139  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
140  */
141 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
142 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
143 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
144 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
145 
146 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
147 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
148 
149 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
150 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
151 
152 /*
153  * Forwarding engines.
154  */
155 struct fwd_engine * fwd_engines[] = {
156 	&io_fwd_engine,
157 	&mac_fwd_engine,
158 	&mac_swap_engine,
159 	&flow_gen_engine,
160 	&rx_only_engine,
161 	&tx_only_engine,
162 	&csum_fwd_engine,
163 	&icmp_echo_engine,
164 #ifdef RTE_LIBRTE_IEEE1588
165 	&ieee1588_fwd_engine,
166 #endif
167 	NULL,
168 };
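
/*
 * The NULL sentinel allows the table to be scanned by name. A minimal
 * lookup sketch (illustrative only; the real resolution lives elsewhere
 * in testpmd, e.g. config.c):
 *
 *	static struct fwd_engine *
 *	fwd_engine_by_name(const char *name)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; fwd_engines[i] != NULL; i++)
 *			if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
 *				return fwd_engines[i];
 *		return NULL;
 *	}
 */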
169 
170 struct fwd_config cur_fwd_config;
171 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
172 uint32_t retry_enabled;
173 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
174 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
175 
176 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
177 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
178                                       * specified on command-line. */
179 
180 /*
181  * Configuration of packet segments used by the "txonly" processing engine.
182  */
183 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
184 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
185 	TXONLY_DEF_PACKET_LEN,
186 };
187 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
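
/*
 * These defaults can be changed at run time; assuming the usual testpmd
 * console command, e.g.
 *
 *   testpmd> set txpkts 64,64
 *
 * each TXONLY packet would then carry two 64-byte segments
 * (tx_pkt_nb_segs = 2, total tx_pkt_length = 128).
 */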
188 
189 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
190 /**< Split policy for packets to TX. */
191 
192 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
193 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
194 
195 /* Whether the current configuration is in DCB mode; 0 means it is not. */
196 uint8_t dcb_config = 0;
197 
198 /* Whether DCB is in testing status. */
199 uint8_t dcb_test = 0;
200 
201 /*
202  * Configurable number of RX/TX queues.
203  */
204 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
205 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
206 
207 /*
208  * Configurable number of RX/TX ring descriptors.
209  */
210 #define RTE_TEST_RX_DESC_DEFAULT 128
211 #define RTE_TEST_TX_DESC_DEFAULT 512
212 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
213 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
214 
215 #define RTE_PMD_PARAM_UNSET -1
216 /*
217  * Configurable values of RX and TX ring threshold registers.
218  */
219 
220 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
221 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
223 
224 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
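
/*
 * pthresh, hthresh and wthresh map to the PMD prefetch, host and
 * write-back threshold registers respectively; RTE_PMD_PARAM_UNSET
 * keeps the driver defaults (see rxtx_port_config() below).
 */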
227 
228 /*
229  * Configurable value of RX free threshold.
230  */
231 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
232 
233 /*
234  * Configurable value of RX drop enable.
235  */
236 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
237 
238 /*
239  * Configurable value of TX free threshold.
240  */
241 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
242 
243 /*
244  * Configurable value of TX RS bit threshold.
245  */
246 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
247 
248 /*
249  * Configurable value of TX queue flags.
250  */
251 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
252 
253 /*
254  * Receive Side Scaling (RSS) configuration.
255  */
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
257 
258 /*
259  * Port topology configuration
260  */
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
262 
263 /*
264  * Avoid flushing all the RX streams before starting forwarding.
265  */
266 uint8_t no_flush_rx = 0; /* flush by default */
267 
268 /*
269  * Avoid checking the link status when starting/stopping a port.
270  */
271 uint8_t no_link_check = 0; /* check by default */
272 
273 /*
274  * Enable link status change notification
275  */
276 uint8_t lsc_interrupt = 1; /* enabled by default */
277 
278 /*
279  * Enable device removal notification.
280  */
281 uint8_t rmv_interrupt = 1; /* enabled by default */
282 
283 /*
284  * NIC bypass mode configuration options.
285  */
286 #ifdef RTE_NIC_BYPASS
287 
288 /* The NIC bypass watchdog timeout. */
289 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
290 
291 #endif
292 
293 #ifdef RTE_LIBRTE_LATENCY_STATS
294 
295 /*
296  * Set when latency stats are enabled on the command line.
297  */
298 uint8_t latencystats_enabled;
299 
300 /*
301  * Lcore ID to serve latency statistics.
302  */
303 lcoreid_t latencystats_lcore_id = -1;
304 
305 #endif
306 
307 /*
308  * Ethernet device configuration.
309  */
310 struct rte_eth_rxmode rx_mode = {
311 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
312 	.split_hdr_size = 0,
313 	.header_split   = 0, /**< Header Split disabled. */
314 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
315 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
316 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
317 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
318 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
319 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
320 };
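
/*
 * Sketch of how these defaults would be overridden before the ports are
 * (re)configured, e.g. to accept 9000-byte jumbo frames (illustrative
 * only; the interactive "port config" commands are the usual way):
 *
 *	rx_mode.jumbo_frame = 1;
 *	rx_mode.max_rx_pkt_len = 9000;
 */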
321 
322 struct rte_fdir_conf fdir_conf = {
323 	.mode = RTE_FDIR_MODE_NONE,
324 	.pballoc = RTE_FDIR_PBALLOC_64K,
325 	.status = RTE_FDIR_REPORT_STATUS,
326 	.mask = {
327 		.vlan_tci_mask = 0x0,
328 		.ipv4_mask     = {
329 			.src_ip = 0xFFFFFFFF,
330 			.dst_ip = 0xFFFFFFFF,
331 		},
332 		.ipv6_mask     = {
333 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
334 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
335 		},
336 		.src_port_mask = 0xFFFF,
337 		.dst_port_mask = 0xFFFF,
338 		.mac_addr_byte_mask = 0xFF,
339 		.tunnel_type_mask = 1,
340 		.tunnel_id_mask = 0xFFFFFFFF,
341 	},
342 	.drop_queue = 127,
343 };
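
/*
 * In the flow director masks above, all-ones fields take part in
 * matching bit-for-bit, while zeroed bits (e.g. vlan_tci_mask = 0x0)
 * are excluded from the lookup key.
 */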
344 
345 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
346 
347 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
348 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
349 
350 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
351 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
352 
353 uint16_t nb_tx_queue_stats_mappings = 0;
354 uint16_t nb_rx_queue_stats_mappings = 0;
355 
356 unsigned max_socket = 0;
357 
358 /* Bitrate statistics */
359 struct rte_stats_bitrates *bitrate_data;
360 
361 /* Forward function declarations */
362 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
363 static void check_all_ports_link_status(uint32_t port_mask);
364 static void eth_event_callback(uint8_t port_id,
365 			       enum rte_eth_event_type type,
366 			       void *param);
367 
368 /*
369  * Check if all the ports are started.
370  * If yes, return positive value. If not, return zero.
371  */
372 static int all_ports_started(void);
373 
374 /*
375  * Setup default configuration.
376  */
377 static void
378 set_default_fwd_lcores_config(void)
379 {
380 	unsigned int i;
381 	unsigned int nb_lc;
382 	unsigned int sock_num;
383 
384 	nb_lc = 0;
385 	for (i = 0; i < RTE_MAX_LCORE; i++) {
386 		sock_num = rte_lcore_to_socket_id(i) + 1;
387 		if (sock_num > max_socket) {
388 			if (sock_num > RTE_MAX_NUMA_NODES)
389 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
390 			max_socket = sock_num;
391 		}
392 		if (!rte_lcore_is_enabled(i))
393 			continue;
394 		if (i == rte_get_master_lcore())
395 			continue;
396 		fwd_lcores_cpuids[nb_lc++] = i;
397 	}
398 	nb_lcores = (lcoreid_t) nb_lc;
399 	nb_cfg_lcores = nb_lcores;
400 	nb_fwd_lcores = 1;
401 }
402 
403 static void
404 set_def_peer_eth_addrs(void)
405 {
406 	portid_t i;
407 
408 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
409 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
410 		peer_eth_addrs[i].addr_bytes[5] = i;
411 	}
412 }
413 
414 static void
415 set_default_fwd_ports_config(void)
416 {
417 	portid_t pt_id;
418 
419 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
420 		fwd_ports_ids[pt_id] = pt_id;
421 
422 	nb_cfg_ports = nb_ports;
423 	nb_fwd_ports = nb_ports;
424 }
425 
426 void
427 set_def_fwd_config(void)
428 {
429 	set_default_fwd_lcores_config();
430 	set_def_peer_eth_addrs();
431 	set_default_fwd_ports_config();
432 }
433 
434 /*
435  * Configuration initialisation done once at init time.
436  */
437 static void
438 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
439 		 unsigned int socket_id)
440 {
441 	char pool_name[RTE_MEMPOOL_NAMESIZE];
442 	struct rte_mempool *rte_mp = NULL;
443 	uint32_t mb_size;
444 
445 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
446 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
447 
448 	RTE_LOG(INFO, USER1,
449 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
450 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
451 
452 #ifdef RTE_LIBRTE_PMD_XENVIRT
453 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
454 		(unsigned) mb_mempool_cache,
455 		sizeof(struct rte_pktmbuf_pool_private),
456 		rte_pktmbuf_pool_init, NULL,
457 		rte_pktmbuf_init, NULL,
458 		socket_id, 0);
459 #endif
460 
461 	/* If the former Xen allocation failed, fall back to normal allocation. */
462 	if (rte_mp == NULL) {
463 		if (mp_anon != 0) {
464 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
465 				mb_size, (unsigned) mb_mempool_cache,
466 				sizeof(struct rte_pktmbuf_pool_private),
467 				socket_id, 0);
468 			if (rte_mp == NULL)
469 				goto err;
470 
471 			if (rte_mempool_populate_anon(rte_mp) == 0) {
472 				rte_mempool_free(rte_mp);
473 				rte_mp = NULL;
474 				goto err;
475 			}
476 			rte_pktmbuf_pool_init(rte_mp, NULL);
477 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
478 		} else {
479 			/* wrapper to rte_mempool_create() */
480 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
481 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
482 		}
483 	}
484 
485 err:
486 	if (rte_mp == NULL) {
487 		rte_exit(EXIT_FAILURE,
488 			"Creation of mbuf pool for socket %u failed: %s\n",
489 			socket_id, rte_strerror(rte_errno));
490 	} else if (verbose_level > 0) {
491 		rte_mempool_dump(stdout, rte_mp);
492 	}
493 }
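
/*
 * Callers pass the per-mbuf data-room size, the element count and the
 * target socket; init_config() below, for example, requests one pool
 * per detected socket:
 *
 *	mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, socket_id);
 */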
494 
495 /*
496  * Check whether the given socket ID is valid in NUMA mode.
497  * If valid, return 0, otherwise return -1.
498  */
499 static int
500 check_socket_id(const unsigned int socket_id)
501 {
502 	static int warning_once = 0;
503 
504 	if (socket_id >= max_socket) {
505 		if (!warning_once && numa_support)
506 			printf("Warning: NUMA should be configured manually by"
507 			       " using --port-numa-config and"
508 			       " --ring-numa-config parameters along with"
509 			       " --numa.\n");
510 		warning_once = 1;
511 		return -1;
512 	}
513 	return 0;
514 }
515 
516 static void
517 init_config(void)
518 {
519 	portid_t pid;
520 	struct rte_port *port;
521 	struct rte_mempool *mbp;
522 	unsigned int nb_mbuf_per_pool;
523 	lcoreid_t  lc_id;
524 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
525 
526 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
527 	/* Configuration of logical cores. */
528 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
529 				sizeof(struct fwd_lcore *) * nb_lcores,
530 				RTE_CACHE_LINE_SIZE);
531 	if (fwd_lcores == NULL) {
532 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
533 							"failed\n", nb_lcores);
534 	}
535 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
536 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
537 					       sizeof(struct fwd_lcore),
538 					       RTE_CACHE_LINE_SIZE);
539 		if (fwd_lcores[lc_id] == NULL) {
540 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
541 								"failed\n");
542 		}
543 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
544 	}
545 
546 	/*
547 	 * Create mbuf pools.
548 	 * If NUMA support is disabled, create a single mbuf pool in
549 	 * socket 0 memory by default.
550 	 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
551 	 *
552 	 * Use the maximum of nb_rxd and nb_txd here, so that nb_rxd and
553 	 * nb_txd can be reconfigured at run time.
554 	 */
555 	if (param_total_num_mbufs)
556 		nb_mbuf_per_pool = param_total_num_mbufs;
557 	else {
558 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
559 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
560 
561 		if (!numa_support)
562 			nb_mbuf_per_pool =
563 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
564 	}
565 
566 	if (!numa_support) {
567 		if (socket_num == UMA_NO_CONFIG)
568 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
569 		else
570 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
571 						 socket_num);
572 	}
573 
574 	RTE_ETH_FOREACH_DEV(pid) {
575 		port = &ports[pid];
576 		rte_eth_dev_info_get(pid, &port->dev_info);
577 
578 		if (numa_support) {
579 			if (port_numa[pid] != NUMA_NO_CONFIG)
580 				port_per_socket[port_numa[pid]]++;
581 			else {
582 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
583 
584 				/* if socket_id is invalid, set to 0 */
585 				if (check_socket_id(socket_id) < 0)
586 					socket_id = 0;
587 				port_per_socket[socket_id]++;
588 			}
589 		}
590 
591 		/* set flag to initialize port/queue */
592 		port->need_reconfig = 1;
593 		port->need_reconfig_queues = 1;
594 	}
595 
596 	if (numa_support) {
597 		uint8_t i;
598 		unsigned int nb_mbuf;
599 
600 		if (param_total_num_mbufs)
601 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
602 
603 		for (i = 0; i < max_socket; i++) {
604 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
605 			if (nb_mbuf)
606 				mbuf_pool_create(mbuf_data_size,
607 						nb_mbuf, i);
608 		}
609 	}
610 	init_port_config();
611 
612 	/*
613 	 * Record which mbuf pool each logical core should use, if needed.
614 	 */
615 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
616 		mbp = mbuf_pool_find(
617 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
618 
619 		if (mbp == NULL)
620 			mbp = mbuf_pool_find(0);
621 		fwd_lcores[lc_id]->mbp = mbp;
622 	}
623 
624 	/* Configuration of packet forwarding streams. */
625 	if (init_fwd_streams() < 0)
626 		rte_exit(EXIT_FAILURE, "init_fwd_streams() failed\n");
627 
628 	fwd_config_setup();
629 }
630 
631 
632 void
633 reconfig(portid_t new_port_id, unsigned socket_id)
634 {
635 	struct rte_port *port;
636 
637 	/* Reconfiguration of Ethernet ports. */
638 	port = &ports[new_port_id];
639 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
640 
641 	/* set flag to initialize port/queue */
642 	port->need_reconfig = 1;
643 	port->need_reconfig_queues = 1;
644 	port->socket_id = socket_id;
645 
646 	init_port_config();
647 }
648 
649 
650 int
651 init_fwd_streams(void)
652 {
653 	portid_t pid;
654 	struct rte_port *port;
655 	streamid_t sm_id, nb_fwd_streams_new;
656 	queueid_t q;
657 
658 	/* Set the socket ID according to whether NUMA is enabled. */
659 	RTE_ETH_FOREACH_DEV(pid) {
660 		port = &ports[pid];
661 		if (nb_rxq > port->dev_info.max_rx_queues) {
662 			printf("Fail: nb_rxq(%d) is greater than "
663 				"max_rx_queues(%d)\n", nb_rxq,
664 				port->dev_info.max_rx_queues);
665 			return -1;
666 		}
667 		if (nb_txq > port->dev_info.max_tx_queues) {
668 			printf("Fail: nb_txq(%d) is greater than "
669 				"max_tx_queues(%d)\n", nb_txq,
670 				port->dev_info.max_tx_queues);
671 			return -1;
672 		}
673 		if (numa_support) {
674 			if (port_numa[pid] != NUMA_NO_CONFIG)
675 				port->socket_id = port_numa[pid];
676 			else {
677 				port->socket_id = rte_eth_dev_socket_id(pid);
678 
679 				/* if socket_id is invalid, set to 0 */
680 				if (check_socket_id(port->socket_id) < 0)
681 					port->socket_id = 0;
682 			}
683 		}
684 		else {
685 			if (socket_num == UMA_NO_CONFIG)
686 				port->socket_id = 0;
687 			else
688 				port->socket_id = socket_num;
689 		}
690 	}
691 
692 	q = RTE_MAX(nb_rxq, nb_txq);
693 	if (q == 0) {
694 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
695 		return -1;
696 	}
697 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
698 	if (nb_fwd_streams_new == nb_fwd_streams)
699 		return 0;
700 	/* clear the old */
701 	if (fwd_streams != NULL) {
702 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
703 			if (fwd_streams[sm_id] == NULL)
704 				continue;
705 			rte_free(fwd_streams[sm_id]);
706 			fwd_streams[sm_id] = NULL;
707 		}
708 		rte_free(fwd_streams);
709 		fwd_streams = NULL;
710 	}
711 
712 	/* init new */
713 	nb_fwd_streams = nb_fwd_streams_new;
714 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
715 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
716 	if (fwd_streams == NULL)
717 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
718 						"failed\n", nb_fwd_streams);
719 
720 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
721 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
722 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
723 		if (fwd_streams[sm_id] == NULL)
724 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
725 								" failed\n");
726 	}
727 
728 	return 0;
729 }
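
/*
 * Worked example of the sizing above: with nb_ports = 2, nb_rxq = 4
 * and nb_txq = 2, q = RTE_MAX(4, 2) = 4, so nb_fwd_streams = 2 * 4 = 8
 * stream slots are allocated.
 */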
730 
731 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
732 static void
733 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
734 {
735 	unsigned int total_burst;
736 	unsigned int nb_burst;
737 	unsigned int burst_stats[3];
738 	uint16_t pktnb_stats[3];
739 	uint16_t nb_pkt;
740 	int burst_percent[3];
741 
742 	/*
743 	 * First compute the total number of packet bursts and the
744 	 * two highest numbers of bursts of the same number of packets.
745 	 */
746 	total_burst = 0;
747 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
748 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
749 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
750 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
751 		if (nb_burst == 0)
752 			continue;
753 		total_burst += nb_burst;
754 		if (nb_burst > burst_stats[0]) {
755 			burst_stats[1] = burst_stats[0];
756 			pktnb_stats[1] = pktnb_stats[0];
757 			burst_stats[0] = nb_burst;
758 			pktnb_stats[0] = nb_pkt;
759 		} else if (nb_burst > burst_stats[1]) {
			/* Track the second-highest burst count too,
			 * as the comment above promises. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
760 	}
761 	if (total_burst == 0)
762 		return;
763 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
764 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
765 	       burst_percent[0], (int) pktnb_stats[0]);
766 	if (burst_stats[0] == total_burst) {
767 		printf("]\n");
768 		return;
769 	}
770 	if (burst_stats[0] + burst_stats[1] == total_burst) {
771 		printf(" + %d%% of %d pkts]\n",
772 		       100 - burst_percent[0], pktnb_stats[1]);
773 		return;
774 	}
775 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
776 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
777 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
778 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
779 		return;
780 	}
781 	printf(" + %d%% of %d pkts + %d%% of others]\n",
782 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
783 }
784 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
785 
786 static void
787 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
788 {
789 	struct rte_port *port;
790 	uint8_t i;
791 
792 	static const char *fwd_stats_border = "----------------------";
793 
794 	port = &ports[port_id];
795 	printf("\n  %s Forward statistics for port %-2d %s\n",
796 	       fwd_stats_border, port_id, fwd_stats_border);
797 
798 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
799 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
800 		       "%-"PRIu64"\n",
801 		       stats->ipackets, stats->imissed,
802 		       (uint64_t) (stats->ipackets + stats->imissed));
803 
804 		if (cur_fwd_eng == &csum_fwd_engine)
805 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
806 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
807 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
808 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
809 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
810 		}
811 
812 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
813 		       "%-"PRIu64"\n",
814 		       stats->opackets, port->tx_dropped,
815 		       (uint64_t) (stats->opackets + port->tx_dropped));
816 	}
817 	else {
818 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
819 		       "%14"PRIu64"\n",
820 		       stats->ipackets, stats->imissed,
821 		       (uint64_t) (stats->ipackets + stats->imissed));
822 
823 		if (cur_fwd_eng == &csum_fwd_engine)
824 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
825 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
826 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
827 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
828 			printf("  RX-nombufs:             %14"PRIu64"\n",
829 			       stats->rx_nombuf);
830 		}
831 
832 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
833 		       "%14"PRIu64"\n",
834 		       stats->opackets, port->tx_dropped,
835 		       (uint64_t) (stats->opackets + port->tx_dropped));
836 	}
837 
838 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
839 	if (port->rx_stream)
840 		pkt_burst_stats_display("RX",
841 			&port->rx_stream->rx_burst_stats);
842 	if (port->tx_stream)
843 		pkt_burst_stats_display("TX",
844 			&port->tx_stream->tx_burst_stats);
845 #endif
846 
847 	if (port->rx_queue_stats_mapping_enabled) {
848 		printf("\n");
849 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
850 			printf("  Stats reg %2d RX-packets:%14"PRIu64
851 			       "     RX-errors:%14"PRIu64
852 			       "    RX-bytes:%14"PRIu64"\n",
853 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
854 		}
855 		printf("\n");
856 	}
857 	if (port->tx_queue_stats_mapping_enabled) {
858 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
859 			printf("  Stats reg %2d TX-packets:%14"PRIu64
860 			       "                                 TX-bytes:%14"PRIu64"\n",
861 			       i, stats->q_opackets[i], stats->q_obytes[i]);
862 		}
863 	}
864 
865 	printf("  %s--------------------------------%s\n",
866 	       fwd_stats_border, fwd_stats_border);
867 }
868 
869 static void
870 fwd_stream_stats_display(streamid_t stream_id)
871 {
872 	struct fwd_stream *fs;
873 	static const char *fwd_top_stats_border = "-------";
874 
875 	fs = fwd_streams[stream_id];
876 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
877 	    (fs->fwd_dropped == 0))
878 		return;
879 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
880 	       "TX Port=%2d/Queue=%2d %s\n",
881 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
882 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
883 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
884 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
885 
886 	/* if checksum mode */
887 	if (cur_fwd_eng == &csum_fwd_engine) {
888 		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
889 		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
890 	}
891 
892 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
893 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
894 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
895 #endif
896 }
897 
898 static void
899 flush_fwd_rx_queues(void)
900 {
901 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
902 	portid_t  rxp;
903 	portid_t port_id;
904 	queueid_t rxq;
905 	uint16_t  nb_rx;
906 	uint16_t  i;
907 	uint8_t   j;
908 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
909 	uint64_t timer_period;
910 
911 	/* convert to number of cycles */
912 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
913 
914 	for (j = 0; j < 2; j++) {
915 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
916 			for (rxq = 0; rxq < nb_rxq; rxq++) {
917 				port_id = fwd_ports_ids[rxp];
918 				/*
919 				 * testpmd can get stuck in the do-while loop
920 				 * below if rte_eth_rx_burst() always returns
921 				 * nonzero packets, so a timer is added to
922 				 * exit the loop after a 1-second expiry.
923 				 */
924 				prev_tsc = rte_rdtsc();
925 				do {
926 					nb_rx = rte_eth_rx_burst(port_id, rxq,
927 						pkts_burst, MAX_PKT_BURST);
928 					for (i = 0; i < nb_rx; i++)
929 						rte_pktmbuf_free(pkts_burst[i]);
930 
931 					cur_tsc = rte_rdtsc();
932 					diff_tsc = cur_tsc - prev_tsc;
933 					timer_tsc += diff_tsc;
934 				} while ((nb_rx > 0) &&
935 					(timer_tsc < timer_period));
936 				timer_tsc = 0;
937 			}
938 		}
939 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
940 	}
941 }
942 
943 static void
944 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
945 {
946 	struct fwd_stream **fsm;
947 	streamid_t nb_fs;
948 	streamid_t sm_id;
949 #ifdef RTE_LIBRTE_BITRATE
950 	uint64_t tics_per_1sec;
951 	uint64_t tics_datum;
952 	uint64_t tics_current;
953 	uint8_t idx_port, cnt_ports;
954 
955 	cnt_ports = rte_eth_dev_count();
956 	tics_datum = rte_rdtsc();
957 	tics_per_1sec = rte_get_timer_hz();
958 #endif
959 	fsm = &fwd_streams[fc->stream_idx];
960 	nb_fs = fc->stream_nb;
961 	do {
962 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
963 			(*pkt_fwd)(fsm[sm_id]);
964 #ifdef RTE_LIBRTE_BITRATE
965 		tics_current = rte_rdtsc();
966 		if (tics_current - tics_datum >= tics_per_1sec) {
967 			/* Periodic bitrate calculation */
968 			for (idx_port = 0; idx_port < cnt_ports; idx_port++)
969 				rte_stats_bitrate_calc(bitrate_data, idx_port);
970 			tics_datum = tics_current;
971 		}
972 #endif
973 #ifdef RTE_LIBRTE_LATENCY_STATS
974 		if (latencystats_lcore_id == rte_lcore_id())
975 			rte_latencystats_update();
976 #endif
977 
978 	} while (! fc->stopped);
979 }
980 
981 static int
982 start_pkt_forward_on_core(void *fwd_arg)
983 {
984 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
985 			     cur_fwd_config.fwd_eng->packet_fwd);
986 	return 0;
987 }
988 
989 /*
990  * Run the TXONLY packet forwarding engine to send a single burst of packets.
991  * Used to start communication flows in network loopback test configurations.
992  */
993 static int
994 run_one_txonly_burst_on_core(void *fwd_arg)
995 {
996 	struct fwd_lcore *fwd_lc;
997 	struct fwd_lcore tmp_lcore;
998 
999 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1000 	tmp_lcore = *fwd_lc;
1001 	tmp_lcore.stopped = 1;
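	/*
	 * With stopped pre-set to 1, the do-while loop in
	 * run_pkt_fwd_on_lcore() executes its body exactly once:
	 * every stream forwards a single burst, then the call returns.
	 */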
1002 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1003 	return 0;
1004 }
1005 
1006 /*
1007  * Launch packet forwarding:
1008  *     - Set up the per-port forwarding context.
1009  *     - Launch logical cores with their forwarding configuration.
1010  */
1011 static void
1012 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1013 {
1014 	port_fwd_begin_t port_fwd_begin;
1015 	unsigned int i;
1016 	unsigned int lc_id;
1017 	int diag;
1018 
1019 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1020 	if (port_fwd_begin != NULL) {
1021 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1022 			(*port_fwd_begin)(fwd_ports_ids[i]);
1023 	}
1024 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1025 		lc_id = fwd_lcores_cpuids[i];
1026 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1027 			fwd_lcores[i]->stopped = 0;
1028 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1029 						     fwd_lcores[i], lc_id);
1030 			if (diag != 0)
1031 				printf("launch lcore %u failed - diag=%d\n",
1032 				       lc_id, diag);
1033 		}
1034 	}
1035 }
1036 
1037 /*
1038  * Launch packet forwarding configuration.
1039  */
1040 void
1041 start_packet_forwarding(int with_tx_first)
1042 {
1043 	port_fwd_begin_t port_fwd_begin;
1044 	port_fwd_end_t  port_fwd_end;
1045 	struct rte_port *port;
1046 	unsigned int i;
1047 	portid_t   pt_id;
1048 	streamid_t sm_id;
1049 
1050 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1051 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1052 
1053 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1054 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1055 
1056 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1057 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1058 		(!nb_rxq || !nb_txq))
1059 		rte_exit(EXIT_FAILURE,
1060 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1061 			cur_fwd_eng->fwd_mode_name);
1062 
1063 	if (all_ports_started() == 0) {
1064 		printf("Not all ports were started\n");
1065 		return;
1066 	}
1067 	if (test_done == 0) {
1068 		printf("Packet forwarding already started\n");
1069 		return;
1070 	}
1071 
1072 	if (init_fwd_streams() < 0) {
1073 		printf("init_fwd_streams() failed\n");
1074 		return;
1075 	}
1076 
1077 	if (dcb_test) {
1078 		for (i = 0; i < nb_fwd_ports; i++) {
1079 			pt_id = fwd_ports_ids[i];
1080 			port = &ports[pt_id];
1081 			if (!port->dcb_flag) {
1082 				printf("In DCB mode, all forwarding ports must "
1083 				       "be configured in this mode.\n");
1084 				return;
1085 			}
1086 		}
1087 		if (nb_fwd_lcores == 1) {
1088 			printf("In DCB mode, the number of forwarding cores "
1089 			       "should be larger than 1.\n");
1090 			return;
1091 		}
1092 	}
1093 	test_done = 0;
1094 
1095 	if (!no_flush_rx)
1096 		flush_fwd_rx_queues();
1097 
1098 	fwd_config_setup();
1099 	pkt_fwd_config_display(&cur_fwd_config);
1100 	rxtx_config_display();
1101 
1102 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1103 		pt_id = fwd_ports_ids[i];
1104 		port = &ports[pt_id];
1105 		rte_eth_stats_get(pt_id, &port->stats);
1106 		port->tx_dropped = 0;
1107 
1108 		map_port_queue_stats_mapping_registers(pt_id, port);
1109 	}
1110 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1111 		fwd_streams[sm_id]->rx_packets = 0;
1112 		fwd_streams[sm_id]->tx_packets = 0;
1113 		fwd_streams[sm_id]->fwd_dropped = 0;
1114 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1115 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1116 
1117 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1118 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1119 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1120 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1121 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1122 #endif
1123 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1124 		fwd_streams[sm_id]->core_cycles = 0;
1125 #endif
1126 	}
1127 	if (with_tx_first) {
1128 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1129 		if (port_fwd_begin != NULL) {
1130 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1131 				(*port_fwd_begin)(fwd_ports_ids[i]);
1132 		}
1133 		while (with_tx_first--) {
1134 			launch_packet_forwarding(
1135 					run_one_txonly_burst_on_core);
1136 			rte_eal_mp_wait_lcore();
1137 		}
1138 		port_fwd_end = tx_only_engine.port_fwd_end;
1139 		if (port_fwd_end != NULL) {
1140 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1141 				(*port_fwd_end)(fwd_ports_ids[i]);
1142 		}
1143 	}
1144 	launch_packet_forwarding(start_pkt_forward_on_core);
1145 }
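
/*
 * Typical interactive use (assuming the usual testpmd prompt): "start"
 * corresponds to with_tx_first == 0, while "start tx_first [n]" sends n
 * initial TXONLY bursts to prime loopback topologies before normal
 * forwarding begins.
 */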
1146 
1147 void
1148 stop_packet_forwarding(void)
1149 {
1150 	struct rte_eth_stats stats;
1151 	struct rte_port *port;
1152 	port_fwd_end_t  port_fwd_end;
1153 	int i;
1154 	portid_t   pt_id;
1155 	streamid_t sm_id;
1156 	lcoreid_t  lc_id;
1157 	uint64_t total_recv;
1158 	uint64_t total_xmit;
1159 	uint64_t total_rx_dropped;
1160 	uint64_t total_tx_dropped;
1161 	uint64_t total_rx_nombuf;
1162 	uint64_t tx_dropped;
1163 	uint64_t rx_bad_ip_csum;
1164 	uint64_t rx_bad_l4_csum;
1165 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1166 	uint64_t fwd_cycles;
1167 #endif
1168 	static const char *acc_stats_border = "+++++++++++++++";
1169 
1170 	if (test_done) {
1171 		printf("Packet forwarding not started\n");
1172 		return;
1173 	}
1174 	printf("Telling cores to stop...");
1175 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1176 		fwd_lcores[lc_id]->stopped = 1;
1177 	printf("\nWaiting for lcores to finish...\n");
1178 	rte_eal_mp_wait_lcore();
1179 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1180 	if (port_fwd_end != NULL) {
1181 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1182 			pt_id = fwd_ports_ids[i];
1183 			(*port_fwd_end)(pt_id);
1184 		}
1185 	}
1186 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1187 	fwd_cycles = 0;
1188 #endif
1189 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1190 		if (cur_fwd_config.nb_fwd_streams >
1191 		    cur_fwd_config.nb_fwd_ports) {
1192 			fwd_stream_stats_display(sm_id);
1193 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1194 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1195 		} else {
1196 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1197 				fwd_streams[sm_id];
1198 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1199 				fwd_streams[sm_id];
1200 		}
1201 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1202 		tx_dropped = (uint64_t) (tx_dropped +
1203 					 fwd_streams[sm_id]->fwd_dropped);
1204 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1205 
1206 		rx_bad_ip_csum =
1207 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1208 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1209 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1210 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1211 							rx_bad_ip_csum;
1212 
1213 		rx_bad_l4_csum =
1214 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1215 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1216 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1217 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1218 							rx_bad_l4_csum;
1219 
1220 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1221 		fwd_cycles = (uint64_t) (fwd_cycles +
1222 					 fwd_streams[sm_id]->core_cycles);
1223 #endif
1224 	}
1225 	total_recv = 0;
1226 	total_xmit = 0;
1227 	total_rx_dropped = 0;
1228 	total_tx_dropped = 0;
1229 	total_rx_nombuf  = 0;
1230 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1231 		pt_id = fwd_ports_ids[i];
1232 
1233 		port = &ports[pt_id];
1234 		rte_eth_stats_get(pt_id, &stats);
1235 		stats.ipackets -= port->stats.ipackets;
1236 		port->stats.ipackets = 0;
1237 		stats.opackets -= port->stats.opackets;
1238 		port->stats.opackets = 0;
1239 		stats.ibytes   -= port->stats.ibytes;
1240 		port->stats.ibytes = 0;
1241 		stats.obytes   -= port->stats.obytes;
1242 		port->stats.obytes = 0;
1243 		stats.imissed  -= port->stats.imissed;
1244 		port->stats.imissed = 0;
1245 		stats.oerrors  -= port->stats.oerrors;
1246 		port->stats.oerrors = 0;
1247 		stats.rx_nombuf -= port->stats.rx_nombuf;
1248 		port->stats.rx_nombuf = 0;
1249 
1250 		total_recv += stats.ipackets;
1251 		total_xmit += stats.opackets;
1252 		total_rx_dropped += stats.imissed;
1253 		total_tx_dropped += port->tx_dropped;
1254 		total_rx_nombuf  += stats.rx_nombuf;
1255 
1256 		fwd_port_stats_display(pt_id, &stats);
1257 	}
1258 	printf("\n  %s Accumulated forward statistics for all ports"
1259 	       "%s\n",
1260 	       acc_stats_border, acc_stats_border);
1261 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1262 	       "%-"PRIu64"\n"
1263 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1264 	       "%-"PRIu64"\n",
1265 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1266 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1267 	if (total_rx_nombuf > 0)
1268 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1269 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1270 	       "%s\n",
1271 	       acc_stats_border, acc_stats_border);
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1273 	if (total_recv > 0)
1274 		printf("\n  CPU cycles/packet=%u (total cycles="
1275 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1276 		       (unsigned int)(fwd_cycles / total_recv),
1277 		       fwd_cycles, total_recv);
1278 #endif
1279 	printf("\nDone.\n");
1280 	test_done = 1;
1281 }
1282 
1283 void
1284 dev_set_link_up(portid_t pid)
1285 {
1286 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1287 		printf("\nSet link up fail.\n");
1288 }
1289 
1290 void
1291 dev_set_link_down(portid_t pid)
1292 {
1293 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1294 		printf("\nSet link down fail.\n");
1295 }
1296 
1297 static int
1298 all_ports_started(void)
1299 {
1300 	portid_t pi;
1301 	struct rte_port *port;
1302 
1303 	RTE_ETH_FOREACH_DEV(pi) {
1304 		port = &ports[pi];
1305 		/* Check if there is a port which is not started */
1306 		if ((port->port_status != RTE_PORT_STARTED) &&
1307 			(port->slave_flag == 0))
1308 			return 0;
1309 	}
1310 
1311 	/* All ports were started. */
1312 	return 1;
1313 }
1314 
1315 int
1316 all_ports_stopped(void)
1317 {
1318 	portid_t pi;
1319 	struct rte_port *port;
1320 
1321 	RTE_ETH_FOREACH_DEV(pi) {
1322 		port = &ports[pi];
1323 		if ((port->port_status != RTE_PORT_STOPPED) &&
1324 			(port->slave_flag == 0))
1325 			return 0;
1326 	}
1327 
1328 	return 1;
1329 }
1330 
1331 int
1332 port_is_started(portid_t port_id)
1333 {
1334 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1335 		return 0;
1336 
1337 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1338 		return 0;
1339 
1340 	return 1;
1341 }
1342 
1343 static int
1344 port_is_closed(portid_t port_id)
1345 {
1346 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1347 		return 0;
1348 
1349 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1350 		return 0;
1351 
1352 	return 1;
1353 }
1354 
1355 int
1356 start_port(portid_t pid)
1357 {
1358 	int diag, need_check_link_status = -1;
1359 	portid_t pi;
1360 	queueid_t qi;
1361 	struct rte_port *port;
1362 	struct ether_addr mac_addr;
1363 	enum rte_eth_event_type event_type;
1364 
1365 	if (port_id_is_invalid(pid, ENABLED_WARN))
1366 		return 0;
1367 
1368 	if (dcb_config)
1369 		dcb_test = 1;
1370 	RTE_ETH_FOREACH_DEV(pi) {
1371 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1372 			continue;
1373 
1374 		need_check_link_status = 0;
1375 		port = &ports[pi];
1376 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1377 						 RTE_PORT_HANDLING) == 0) {
1378 			printf("Port %d is not stopped\n", pi);
1379 			continue;
1380 		}
1381 
1382 		if (port->need_reconfig > 0) {
1383 			port->need_reconfig = 0;
1384 
1385 			printf("Configuring Port %d (socket %u)\n", pi,
1386 					port->socket_id);
1387 			/* configure port */
1388 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1389 						&(port->dev_conf));
1390 			if (diag != 0) {
1391 				if (rte_atomic16_cmpset(&(port->port_status),
1392 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1393 					printf("Port %d can not be set back "
1394 							"to stopped\n", pi);
1395 				printf("Failed to configure port %d\n", pi);
1396 				/* try to reconfigure port next time */
1397 				port->need_reconfig = 1;
1398 				return -1;
1399 			}
1400 		}
1401 		if (port->need_reconfig_queues > 0) {
1402 			port->need_reconfig_queues = 0;
1403 			/* setup tx queues */
1404 			for (qi = 0; qi < nb_txq; qi++) {
1405 				if ((numa_support) &&
1406 					(txring_numa[pi] != NUMA_NO_CONFIG))
1407 					diag = rte_eth_tx_queue_setup(pi, qi,
1408 						nb_txd, txring_numa[pi],
1409 						&(port->tx_conf));
1410 				else
1411 					diag = rte_eth_tx_queue_setup(pi, qi,
1412 						nb_txd, port->socket_id,
1413 						&(port->tx_conf));
1414 
1415 				if (diag == 0)
1416 					continue;
1417 
1418 				/* Failed to set up a TX queue; return. */
1419 				if (rte_atomic16_cmpset(&(port->port_status),
1420 							RTE_PORT_HANDLING,
1421 							RTE_PORT_STOPPED) == 0)
1422 					printf("Port %d can not be set back "
1423 							"to stopped\n", pi);
1424 				printf("Failed to configure port %d TX queues\n", pi);
1425 				/* try to reconfigure queues next time */
1426 				port->need_reconfig_queues = 1;
1427 				return -1;
1428 			}
1429 			/* setup rx queues */
1430 			for (qi = 0; qi < nb_rxq; qi++) {
1431 				if ((numa_support) &&
1432 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1433 					struct rte_mempool * mp =
1434 						mbuf_pool_find(rxring_numa[pi]);
1435 					if (mp == NULL) {
1436 						printf("Failed to setup RX queue: "
1437 							"no mempool allocation"
1438 							" on socket %d\n",
1439 							rxring_numa[pi]);
1440 						return -1;
1441 					}
1442 
1443 					diag = rte_eth_rx_queue_setup(pi, qi,
1444 					     nb_rxd, rxring_numa[pi],
1445 					     &(port->rx_conf), mp);
1446 				} else {
1447 					struct rte_mempool *mp =
1448 						mbuf_pool_find(port->socket_id);
1449 					if (mp == NULL) {
1450 						printf("Failed to setup RX queue: "
1451 							"no mempool allocation"
1452 							" on socket %d\n",
1453 							port->socket_id);
1454 						return -1;
1455 					}
1456 					diag = rte_eth_rx_queue_setup(pi, qi,
1457 					     nb_rxd, port->socket_id,
1458 					     &(port->rx_conf), mp);
1459 				}
1460 				if (diag == 0)
1461 					continue;
1462 
1463 				/* Failed to set up an RX queue; return. */
1464 				if (rte_atomic16_cmpset(&(port->port_status),
1465 							RTE_PORT_HANDLING,
1466 							RTE_PORT_STOPPED) == 0)
1467 					printf("Port %d can not be set back "
1468 							"to stopped\n", pi);
1469 				printf("Failed to configure port %d RX queues\n", pi);
1470 				/* try to reconfigure queues next time */
1471 				port->need_reconfig_queues = 1;
1472 				return -1;
1473 			}
1474 		}
1475 
1476 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1477 		     event_type < RTE_ETH_EVENT_MAX;
1478 		     event_type++) {
1479 			diag = rte_eth_dev_callback_register(pi,
1480 							event_type,
1481 							eth_event_callback,
1482 							NULL);
1483 			if (diag) {
1484 				printf("Failed to set up event callback for event %d\n",
1485 					event_type);
1486 				return -1;
1487 			}
1488 		}
1489 
1490 		/* start port */
1491 		if (rte_eth_dev_start(pi) < 0) {
1492 			printf("Failed to start port %d\n", pi);
1493 
1494 			/* Failed to start the port; roll the status back. */
1495 			if (rte_atomic16_cmpset(&(port->port_status),
1496 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1497 				printf("Port %d can not be set back to "
1498 							"stopped\n", pi);
1499 			continue;
1500 		}
1501 
1502 		if (rte_atomic16_cmpset(&(port->port_status),
1503 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1504 			printf("Port %d can not be set into started\n", pi);
1505 
1506 		rte_eth_macaddr_get(pi, &mac_addr);
1507 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1508 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1509 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1510 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1511 
1512 		/* at least one port started, need to check link status */
1513 		need_check_link_status = 1;
1514 	}
1515 
1516 	if (need_check_link_status == 1 && !no_link_check)
1517 		check_all_ports_link_status(RTE_PORT_ALL);
1518 	else if (need_check_link_status == 0)
1519 		printf("Please stop the ports first\n");
1520 
1521 	printf("Done\n");
1522 	return 0;
1523 }
1524 
1525 void
1526 stop_port(portid_t pid)
1527 {
1528 	portid_t pi;
1529 	struct rte_port *port;
1530 	int need_check_link_status = 0;
1531 
1532 	if (dcb_test) {
1533 		dcb_test = 0;
1534 		dcb_config = 0;
1535 	}
1536 
1537 	if (port_id_is_invalid(pid, ENABLED_WARN))
1538 		return;
1539 
1540 	printf("Stopping ports...\n");
1541 
1542 	RTE_ETH_FOREACH_DEV(pi) {
1543 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1544 			continue;
1545 
1546 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1547 			printf("Please remove port %d from forwarding configuration.\n", pi);
1548 			continue;
1549 		}
1550 
1551 		if (port_is_bonding_slave(pi)) {
1552 			printf("Please remove port %d from bonded device.\n", pi);
1553 			continue;
1554 		}
1555 
1556 		port = &ports[pi];
1557 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1558 						RTE_PORT_HANDLING) == 0)
1559 			continue;
1560 
1561 		rte_eth_dev_stop(pi);
1562 
1563 		if (rte_atomic16_cmpset(&(port->port_status),
1564 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1565 			printf("Port %d can not be set into stopped\n", pi);
1566 		need_check_link_status = 1;
1567 	}
1568 	if (need_check_link_status && !no_link_check)
1569 		check_all_ports_link_status(RTE_PORT_ALL);
1570 
1571 	printf("Done\n");
1572 }
1573 
1574 void
1575 close_port(portid_t pid)
1576 {
1577 	portid_t pi;
1578 	struct rte_port *port;
1579 
1580 	if (port_id_is_invalid(pid, ENABLED_WARN))
1581 		return;
1582 
1583 	printf("Closing ports...\n");
1584 
1585 	RTE_ETH_FOREACH_DEV(pi) {
1586 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1587 			continue;
1588 
1589 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1590 			printf("Please remove port %d from forwarding configuration.\n", pi);
1591 			continue;
1592 		}
1593 
1594 		if (port_is_bonding_slave(pi)) {
1595 			printf("Please remove port %d from bonded device.\n", pi);
1596 			continue;
1597 		}
1598 
1599 		port = &ports[pi];
1600 		if (rte_atomic16_cmpset(&(port->port_status),
1601 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1602 			printf("Port %d is already closed\n", pi);
1603 			continue;
1604 		}
1605 
1606 		if (rte_atomic16_cmpset(&(port->port_status),
1607 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1608 			printf("Port %d is not stopped\n", pi);
1609 			continue;
1610 		}
1611 
1612 		if (port->flow_list)
1613 			port_flow_flush(pi);
1614 		rte_eth_dev_close(pi);
1615 
1616 		if (rte_atomic16_cmpset(&(port->port_status),
1617 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1618 			printf("Port %d cannot be set to closed\n", pi);
1619 	}
1620 
1621 	printf("Done\n");
1622 }
1623 
1624 void
1625 attach_port(char *identifier)
1626 {
1627 	portid_t pi = 0;
1628 	unsigned int socket_id;
1629 
1630 	printf("Attaching a new port...\n");
1631 
1632 	if (identifier == NULL) {
1633 		printf("Invalid parameters are specified\n");
1634 		return;
1635 	}
1636 
1637 	if (rte_eth_dev_attach(identifier, &pi))
1638 		return;
1639 
1640 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1641 	/* if socket_id is invalid, set to 0 */
1642 	if (check_socket_id(socket_id) < 0)
1643 		socket_id = 0;
1644 	reconfig(pi, socket_id);
1645 	rte_eth_promiscuous_enable(pi);
1646 
1647 	nb_ports = rte_eth_dev_count();
1648 
1649 	ports[pi].port_status = RTE_PORT_STOPPED;
1650 
1651 	printf("Port %d is attached. Total ports is now %d\n", pi, nb_ports);
1652 	printf("Done\n");
1653 }
1654 
1655 void
1656 detach_port(uint8_t port_id)
1657 {
1658 	char name[RTE_ETH_NAME_MAX_LEN];
1659 
1660 	printf("Detaching a port...\n");
1661 
1662 	if (!port_is_closed(port_id)) {
1663 		printf("Please close port first\n");
1664 		return;
1665 	}
1666 
1667 	if (ports[port_id].flow_list)
1668 		port_flow_flush(port_id);
1669 
1670 	if (rte_eth_dev_detach(port_id, name))
1671 		return;
1672 
1673 	nb_ports = rte_eth_dev_count();
1674 
1675 	printf("Port '%s' is detached. Total ports is now %d\n",
1676 			name, nb_ports);
1677 	printf("Done\n");
1678 	return;
1679 }
1680 
1681 void
1682 pmd_test_exit(void)
1683 {
1684 	portid_t pt_id;
1685 
1686 	if (test_done == 0)
1687 		stop_packet_forwarding();
1688 
1689 	if (ports != NULL) {
1690 		no_link_check = 1;
1691 		RTE_ETH_FOREACH_DEV(pt_id) {
1692 			printf("\nShutting down port %d...\n", pt_id);
1693 			fflush(stdout);
1694 			stop_port(pt_id);
1695 			close_port(pt_id);
1696 		}
1697 	}
1698 	printf("\nBye...\n");
1699 }
1700 
1701 typedef void (*cmd_func_t)(void);
1702 struct pmd_test_command {
1703 	const char *cmd_name;
1704 	cmd_func_t cmd_func;
1705 };
1706 
1707 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1708 
1709 /* Check the link status of all ports for up to 9s, and finally print it. */
1710 static void
1711 check_all_ports_link_status(uint32_t port_mask)
1712 {
1713 #define CHECK_INTERVAL 100 /* 100ms */
1714 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1715 	uint8_t portid, count, all_ports_up, print_flag = 0;
1716 	struct rte_eth_link link;
1717 
1718 	printf("Checking link statuses...\n");
1719 	fflush(stdout);
1720 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1721 		all_ports_up = 1;
1722 		RTE_ETH_FOREACH_DEV(portid) {
1723 			if ((port_mask & (1 << portid)) == 0)
1724 				continue;
1725 			memset(&link, 0, sizeof(link));
1726 			rte_eth_link_get_nowait(portid, &link);
1727 			/* print link status if flag set */
1728 			if (print_flag == 1) {
1729 				if (link.link_status)
1730 					printf("Port %d Link Up - speed %u "
1731 						"Mbps - %s\n", (uint8_t)portid,
1732 						(unsigned)link.link_speed,
1733 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1734 					("full-duplex") : ("half-duplex"));
1735 				else
1736 					printf("Port %d Link Down\n",
1737 						(uint8_t)portid);
1738 				continue;
1739 			}
1740 			/* clear all_ports_up flag if any link down */
1741 			if (link.link_status == ETH_LINK_DOWN) {
1742 				all_ports_up = 0;
1743 				break;
1744 			}
1745 		}
1746 		/* after finally printing all link status, get out */
1747 		if (print_flag == 1)
1748 			break;
1749 
1750 		if (all_ports_up == 0) {
1751 			fflush(stdout);
1752 			rte_delay_ms(CHECK_INTERVAL);
1753 		}
1754 
1755 		/* set the print_flag if all ports up or timeout */
1756 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1757 			print_flag = 1;
1758 		}
1759 
1760 		if (lsc_interrupt)
1761 			break;
1762 	}
1763 }
1764 
1765 static void
1766 rmv_event_callback(void *arg)
1767 {
1768 	struct rte_eth_dev *dev;
1769 	struct rte_devargs *da;
1770 	char name[32] = "";
1771 	uint8_t port_id = (intptr_t)arg;
1772 
1773 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1774 	dev = &rte_eth_devices[port_id];
1775 	da = dev->device->devargs;
1776 
1777 	stop_port(port_id);
1778 	close_port(port_id);
1779 	if (da->type == RTE_DEVTYPE_VIRTUAL)
1780 		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1781 	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1782 		rte_eal_pci_device_name(&da->pci.addr, name, sizeof(name));
1783 	printf("removing device %s\n", name);
1784 	rte_eal_dev_detach(name);
1785 	dev->state = RTE_ETH_DEV_UNUSED;
1786 }
1787 
1788 /* This function is used by the interrupt thread */
1789 static void
1790 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1791 {
1792 	static const char * const event_desc[] = {
1793 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1794 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1795 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1796 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1797 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1798 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1799 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1800 		[RTE_ETH_EVENT_MAX] = NULL,
1801 	};
1802 
1803 	RTE_SET_USED(param);
1804 
1805 	if (type >= RTE_ETH_EVENT_MAX) {
1806 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1807 			port_id, __func__, type);
1808 		fflush(stderr);
1809 	} else {
1810 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1811 			event_desc[type]);
1812 		fflush(stdout);
1813 	}
1814 
1815 	switch (type) {
1816 	case RTE_ETH_EVENT_INTR_RMV:
1817 		if (rte_eal_alarm_set(100000,
1818 				rmv_event_callback, (void *)(intptr_t)port_id))
1819 			fprintf(stderr, "Could not set up deferred device removal\n");
1820 		break;
1821 	default:
1822 		break;
1823 	}
1824 }
1825 
1826 static int
1827 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1828 {
1829 	uint16_t i;
1830 	int diag;
1831 	uint8_t mapping_found = 0;
1832 
1833 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1836 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1837 					tx_queue_stats_mappings[i].queue_id,
1838 					tx_queue_stats_mappings[i].stats_counter_id);
1839 			if (diag != 0)
1840 				return diag;
1841 			mapping_found = 1;
1842 		}
1843 	}
1844 	if (mapping_found)
1845 		port->tx_queue_stats_mapping_enabled = 1;
1846 	return 0;
1847 }
1848 
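/* RX counterpart of set_tx_queue_stats_mapping_registers() above. */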
1849 static int
1850 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1851 {
1852 	uint16_t i;
1853 	int diag;
1854 	uint8_t mapping_found = 0;
1855 
1856 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1859 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1860 					rx_queue_stats_mappings[i].queue_id,
1861 					rx_queue_stats_mappings[i].stats_counter_id);
1862 			if (diag != 0)
1863 				return diag;
1864 			mapping_found = 1;
1865 		}
1866 	}
1867 	if (mapping_found)
1868 		port->rx_queue_stats_mapping_enabled = 1;
1869 	return 0;
1870 }
1871 
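/*
 * Apply both TX and RX queue stats mappings to a port. -ENOTSUP from the
 * PMD only disables the feature for that direction; any other error is
 * fatal.
 */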
1872 static void
1873 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1874 {
1875 	int diag = 0;
1876 
1877 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1878 	if (diag != 0) {
1879 		if (diag == -ENOTSUP) {
1880 			port->tx_queue_stats_mapping_enabled = 0;
1881 			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else {
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
		}
	}
1889 
1890 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1891 	if (diag != 0) {
1892 		if (diag == -ENOTSUP) {
1893 			port->rx_queue_stats_mapping_enabled = 0;
1894 			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else {
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
		}
	}
1902 }
1903 
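/*
 * Start from the PMD's default per-queue RX/TX configuration and override
 * only the parameters given on the command line (RTE_PMD_PARAM_UNSET
 * means "keep the driver default").
 */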
1904 static void
1905 rxtx_port_config(struct rte_port *port)
1906 {
1907 	port->rx_conf = port->dev_info.default_rxconf;
1908 	port->tx_conf = port->dev_info.default_txconf;
1909 
1910 	/* Check if any RX/TX parameters have been passed */
1911 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1912 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1913 
1914 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1915 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1916 
1917 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1918 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1919 
1920 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1921 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1922 
1923 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1924 		port->rx_conf.rx_drop_en = rx_drop_en;
1925 
1926 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1927 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1928 
1929 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1930 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1931 
1932 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1933 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1934 
1935 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1936 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1937 
1938 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1939 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1940 
1941 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1942 		port->tx_conf.txq_flags = txq_flags;
1943 }
1944 
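/*
 * Build the default configuration of every probed port: RX mode, flow
 * director, RSS (only when several RX queues are in use), queue stats
 * mappings, and the LSC/RMV interrupts when requested and supported.
 */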
1945 void
1946 init_port_config(void)
1947 {
1948 	portid_t pid;
1949 	struct rte_port *port;
1950 
1951 	RTE_ETH_FOREACH_DEV(pid) {
1952 		port = &ports[pid];
1953 		port->dev_conf.rxmode = rx_mode;
1954 		port->dev_conf.fdir_conf = fdir_conf;
		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		/* RSS is only meaningful with more than one RX queue. */
		port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
			(nb_rxq > 1) ? rss_hf : 0;
1962 
1963 		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1965 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1966 			else
1967 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1968 		}
1969 
1970 		rxtx_port_config(port);
1971 
1972 		rte_eth_macaddr_get(pid, &port->eth_addr);
1973 
1974 		map_port_queue_stats_mapping_registers(pid, port);
1975 #ifdef RTE_NIC_BYPASS
1976 		rte_eth_dev_bypass_init(pid);
1977 #endif
1978 
1979 		if (lsc_interrupt &&
1980 		    (rte_eth_devices[pid].data->dev_flags &
1981 		     RTE_ETH_DEV_INTR_LSC))
1982 			port->dev_conf.intr_conf.lsc = 1;
1983 		if (rmv_interrupt &&
1984 		    (rte_eth_devices[pid].data->dev_flags &
1985 		     RTE_ETH_DEV_INTR_RMV))
1986 			port->dev_conf.intr_conf.rmv = 1;
1987 	}
1988 }
1989 
void
set_port_slave_flag(portid_t slave_pid)
1991 {
1992 	struct rte_port *port;
1993 
1994 	port = &ports[slave_pid];
1995 	port->slave_flag = 1;
1996 }
1997 
void
clear_port_slave_flag(portid_t slave_pid)
1999 {
2000 	struct rte_port *port;
2001 
2002 	port = &ports[slave_pid];
2003 	port->slave_flag = 0;
2004 }
2005 
uint8_t
port_is_bonding_slave(portid_t slave_pid)
2007 {
2008 	struct rte_port *port;
2009 
2010 	port = &ports[slave_pid];
2011 	return port->slave_flag;
2012 }
2013 
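/* VLAN IDs mapped to VMDq pools in DCB+VT mode, see get_eth_dcb_conf(). */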
2014 const uint16_t vlan_tags[] = {
2015 		0,  1,  2,  3,  4,  5,  6,  7,
2016 		8,  9, 10, 11,  12, 13, 14, 15,
2017 		16, 17, 18, 19, 20, 21, 22, 23,
2018 		24, 25, 26, 27, 28, 29, 30, 31
2019 };
2020 
static int
2022 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2023 		 enum dcb_mode_enable dcb_mode,
2024 		 enum rte_eth_nb_tcs num_tcs,
2025 		 uint8_t pfc_en)
2026 {
2027 	uint8_t i;
2028 
	/*
	 * Build the configuration for DCB with VT (VMDq) enabled, based on
	 * the vlan_tags array given above and on the number of traffic
	 * classes available for use; otherwise build a plain DCB (+ RSS)
	 * configuration.
	 */
2033 	if (dcb_mode == DCB_VT_ENABLED) {
2034 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2035 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2036 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2037 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2038 
2039 		/* VMDQ+DCB RX and TX configurations */
2040 		vmdq_rx_conf->enable_default_pool = 0;
2041 		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2046 
2047 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2048 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2049 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2050 			vmdq_rx_conf->pool_map[i].pools =
2051 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2052 		}
2053 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2054 			vmdq_rx_conf->dcb_tc[i] = i;
2055 			vmdq_tx_conf->dcb_tc[i] = i;
2056 		}
2057 
2058 		/* set DCB mode of RX and TX of multiple queues */
2059 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2060 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2061 	} else {
2062 		struct rte_eth_dcb_rx_conf *rx_conf =
2063 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2064 		struct rte_eth_dcb_tx_conf *tx_conf =
2065 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2066 
2067 		rx_conf->nb_tcs = num_tcs;
2068 		tx_conf->nb_tcs = num_tcs;
2069 
2070 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2071 			rx_conf->dcb_tc[i] = i % num_tcs;
2072 			tx_conf->dcb_tc[i] = i % num_tcs;
2073 		}
2074 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2075 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2076 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2077 	}
2078 
2079 	if (pfc_en)
2080 		eth_conf->dcb_capability_en =
2081 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2082 	else
2083 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2084 
2085 	return 0;
2086 }
2087 
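/*
 * Reconfigure a port for DCB. Note that this updates the global nb_rxq
 * and nb_txq, so every port is assumed to share the same DCB capability.
 *
 * Usage sketch (values are illustrative only): enabling plain DCB with
 * four traffic classes and priority flow control on port 0 would be
 *
 *	ret = init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1);
 *
 * In testpmd this is normally reached through the interactive
 * "port config ... dcb" command rather than called directly.
 */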
2088 int
2089 init_port_dcb_config(portid_t pid,
2090 		     enum dcb_mode_enable dcb_mode,
2091 		     enum rte_eth_nb_tcs num_tcs,
2092 		     uint8_t pfc_en)
2093 {
2094 	struct rte_eth_conf port_conf;
2095 	struct rte_port *rte_port;
2096 	int retval;
2097 	uint16_t i;
2098 
2099 	rte_port = &ports[pid];
2100 
2101 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2102 	/* Enter DCB configuration status */
2103 	dcb_config = 1;
2104 
	/* Set configuration of DCB in VT mode and DCB in non-VT mode. */
2106 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2107 	if (retval < 0)
2108 		return retval;
2109 	port_conf.rxmode.hw_vlan_filter = 1;
2110 
	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be set up here.
	 */
2116 	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2117 
2118 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2119 
	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue ids of the VMDq pools start after the PF queues.
	 */
2123 	if (dcb_mode == DCB_VT_ENABLED &&
2124 	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
2127 		return -1;
2128 	}
2129 
	/* Assume all ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode.
	 */
2133 	if (dcb_mode == DCB_VT_ENABLED) {
2134 		if (rte_port->dev_info.max_vfs > 0) {
2135 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2136 			nb_txq = rte_port->dev_info.nb_tx_queues;
2137 		} else {
2138 			nb_rxq = rte_port->dev_info.max_rx_queues;
2139 			nb_txq = rte_port->dev_info.max_tx_queues;
2140 		}
2141 	} else {
		/* If VT is disabled, use all PF queues. */
2143 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2144 			nb_rxq = rte_port->dev_info.max_rx_queues;
2145 			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
2151 	}
	/* Fixed RX free threshold used for all DCB configurations. */
	rx_free_thresh = 64;
2153 
2154 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2155 
2156 	rxtx_port_config(rte_port);
2157 	/* VLAN filter */
2158 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2159 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2160 		rx_vft_set(pid, vlan_tags[i], 1);
2161 
2162 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2163 	map_port_queue_stats_mapping_registers(pid, rte_port);
2164 
2165 	rte_port->dcb_flag = 1;
2166 
2167 	return 0;
2168 }
2169 
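/* Allocate the zeroed array of port control structures. */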
2170 static void
2171 init_port(void)
2172 {
2173 	/* Configuration of Ethernet ports. */
2174 	ports = rte_zmalloc("testpmd: ports",
2175 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2176 			    RTE_CACHE_LINE_SIZE);
2177 	if (ports == NULL) {
2178 		rte_exit(EXIT_FAILURE,
2179 				"rte_zmalloc(%d struct rte_port) failed\n",
2180 				RTE_MAX_ETHPORTS);
2181 	}
2182 }
2183 
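/* Teardown helper invoked from the signal handler below. */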
2184 static void
2185 force_quit(void)
2186 {
2187 	pmd_test_exit();
2188 	prompt_exit();
2189 }
2190 
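/*
 * On SIGINT/SIGTERM, release resources, then re-raise the signal with the
 * default handler restored so that the process reports the conventional
 * signal exit status to its parent.
 */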
2191 static void
2192 signal_handler(int signum)
2193 {
2194 	if (signum == SIGINT || signum == SIGTERM) {
2195 		printf("\nSignal %d received, preparing to exit...\n",
2196 				signum);
2197 #ifdef RTE_LIBRTE_PDUMP
2198 		/* uninitialize packet capture framework */
2199 		rte_pdump_uninit();
2200 #endif
2201 #ifdef RTE_LIBRTE_LATENCY_STATS
2202 		rte_latencystats_uninit();
2203 #endif
2204 		force_quit();
2205 		/* exit with the expected status */
2206 		signal(signum, SIG_DFL);
2207 		kill(getpid(), signum);
2208 	}
2209 }
2210 
2211 int
2212 main(int argc, char** argv)
2213 {
2214 	int  diag;
2215 	uint8_t port_id;
2216 
2217 	signal(SIGINT, signal_handler);
2218 	signal(SIGTERM, signal_handler);
2219 
2220 	diag = rte_eal_init(argc, argv);
2221 	if (diag < 0)
2222 		rte_panic("Cannot init EAL\n");
2223 
2224 #ifdef RTE_LIBRTE_PDUMP
2225 	/* initialize packet capture framework */
2226 	rte_pdump_init(NULL);
2227 #endif
2228 
2229 	nb_ports = (portid_t) rte_eth_dev_count();
2230 	if (nb_ports == 0)
2231 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2232 
2233 	/* allocate port structures, and init them */
2234 	init_port();
2235 
2236 	set_def_fwd_config();
2237 	if (nb_lcores == 0)
2238 		rte_panic("Empty set of forwarding logical cores - check the "
2239 			  "core mask supplied in the command parameters\n");
2240 
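	/* Parse the arguments left over after rte_eal_init() consumed its own. */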
2241 	argc -= diag;
2242 	argv += diag;
2243 	if (argc > 1)
2244 		launch_args_parse(argc, argv);
2245 
2246 	if (!nb_rxq && !nb_txq)
		printf("Warning: either RX or TX queue count should be non-zero\n");
2248 
2249 	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
		       nb_rxq, nb_txq);
2253 
2254 	init_config();
2255 	if (start_port(RTE_PORT_ALL) != 0)
2256 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2257 
2258 	/* set all ports to promiscuous mode by default */
2259 	RTE_ETH_FOREACH_DEV(port_id)
2260 		rte_eth_promiscuous_enable(port_id);
2261 
2262 	/* Init metrics library */
2263 	rte_metrics_init(rte_socket_id());
2264 
2265 #ifdef RTE_LIBRTE_LATENCY_STATS
2266 	if (latencystats_enabled != 0) {
2267 		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init() returned error %d\n",
				ret);
2271 		printf("Latencystats running on lcore %d\n",
2272 			latencystats_lcore_id);
2273 	}
2274 #endif
2275 
2276 	/* Setup bitrate stats */
2277 #ifdef RTE_LIBRTE_BITRATE
2278 	bitrate_data = rte_stats_bitrate_create();
2279 	if (bitrate_data == NULL)
2280 		rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
2281 	rte_stats_bitrate_reg(bitrate_data);
2282 #endif
2283 
2285 #ifdef RTE_LIBRTE_CMDLINE
2286 	if (interactive == 1) {
2287 		if (auto_start) {
2288 			printf("Start automatic packet forwarding\n");
2289 			start_packet_forwarding(0);
2290 		}
2291 		prompt();
2292 	} else
2293 #endif
2294 	{
2295 		char c;
2296 		int rc;
2297 
		printf("No interactive command line requested; starting packet forwarding\n");
2299 		start_packet_forwarding(0);
2300 		printf("Press enter to exit\n");
2301 		rc = read(0, &c, 1);
2302 		pmd_test_exit();
2303 		if (rc < 0)
2304 			return 1;
2305 	}
2306 
2307 	return 0;
2308 }
2309