xref: /dpdk/app/test-pmd/testpmd.c (revision 36db4f6c70b43f307dd2a1ece19d910a3548349d)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated from the console. Set a flag to exit the stats-period loop
 * after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
		     DEV_RX_OFFLOAD_VLAN_STRIP |
		     DEV_RX_OFFLOAD_CRC_STRIP),
	.ignore_offload_bitfield = 1,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check if a socket has already been discovered.
 * Return zero if it has, a positive value otherwise.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Create an mbuf pool. Done once for each socket at initialisation time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket ID is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID that has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the given rxq is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID that has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the given txq is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

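/*
 * One-time initialisation of the run-time configuration: allocate the
 * per-lcore forwarding contexts, apply the default port configuration,
 * create the mbuf pools, and set up the forwarding streams and the
 * per-lcore GSO/GRO contexts.
 */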
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TX configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd
	 * and nb_txd can be re-configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Record which mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a GRO context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}

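/*
 * Re-query the device information of a (re)attached port and flag it for
 * reconfiguration of its port and queues on the next start.
 */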
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}

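/*
 * (Re)allocate the array of forwarding streams after validating
 * nb_rxq/nb_txq against the port capabilities and assigning a NUMA
 * socket to each port.
 */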
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst count */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

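/*
 * Display the forwarding statistics of one port, using either the plain
 * layout or the wider per-queue layout when queue stats mapping is
 * enabled on the port.
 */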
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i],
			       stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

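/*
 * Display the statistics of one forwarding stream (one RX/TX queue
 * pair); streams that did not handle any packet are skipped.
 */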
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

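/*
 * Drain the RX queues of all forwarding ports before forwarding starts,
 * so that packets left over from a previous run are not counted in the
 * new statistics. Two passes are done, 10 ms apart.
 */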
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * a non-zero number of packets, so a timer is
				 * used to exit the loop after a 1-second
				 * timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

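/*
 * Forwarding loop of one logical core: run the given packet forwarding
 * function on every stream assigned to this lcore until the lcore is
 * told to stop, updating bitrate and latency statistics when enabled.
 */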
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

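/*
 * Lcore entry point: run the packet forwarding function of the current
 * forwarding engine on the streams of this logical core.
 */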
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

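/*
 * Stop packet forwarding: tell all forwarding lcores to stop, wait for
 * them to finish, fold the per-stream statistics into the per-port
 * counters and display the per-port and accumulated statistics.
 */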
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

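/* Administratively set the link of a port up or down. */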
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nFailed to set link up.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nFailed to set link down.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

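/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL: apply any
 * pending port/queue reconfiguration, set up the TX and RX queues, start
 * the device, register the event callbacks and optionally check the link
 * status.
 */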
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
			/* Apply TX offloads configuration */
			port->tx_conf.offloads = port->dev_conf.txmode.offloads;
			/* setup TX queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* Apply RX offloads configuration */
			port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
			/* setup RX queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocated "
							"on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocated "
							"on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to setup event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

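/*
 * Stop the given port, or all ports when pid is RTE_PORT_ALL. Ports that
 * are still forwarding or that act as bonding slaves are skipped.
 */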
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

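/*
 * Close the given port, or all ports when pid is RTE_PORT_ALL, flushing
 * any flow rules still attached to a port before closing it.
 */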
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

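/*
 * Reset the given port, or all ports when pid is RTE_PORT_ALL, and flag
 * each successfully reset port for reconfiguration on the next start.
 */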
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}

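/*
 * Attach a new port from a device identifier (e.g. a PCI address or a
 * virtual device name), reconfigure it and enable promiscuous mode.
 */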
1838 void
1839 attach_port(char *identifier)
1840 {
1841 	portid_t pi = 0;
1842 	unsigned int socket_id;
1843 
1844 	printf("Attaching a new port...\n");
1845 
1846 	if (identifier == NULL) {
1847 		printf("Invalid parameters are specified\n");
1848 		return;
1849 	}
1850 
1851 	if (rte_eth_dev_attach(identifier, &pi))
1852 		return;
1853 
1854 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1855 	/* if socket_id is invalid, set to 0 */
1856 	if (check_socket_id(socket_id) < 0)
1857 		socket_id = 0;
1858 	reconfig(pi, socket_id);
1859 	rte_eth_promiscuous_enable(pi);
1860 
1861 	nb_ports = rte_eth_dev_count();
1862 
1863 	ports[pi].port_status = RTE_PORT_STOPPED;
1864 
1865 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1866 	printf("Done\n");
1867 }
1868 
1869 void
1870 detach_port(portid_t port_id)
1871 {
1872 	char name[RTE_ETH_NAME_MAX_LEN];
1873 
1874 	printf("Detaching a port...\n");
1875 
1876 	if (!port_is_closed(port_id)) {
1877 		printf("Please close port first\n");
1878 		return;
1879 	}
1880 
1881 	if (ports[port_id].flow_list)
1882 		port_flow_flush(port_id);
1883 
1884 	if (rte_eth_dev_detach(port_id, name)) {
1885 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1886 		return;
1887 	}
1888 
1889 	nb_ports = rte_eth_dev_count();
1890 
1891 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1892 			name, nb_ports);
1893 	printf("Done\n");
1894 	return;
1895 }
1896 
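/*
 * Final teardown: stop forwarding if it is still running, then stop and
 * close every port. no_link_check is raised first so that shutdown does
 * not block waiting for links to settle.
 */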
1897 void
1898 pmd_test_exit(void)
1899 {
1900 	portid_t pt_id;
1901 
1902 	if (test_done == 0)
1903 		stop_packet_forwarding();
1904 
1905 	if (ports != NULL) {
1906 		no_link_check = 1;
1907 		RTE_ETH_FOREACH_DEV(pt_id) {
1908 			printf("\nShutting down port %d...\n", pt_id);
1909 			fflush(stdout);
1910 			stop_port(pt_id);
1911 			close_port(pt_id);
1912 		}
1913 	}
1914 	printf("\nBye...\n");
1915 }
1916 
1917 typedef void (*cmd_func_t)(void);
1918 struct pmd_test_command {
1919 	const char *cmd_name;
1920 	cmd_func_t cmd_func;
1921 };
1922 
1923 #define PMD_TEST_CMD_NB RTE_DIM(pmd_test_menu)
1924 
1925 /* Poll the link status of all ports for up to 9s, then print the final status of each port */
1926 static void
1927 check_all_ports_link_status(uint32_t port_mask)
1928 {
1929 #define CHECK_INTERVAL 100 /* 100ms */
1930 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1931 	portid_t portid;
1932 	uint8_t count, all_ports_up, print_flag = 0;
1933 	struct rte_eth_link link;
1934 
1935 	printf("Checking link statuses...\n");
1936 	fflush(stdout);
1937 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1938 		all_ports_up = 1;
1939 		RTE_ETH_FOREACH_DEV(portid) {
1940 			if ((port_mask & (1 << portid)) == 0)
1941 				continue;
1942 			memset(&link, 0, sizeof(link));
1943 			rte_eth_link_get_nowait(portid, &link);
1944 			/* print link status if flag set */
1945 			if (print_flag == 1) {
1946 				if (link.link_status)
1947 					printf(
1948 					"Port %d Link Up. speed %u Mbps - %s\n",
1949 					portid, link.link_speed,
1950 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1951 					("full-duplex") : ("half-duplex"));
1952 				else
1953 					printf("Port %d Link Down\n", portid);
1954 				continue;
1955 			}
1956 			/* clear all_ports_up flag if any link down */
1957 			if (link.link_status == ETH_LINK_DOWN) {
1958 				all_ports_up = 0;
1959 				break;
1960 			}
1961 		}
1962 		/* after finally printing all link status, get out */
1963 		if (print_flag == 1)
1964 			break;
1965 
1966 		if (all_ports_up == 0) {
1967 			fflush(stdout);
1968 			rte_delay_ms(CHECK_INTERVAL);
1969 		}
1970 
1971 		/* set the print_flag if all ports up or timeout */
1972 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1973 			print_flag = 1;
1974 		}
1975 
1976 		if (lsc_interrupt)
1977 			break;
1978 	}
1979 }
1980 
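/*
 * Handle a device-removal (RMV) event. This runs from an EAL alarm rather
 * than directly from the interrupt callback (see the rte_eal_alarm_set()
 * call in eth_event_callback() below): deferring the detach avoids
 * tearing the device down from inside its own event callback.
 */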
1981 static void
1982 rmv_event_callback(void *arg)
1983 {
1984 	struct rte_eth_dev *dev;
1985 	portid_t port_id = (intptr_t)arg;
1986 
1987 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1988 	dev = &rte_eth_devices[port_id];
1989 
1990 	stop_port(port_id);
1991 	close_port(port_id);
1992 	printf("removing device %s\n", dev->device->name);
1993 	if (rte_eal_dev_detach(dev->device))
1994 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1995 			dev->device->name);
1996 }
1997 
1998 /* This function runs in the interrupt thread; heavy work such as device removal is deferred to an alarm */
1999 static int
2000 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2001 		  void *ret_param)
2002 {
2003 	static const char * const event_desc[] = {
2004 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2005 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2006 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2007 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2008 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2009 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2010 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2011 		[RTE_ETH_EVENT_NEW] = "device probed",
2012 		[RTE_ETH_EVENT_DESTROY] = "device released",
2013 		[RTE_ETH_EVENT_MAX] = NULL,
2014 	};
2015 
2016 	RTE_SET_USED(param);
2017 	RTE_SET_USED(ret_param);
2018 
2019 	if (type >= RTE_ETH_EVENT_MAX) {
2020 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2021 			port_id, __func__, type);
2022 		fflush(stderr);
2023 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2024 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2025 			event_desc[type]);
2026 		fflush(stdout);
2027 	}
2028 
2029 	switch (type) {
2030 	case RTE_ETH_EVENT_INTR_RMV:
2031 		if (rte_eal_alarm_set(100000,
2032 				rmv_event_callback, (void *)(intptr_t)port_id))
2033 			fprintf(stderr, "Could not set up deferred device removal\n");
2034 		break;
2035 	default:
2036 		break;
2037 	}
2038 	return 0;
2039 }
2040 
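/*
 * Program the per-queue statistics counter mapping on the TX side (the RX
 * variant below mirrors it). Some NICs expose fewer statistics counters
 * than queues, so the user may bind a queue to a counter; the entries
 * come from the tx_queue_stats_mappings[] table filled in by the
 * command-line parser. A sketch of one entry, assuming the field names
 * used below:
 *
 *	{ .port_id = 0, .queue_id = 2, .stats_counter_id = 5 }
 *
 * which maps TX queue 2 of port 0 onto hardware counter 5. Returns the
 * negative error from the ethdev call on failure, else 0.
 */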
2041 static int
2042 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2043 {
2044 	uint16_t i;
2045 	int diag;
2046 	uint8_t mapping_found = 0;
2047 
2048 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2049 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2050 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2051 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2052 					tx_queue_stats_mappings[i].queue_id,
2053 					tx_queue_stats_mappings[i].stats_counter_id);
2054 			if (diag != 0)
2055 				return diag;
2056 			mapping_found = 1;
2057 		}
2058 	}
2059 	if (mapping_found)
2060 		port->tx_queue_stats_mapping_enabled = 1;
2061 	return 0;
2062 }
2063 
2064 static int
2065 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2066 {
2067 	uint16_t i;
2068 	int diag;
2069 	uint8_t mapping_found = 0;
2070 
2071 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2072 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2073 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2074 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2075 					rx_queue_stats_mappings[i].queue_id,
2076 					rx_queue_stats_mappings[i].stats_counter_id);
2077 			if (diag != 0)
2078 				return diag;
2079 			mapping_found = 1;
2080 		}
2081 	}
2082 	if (mapping_found)
2083 		port->rx_queue_stats_mapping_enabled = 1;
2084 	return 0;
2085 }
2086 
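/*
 * Apply both TX and RX statistics mappings to a port. -ENOTSUP from the
 * driver is tolerated and simply disables the feature for that port; any
 * other error is fatal, since the user explicitly requested a mapping
 * that cannot be honoured.
 */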
2087 static void
2088 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2089 {
2090 	int diag = 0;
2091 
2092 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2093 	if (diag != 0) {
2094 		if (diag == -ENOTSUP) {
2095 			port->tx_queue_stats_mapping_enabled = 0;
2096 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2097 		}
2098 		else
2099 			rte_exit(EXIT_FAILURE,
2100 					"set_tx_queue_stats_mapping_registers "
2101 					"failed for port id=%d diag=%d\n",
2102 					pi, diag);
2103 	}
2104 
2105 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2106 	if (diag != 0) {
2107 		if (diag == -ENOTSUP) {
2108 			port->rx_queue_stats_mapping_enabled = 0;
2109 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2110 		}
2111 		else
2112 			rte_exit(EXIT_FAILURE,
2113 					"set_rx_queue_stats_mapping_registers "
2114 					"failed for port id=%d diag=%d\n",
2115 					pi, diag);
2116 	}
2117 }
2118 
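/*
 * Seed a port's RX/TX queue configuration from the driver defaults in
 * dev_info, then apply any threshold given on the command line;
 * RTE_PMD_PARAM_UNSET marks a parameter that was not supplied and
 * therefore keeps its driver default.
 */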
2119 static void
2120 rxtx_port_config(struct rte_port *port)
2121 {
2122 	port->rx_conf = port->dev_info.default_rxconf;
2123 	port->tx_conf = port->dev_info.default_txconf;
2124 
2125 	/* Check if any RX/TX parameters have been passed */
2126 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2127 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2128 
2129 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2130 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2131 
2132 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2133 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2134 
2135 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2136 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2137 
2138 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2139 		port->rx_conf.rx_drop_en = rx_drop_en;
2140 
2141 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2142 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2143 
2144 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2145 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2146 
2147 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2148 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2149 
2150 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2151 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2152 
2153 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2154 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2155 }
2156 
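/*
 * Default per-port configuration applied at startup. RSS is enabled only
 * when more than one RX queue is requested (with a single queue there is
 * nothing to spread the load over), and unless DCB was configured the
 * multi-queue RX mode follows from that choice. LSC and RMV interrupts
 * are requested only when the device advertises support for them.
 */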
2157 void
2158 init_port_config(void)
2159 {
2160 	portid_t pid;
2161 	struct rte_port *port;
2162 
2163 	RTE_ETH_FOREACH_DEV(pid) {
2164 		port = &ports[pid];
2165 		port->dev_conf.fdir_conf = fdir_conf;
2166 		if (nb_rxq > 1) {
2167 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2168 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2169 		} else {
2170 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2171 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2172 		}
2173 
2174 		if (port->dcb_flag == 0) {
2175 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2176 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2177 			else
2178 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2179 		}
2180 
2181 		rxtx_port_config(port);
2182 
2183 		rte_eth_macaddr_get(pid, &port->eth_addr);
2184 
2185 		map_port_queue_stats_mapping_registers(pid, port);
2186 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2187 		rte_pmd_ixgbe_bypass_init(pid);
2188 #endif
2189 
2190 		if (lsc_interrupt &&
2191 		    (rte_eth_devices[pid].data->dev_flags &
2192 		     RTE_ETH_DEV_INTR_LSC))
2193 			port->dev_conf.intr_conf.lsc = 1;
2194 		if (rmv_interrupt &&
2195 		    (rte_eth_devices[pid].data->dev_flags &
2196 		     RTE_ETH_DEV_INTR_RMV))
2197 			port->dev_conf.intr_conf.rmv = 1;
2198 
2199 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2200 		/* Detect softnic port */
2201 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2202 			port->softnic_enable = 1;
2203 			memset(&port->softport, 0, sizeof(struct softnic_port));
2204 
2205 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2206 				port->softport.tm_flag = 1;
2207 		}
2208 #endif
2209 	}
2210 }
2211 
2212 void set_port_slave_flag(portid_t slave_pid)
2213 {
2214 	struct rte_port *port;
2215 
2216 	port = &ports[slave_pid];
2217 	port->slave_flag = 1;
2218 }
2219 
2220 void clear_port_slave_flag(portid_t slave_pid)
2221 {
2222 	struct rte_port *port;
2223 
2224 	port = &ports[slave_pid];
2225 	port->slave_flag = 0;
2226 }
2227 
2228 uint8_t port_is_bonding_slave(portid_t slave_pid)
2229 {
2230 	struct rte_port *port;
2231 
2232 	port = &ports[slave_pid];
2233 	return port->slave_flag;
2234 }
2235 
2236 const uint16_t vlan_tags[] = {
2237 		0,  1,  2,  3,  4,  5,  6,  7,
2238 		8,  9, 10, 11,  12, 13, 14, 15,
2239 		16, 17, 18, 19, 20, 21, 22, 23,
2240 		24, 25, 26, 27, 28, 29, 30, 31
2241 };
2242 
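/*
 * Build an rte_eth_conf for DCB, either combined with virtualization
 * (VMDQ+DCB) or as plain DCB+RSS. In VMDQ+DCB mode the pool count is tied
 * to the TC count: 4 TCs select 32 pools and 8 TCs select 16 pools, so
 * pools * TCs covers the 128 queues of an ixgbe-class device (given here
 * as an illustration; other devices may differ). Each VLAN tag in
 * vlan_tags[] is mapped round-robin onto a pool, and user priorities 0..7
 * are spread over the TCs with i % num_tcs.
 */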
2243 static int
2244 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2245 		 enum dcb_mode_enable dcb_mode,
2246 		 enum rte_eth_nb_tcs num_tcs,
2247 		 uint8_t pfc_en)
2248 {
2249 	uint8_t i;
2250 
2251 	/*
2252 	 * Builds up the correct configuration for DCB+VT based on the vlan_tags array
2253 	 * given above, and the number of traffic classes available for use.
2254 	 */
2255 	if (dcb_mode == DCB_VT_ENABLED) {
2256 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2257 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2258 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2259 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2260 
2261 		/* VMDQ+DCB RX and TX configurations */
2262 		vmdq_rx_conf->enable_default_pool = 0;
2263 		vmdq_rx_conf->default_pool = 0;
2264 		vmdq_rx_conf->nb_queue_pools =
2265 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2266 		vmdq_tx_conf->nb_queue_pools =
2267 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2268 
2269 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2270 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2271 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2272 			vmdq_rx_conf->pool_map[i].pools =
2273 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2274 		}
2275 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2276 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2277 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2278 		}
2279 
2280 		/* set DCB mode of RX and TX of multiple queues */
2281 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2282 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2283 	} else {
2284 		struct rte_eth_dcb_rx_conf *rx_conf =
2285 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2286 		struct rte_eth_dcb_tx_conf *tx_conf =
2287 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2288 
2289 		rx_conf->nb_tcs = num_tcs;
2290 		tx_conf->nb_tcs = num_tcs;
2291 
2292 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2293 			rx_conf->dcb_tc[i] = i % num_tcs;
2294 			tx_conf->dcb_tc[i] = i % num_tcs;
2295 		}
2296 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2297 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2298 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2299 	}
2300 
2301 	if (pfc_en)
2302 		eth_conf->dcb_capability_en =
2303 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2304 	else
2305 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2306 
2307 	return 0;
2308 }
2309 
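/*
 * Apply a DCB configuration to one port. The device is first configured
 * with zero RX/TX queues so that only the global mode is written; queue
 * counts are then derived from dev_info, rx_free_thresh is forced to 64,
 * and the queues themselves are set up later by the regular start path.
 * Every entry of vlan_tags[] is added to the VLAN filter so that the VMDQ
 * pool mapping above can match it.
 */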
2310 int
2311 init_port_dcb_config(portid_t pid,
2312 		     enum dcb_mode_enable dcb_mode,
2313 		     enum rte_eth_nb_tcs num_tcs,
2314 		     uint8_t pfc_en)
2315 {
2316 	struct rte_eth_conf port_conf;
2317 	struct rte_port *rte_port;
2318 	int retval;
2319 	uint16_t i;
2320 
2321 	rte_port = &ports[pid];
2322 
2323 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2324 	/* Enter DCB configuration status */
2325 	dcb_config = 1;
2326 
2327 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2328 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2329 	if (retval < 0)
2330 		return retval;
2331 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2332 
2333 	/**
2334 	 * Write the configuration into the device.
2335 	 * Set the numbers of RX & TX queues to 0, so
2336 	 * the RX & TX queues will not be setup.
2337 	 */
2338 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2339 
2340 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2341 
2342 	/* If dev_info.vmdq_pool_base is greater than 0,
2343 	 * the queue id of vmdq pools is started after pf queues.
2344 	 */
2345 	if (dcb_mode == DCB_VT_ENABLED &&
2346 	    rte_port->dev_info.vmdq_pool_base > 0) {
2347 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2348 			" for port %d.\n", pid);
2349 		return -1;
2350 	}
2351 
2352 	/* Assume all ports in testpmd have the same DCB capability
2353 	 * and the same number of rxq and txq in DCB mode
2354 	 */
2355 	if (dcb_mode == DCB_VT_ENABLED) {
2356 		if (rte_port->dev_info.max_vfs > 0) {
2357 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2358 			nb_txq = rte_port->dev_info.nb_tx_queues;
2359 		} else {
2360 			nb_rxq = rte_port->dev_info.max_rx_queues;
2361 			nb_txq = rte_port->dev_info.max_tx_queues;
2362 		}
2363 	} else {
2364 		/* If VT is disabled, use all PF queues */
2365 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2366 			nb_rxq = rte_port->dev_info.max_rx_queues;
2367 			nb_txq = rte_port->dev_info.max_tx_queues;
2368 		} else {
2369 			nb_rxq = (queueid_t)num_tcs;
2370 			nb_txq = (queueid_t)num_tcs;
2371 
2372 		}
2373 	}
2374 	rx_free_thresh = 64;
2375 
2376 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2377 
2378 	rxtx_port_config(rte_port);
2379 	/* VLAN filter */
2380 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2381 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2382 		rx_vft_set(pid, vlan_tags[i], 1);
2383 
2384 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2385 	map_port_queue_stats_mapping_registers(pid, rte_port);
2386 
2387 	rte_port->dcb_flag = 1;
2388 
2389 	return 0;
2390 }
2391 
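/*
 * Allocate the ports[] array for the maximum possible number of ports;
 * rte_zmalloc() zeroes it, so every port starts from a clean state.
 */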
2392 static void
2393 init_port(void)
2394 {
2395 	/* Configuration of Ethernet ports. */
2396 	ports = rte_zmalloc("testpmd: ports",
2397 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2398 			    RTE_CACHE_LINE_SIZE);
2399 	if (ports == NULL) {
2400 		rte_exit(EXIT_FAILURE,
2401 				"rte_zmalloc(%d struct rte_port) failed\n",
2402 				RTE_MAX_ETHPORTS);
2403 	}
2404 }
2405 
2406 static void
2407 force_quit(void)
2408 {
2409 	pmd_test_exit();
2410 	prompt_exit();
2411 }
2412 
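/*
 * Periodic statistics display. The two escape sequences are ESC[2J
 * (clear screen) and ESC[1;1H (move the cursor to the top-left corner),
 * emitted before the per-port counters are printed.
 */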
2413 static void
2414 print_stats(void)
2415 {
2416 	uint8_t i;
2417 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2418 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2419 
2420 	/* Clear screen and move to top left */
2421 	printf("%s%s", clr, top_left);
2422 
2423 	printf("\nPort statistics ====================================");
2424 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2425 		nic_stats_display(fwd_ports_ids[i]);
2426 }
2427 
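/*
 * SIGINT/SIGTERM handler: shut testpmd down cleanly, then restore the
 * default disposition and re-raise the signal so that the process exits
 * with the conventional status for that signal.
 */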
2428 static void
2429 signal_handler(int signum)
2430 {
2431 	if (signum == SIGINT || signum == SIGTERM) {
2432 		printf("\nSignal %d received, preparing to exit...\n",
2433 				signum);
2434 #ifdef RTE_LIBRTE_PDUMP
2435 		/* uninitialize packet capture framework */
2436 		rte_pdump_uninit();
2437 #endif
2438 #ifdef RTE_LIBRTE_LATENCY_STATS
2439 		rte_latencystats_uninit();
2440 #endif
2441 		force_quit();
2442 		/* Set flag to indicate the force termination. */
2443 		f_quit = 1;
2444 		/* exit with the expected status */
2445 		signal(signum, SIG_DFL);
2446 		kill(getpid(), signum);
2447 	}
2448 }
2449 
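/*
 * Start-up sequence: initialize the EAL, register the testpmd log type,
 * lock memory, allocate the port array, parse the application arguments
 * that follow the EAL ones, build the forwarding configuration, start all
 * ports in promiscuous mode, and finally either enter the interactive
 * prompt or start forwarding right away.
 */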
2450 int
2451 main(int argc, char** argv)
2452 {
2453 	int  diag;
2454 	portid_t port_id;
2455 
2456 	signal(SIGINT, signal_handler);
2457 	signal(SIGTERM, signal_handler);
2458 
2459 	diag = rte_eal_init(argc, argv);
2460 	if (diag < 0)
2461 		rte_panic("Cannot init EAL\n");
2462 
2463 	testpmd_logtype = rte_log_register("testpmd");
2464 	if (testpmd_logtype < 0)
2465 		rte_panic("Cannot register log type");
2466 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2467 
2468 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2469 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2470 			strerror(errno));
2471 	}
2472 
2473 #ifdef RTE_LIBRTE_PDUMP
2474 	/* initialize packet capture framework */
2475 	rte_pdump_init(NULL);
2476 #endif
2477 
2478 	nb_ports = (portid_t) rte_eth_dev_count();
2479 	if (nb_ports == 0)
2480 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2481 
2482 	/* allocate port structures, and init them */
2483 	init_port();
2484 
2485 	set_def_fwd_config();
2486 	if (nb_lcores == 0)
2487 		rte_panic("Empty set of forwarding logical cores - check the "
2488 			  "core mask supplied in the command parameters\n");
2489 
2490 	/* Bitrate/latency stats disabled by default */
2491 #ifdef RTE_LIBRTE_BITRATE
2492 	bitrate_enabled = 0;
2493 #endif
2494 #ifdef RTE_LIBRTE_LATENCY_STATS
2495 	latencystats_enabled = 0;
2496 #endif
2497 
2498 	argc -= diag;
2499 	argv += diag;
2500 	if (argc > 1)
2501 		launch_args_parse(argc, argv);
2502 
2503 	if (tx_first && interactive)
2504 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2505 				"interactive mode.\n");
2506 
2507 	if (tx_first && lsc_interrupt) {
2508 		printf("Warning: lsc_interrupt needs to be off when "
2509 				"using tx_first. Disabling.\n");
2510 		lsc_interrupt = 0;
2511 	}
2512 
2513 	if (!nb_rxq && !nb_txq)
2514 		printf("Warning: Either rx or tx queues should be non-zero\n");
2515 
2516 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2517 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2518 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2519 		       nb_rxq, nb_txq);
2520 
2521 	init_config();
2522 	if (start_port(RTE_PORT_ALL) != 0)
2523 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2524 
2525 	/* set all ports to promiscuous mode by default */
2526 	RTE_ETH_FOREACH_DEV(port_id)
2527 		rte_eth_promiscuous_enable(port_id);
2528 
2529 	/* Init metrics library */
2530 	rte_metrics_init(rte_socket_id());
2531 
2532 #ifdef RTE_LIBRTE_LATENCY_STATS
2533 	if (latencystats_enabled != 0) {
2534 		int ret = rte_latencystats_init(1, NULL);
2535 		if (ret)
2536 			printf("Warning: latencystats init() returned error %d\n",
2537 				ret);
2538 		printf("Latencystats running on lcore %d\n",
2539 			latencystats_lcore_id);
2540 	}
2541 #endif
2542 
2543 	/* Setup bitrate stats */
2544 #ifdef RTE_LIBRTE_BITRATE
2545 	if (bitrate_enabled != 0) {
2546 		bitrate_data = rte_stats_bitrate_create();
2547 		if (bitrate_data == NULL)
2548 			rte_exit(EXIT_FAILURE,
2549 				"Could not allocate bitrate data.\n");
2550 		rte_stats_bitrate_reg(bitrate_data);
2551 	}
2552 #endif
2553 
2554 #ifdef RTE_LIBRTE_CMDLINE
2555 	if (strlen(cmdline_filename) != 0)
2556 		cmdline_read_from_file(cmdline_filename);
2557 
2558 	if (interactive == 1) {
2559 		if (auto_start) {
2560 			printf("Start automatic packet forwarding\n");
2561 			start_packet_forwarding(0);
2562 		}
2563 		prompt();
2564 		pmd_test_exit();
2565 	} else
2566 #endif
2567 	{
2568 		char c;
2569 		int rc;
2570 
2571 		f_quit = 0;
2572 
2573 		printf("No interactive command line, starting packet forwarding\n");
2574 		start_packet_forwarding(tx_first);
2575 		if (stats_period != 0) {
2576 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2577 			uint64_t timer_period;
2578 
2579 			/* Convert to number of cycles */
2580 			timer_period = stats_period * rte_get_timer_hz();
2581 
2582 			while (f_quit == 0) {
2583 				cur_time = rte_get_timer_cycles();
2584 				diff_time += cur_time - prev_time;
2585 
2586 				if (diff_time >= timer_period) {
2587 					print_stats();
2588 					/* Reset the timer */
2589 					diff_time = 0;
2590 				}
2591 				/* Sleep to avoid unnecessary checks */
2592 				prev_time = cur_time;
2593 				sleep(1);
2594 			}
2595 		}
2596 
2597 		printf("Press enter to exit\n");
2598 		rc = read(0, &c, 1);
2599 		pmd_test_exit();
2600 		if (rc < 0)
2601 			return 1;
2602 	}
2603 
2604 	return 0;
2605 }
2606