xref: /dpdk/app/test-pmd/testpmd.c (revision 0e45c64d27977aa5e4f8745563aec17ee0e9f89b)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * When running in a container, the process started with the 'stats-period'
 * option cannot otherwise be terminated. Set a flag to exit the stats-period
 * loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

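/*
 * Illustrative example (not part of the code): assuming the usual testpmd
 * interactive command syntax, "set txpkts 64,128" would turn the
 * configuration above into:
 *
 *   tx_pkt_seg_lengths = {64, 128}
 *   tx_pkt_nb_segs     = 2
 *   tx_pkt_length      = 192   (sum of the segment lengths)
 */
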
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing state. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
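/*
 * Sketch of how such a mask is meant to be consumed (illustrative only,
 * not a quote of the event handler):
 *
 *   if (event_print_mask & (UINT32_C(1) << type))
 *           printf("Port %u: event %d\n", port_id, type);
 *
 * One bit per rte_eth_event_type value; VF_MBOX is left unset above.
 */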

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
		     DEV_RX_OFFLOAD_VLAN_STRIP |
		     DEV_RX_OFFLOAD_CRC_STRIP),
	.ignore_offload_bitfield = 1,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return a positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket has not been discovered yet.
 * Return a positive value if the socket id is new, zero if it has already
 * been recorded in socket_ids[].
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Set up the default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
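
/*
 * Usage sketch (hypothetical sizes, not part of testpmd): one pool is
 * created per NUMA socket so that forwarding lcores can later look up the
 * pool local to their socket:
 *
 *   mbuf_pool_create(2048, 171456, 0);            // pool on socket 0
 *   struct rte_mempool *mp = mbuf_pool_find(0);   // retrieve it by socket id
 */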

/*
 * Check whether a given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id that has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * It is valid if it is not greater than the maximum number of RX queues
 * supported by every port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id that has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * It is valid if it is not greater than the maximum number of TX queues
 * supported by every port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}
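
/*
 * Example of how the two checks above are intended to be used
 * (illustrative only): a parameter handler validates user input before
 * applying it:
 *
 *   if (check_nb_rxq(rxq) == 0 && check_nb_txq(txq) == 0) {
 *           nb_rxq = rxq;
 *           nb_txq = txq;
 *   }
 */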

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TX configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Record which mbuf pool each logical core uses, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a GRO context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
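
/*
 * Worked example of the default pool sizing in init_config() (the macro
 * values are assumptions for the arithmetic, not quoted from testpmd.h):
 * with RTE_TEST_RX_DESC_MAX = 2048, RTE_TEST_TX_DESC_MAX = 2048,
 * MAX_PKT_BURST = 512, 4 lcores and mb_mempool_cache = 250, each pool
 * holds (2048 + 4 * 250 + 2048 + 512) * RTE_MAX_ETHPORTS
 * = 5608 * RTE_MAX_ETHPORTS mbufs.
 */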


void
reconfig(portid_t new_port_id, unsigned int socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
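
/*
 * Worked example (illustrative): with 2 ports, nb_rxq = 4 and nb_txq = 2,
 * q = max(4, 2) = 4, so nb_fwd_streams = 2 * 4 = 8 stream slots are
 * allocated, one per RX queue of each port.
 */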

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
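
/*
 * Worked example (illustrative): if pkt_burst_spread recorded 900 bursts
 * of 32 packets and 100 bursts of 1 packet, then total_burst = 1000 and
 * the function prints:
 *
 *   RX-bursts : 1000 [90% of 32 pkts + 10% of 1 pkts]
 */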

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets, so a timer is
				 * used to exit the loop after the 1-second
				 * timeout expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
					/* accumulate only per-iteration deltas */
					prev_tsc = cur_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}
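
/*
 * The TSC-based timeout pattern used above, reduced to a sketch
 * (illustrative pseudo-code; work_remains()/do_work() are placeholders,
 * not real functions):
 *
 *   uint64_t deadline = rte_rdtsc() + rte_get_timer_hz();  // now + 1 second
 *   while (work_remains() && rte_rdtsc() < deadline)
 *           do_work();
 */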

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
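	/*
	 * With stopped already set, the do-while loop in
	 * run_pkt_fwd_on_lcore() executes exactly once, so a single
	 * burst is sent on each stream of this lcore.
	 */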
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Set up the per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
			/* Apply TX offloads configuration */
			port->tx_conf.offloads = port->dev_conf.txmode.offloads;
			/* setup TX queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* Apply RX offloads configuration */
			port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
			/* setup RX queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"No mempool allocation"
							" on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"No mempool allocation"
							" on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to start port, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to setup event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
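
/*
 * Port state transitions driven by start_port()/stop_port()/close_port(),
 * summarized as a sketch (derived from the rte_atomic16_cmpset() calls in
 * this file, not a formal API contract):
 *
 *   STOPPED --start_port()--> HANDLING --success--> STARTED
 *   STARTED --stop_port()---> HANDLING --success--> STOPPED
 *   STOPPED --close_port()--> HANDLING --success--> CLOSED
 *
 * The compare-and-set guard makes a concurrent transition on the same
 * port fail visibly instead of racing.
 */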

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}
1840 
1841 void
1842 attach_port(char *identifier)
1843 {
1844 	portid_t pi = 0;
1845 	unsigned int socket_id;
1846 
1847 	printf("Attaching a new port...\n");
1848 
1849 	if (identifier == NULL) {
1850 		printf("Invalid parameters are specified\n");
1851 		return;
1852 	}
1853 
1854 	if (rte_eth_dev_attach(identifier, &pi))
1855 		return;
1856 
1857 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1858 	/* if socket_id is invalid, set to 0 */
1859 	if (check_socket_id(socket_id) < 0)
1860 		socket_id = 0;
1861 	reconfig(pi, socket_id);
1862 	rte_eth_promiscuous_enable(pi);
1863 
1864 	nb_ports = rte_eth_dev_count();
1865 
1866 	ports[pi].port_status = RTE_PORT_STOPPED;
1867 
1868 	printf("Port %d is attached. Now total number of ports is %d\n", pi, nb_ports);
1869 	printf("Done\n");
1870 }
1871 
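/*
 * Hotplug-remove a port. The port must be closed first; its flow
 * rules are flushed before the underlying device is detached.
 */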
1872 void
1873 detach_port(portid_t port_id)
1874 {
1875 	char name[RTE_ETH_NAME_MAX_LEN];
1876 
1877 	printf("Detaching a port...\n");
1878 
1879 	if (!port_is_closed(port_id)) {
1880 		printf("Please close port first\n");
1881 		return;
1882 	}
1883 
1884 	if (ports[port_id].flow_list)
1885 		port_flow_flush(port_id);
1886 
1887 	if (rte_eth_dev_detach(port_id, name)) {
1888 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1889 		return;
1890 	}
1891 
1892 	nb_ports = rte_eth_dev_count();
1893 
1894 	printf("Port '%s' is detached. Now total number of ports is %d\n",
1895 			name, nb_ports);
1896 	printf("Done\n");
1897 	return;
1898 }
1899 
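/* Stop forwarding if it is still running, then stop and close all ports. */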
1900 void
1901 pmd_test_exit(void)
1902 {
1903 	portid_t pt_id;
1904 
1905 	if (test_done == 0)
1906 		stop_packet_forwarding();
1907 
1908 	if (ports != NULL) {
1909 		no_link_check = 1;
1910 		RTE_ETH_FOREACH_DEV(pt_id) {
1911 			printf("\nShutting down port %d...\n", pt_id);
1912 			fflush(stdout);
1913 			stop_port(pt_id);
1914 			close_port(pt_id);
1915 		}
1916 	}
1917 	printf("\nBye...\n");
1918 }
1919 
1920 typedef void (*cmd_func_t)(void);
1921 struct pmd_test_command {
1922 	const char *cmd_name;
1923 	cmd_func_t cmd_func;
1924 };
1925 
1926 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1927 
1928 /* Check the link status of all ports in up to 9 s, and print the final status of each */
1929 static void
1930 check_all_ports_link_status(uint32_t port_mask)
1931 {
1932 #define CHECK_INTERVAL 100 /* 100ms */
1933 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1934 	portid_t portid;
1935 	uint8_t count, all_ports_up, print_flag = 0;
1936 	struct rte_eth_link link;
1937 
1938 	printf("Checking link statuses...\n");
1939 	fflush(stdout);
1940 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1941 		all_ports_up = 1;
1942 		RTE_ETH_FOREACH_DEV(portid) {
1943 			if ((port_mask & (1 << portid)) == 0)
1944 				continue;
1945 			memset(&link, 0, sizeof(link));
1946 			rte_eth_link_get_nowait(portid, &link);
1947 			/* print link status if flag set */
1948 			if (print_flag == 1) {
1949 				if (link.link_status)
1950 					printf(
1951 					"Port %d Link Up. speed %u Mbps - %s\n",
1952 					portid, link.link_speed,
1953 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1954 					("full-duplex") : ("half-duplex"));
1955 				else
1956 					printf("Port %d Link Down\n", portid);
1957 				continue;
1958 			}
1959 			/* clear all_ports_up flag if any link down */
1960 			if (link.link_status == ETH_LINK_DOWN) {
1961 				all_ports_up = 0;
1962 				break;
1963 			}
1964 		}
1965 		/* after finally printing all link status, get out */
1966 		if (print_flag == 1)
1967 			break;
1968 
1969 		if (all_ports_up == 0) {
1970 			fflush(stdout);
1971 			rte_delay_ms(CHECK_INTERVAL);
1972 		}
1973 
1974 		/* set the print_flag if all ports up or timeout */
1975 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1976 			print_flag = 1;
1977 		}
1978 
1979 		if (lsc_interrupt)
1980 			break;
1981 	}
1982 }
1983 
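/*
 * Deferred handler for device removal (RMV) interrupts: stop and
 * close the port, then detach the underlying device. Scheduled
 * through an EAL alarm so it does not run in the interrupt thread.
 */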
1984 static void
1985 rmv_event_callback(void *arg)
1986 {
1987 	struct rte_eth_dev *dev;
1988 	portid_t port_id = (intptr_t)arg;
1989 
1990 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1991 	dev = &rte_eth_devices[port_id];
1992 
1993 	stop_port(port_id);
1994 	close_port(port_id);
1995 	printf("removing device %s\n", dev->device->name);
1996 	if (rte_eal_dev_detach(dev->device))
1997 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1998 			dev->device->name);
1999 }
2000 
2001 /* This function is used by the interrupt thread */
2002 static int
2003 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2004 		  void *ret_param)
2005 {
2006 	static const char * const event_desc[] = {
2007 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2008 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2009 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2010 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2011 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2012 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2013 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2014 		[RTE_ETH_EVENT_NEW] = "device probed",
2015 		[RTE_ETH_EVENT_DESTROY] = "device released",
2016 		[RTE_ETH_EVENT_MAX] = NULL,
2017 	};
2018 
2019 	RTE_SET_USED(param);
2020 	RTE_SET_USED(ret_param);
2021 
2022 	if (type >= RTE_ETH_EVENT_MAX) {
2023 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2024 			port_id, __func__, type);
2025 		fflush(stderr);
2026 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2027 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2028 			event_desc[type]);
2029 		fflush(stdout);
2030 	}
2031 
2032 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2033 		return 0;
2034 
2035 	switch (type) {
2036 	case RTE_ETH_EVENT_INTR_RMV:
2037 		if (rte_eal_alarm_set(100000,
2038 				rmv_event_callback, (void *)(intptr_t)port_id))
2039 			fprintf(stderr, "Could not set up deferred device removal\n");
2040 		break;
2041 	default:
2042 		break;
2043 	}
2044 	return 0;
2045 }
2046 
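/* Apply the user-supplied TX queue to stats-counter mappings of a port. */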
2047 static int
2048 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2049 {
2050 	uint16_t i;
2051 	int diag;
2052 	uint8_t mapping_found = 0;
2053 
2054 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2055 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2056 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2057 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2058 					tx_queue_stats_mappings[i].queue_id,
2059 					tx_queue_stats_mappings[i].stats_counter_id);
2060 			if (diag != 0)
2061 				return diag;
2062 			mapping_found = 1;
2063 		}
2064 	}
2065 	if (mapping_found)
2066 		port->tx_queue_stats_mapping_enabled = 1;
2067 	return 0;
2068 }
2069 
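/* Apply the user-supplied RX queue to stats-counter mappings of a port. */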
2070 static int
2071 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2072 {
2073 	uint16_t i;
2074 	int diag;
2075 	uint8_t mapping_found = 0;
2076 
2077 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2078 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2079 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2080 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2081 					rx_queue_stats_mappings[i].queue_id,
2082 					rx_queue_stats_mappings[i].stats_counter_id);
2083 			if (diag != 0)
2084 				return diag;
2085 			mapping_found = 1;
2086 		}
2087 	}
2088 	if (mapping_found)
2089 		port->rx_queue_stats_mapping_enabled = 1;
2090 	return 0;
2091 }
2092 
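/*
 * Program the TX and RX queue stats mappings of a port.
 * -ENOTSUP from the PMD only disables the feature; any other
 * error is fatal.
 */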
2093 static void
2094 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2095 {
2096 	int diag = 0;
2097 
2098 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2099 	if (diag != 0) {
2100 		if (diag == -ENOTSUP) {
2101 			port->tx_queue_stats_mapping_enabled = 0;
2102 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2103 		}
2104 		else
2105 			rte_exit(EXIT_FAILURE,
2106 					"set_tx_queue_stats_mapping_registers "
2107 					"failed for port id=%d diag=%d\n",
2108 					pi, diag);
2109 	}
2110 
2111 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2112 	if (diag != 0) {
2113 		if (diag == -ENOTSUP) {
2114 			port->rx_queue_stats_mapping_enabled = 0;
2115 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2116 		}
2117 		else
2118 			rte_exit(EXIT_FAILURE,
2119 					"set_rx_queue_stats_mapping_registers "
2120 					"failed for port id=%d diag=%d\n",
2121 					pi, diag);
2122 	}
2123 }
2124 
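/*
 * Start from the default RX/TX configuration advertised by the PMD
 * and override only the thresholds given on the command line.
 */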
2125 static void
2126 rxtx_port_config(struct rte_port *port)
2127 {
2128 	port->rx_conf = port->dev_info.default_rxconf;
2129 	port->tx_conf = port->dev_info.default_txconf;
2130 
2131 	/* Check if any RX/TX parameters have been passed */
2132 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2133 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2134 
2135 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2136 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2137 
2138 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2139 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2140 
2141 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2142 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2143 
2144 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2145 		port->rx_conf.rx_drop_en = rx_drop_en;
2146 
2147 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2148 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2149 
2150 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2151 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2152 
2153 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2154 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2155 
2156 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2157 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2158 
2159 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2160 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2161 }
2162 
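/*
 * Build the default configuration of every port: flow director,
 * RSS when several RX queues are used, RX/TX thresholds and
 * LSC/RMV interrupt flags.
 */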
2163 void
2164 init_port_config(void)
2165 {
2166 	portid_t pid;
2167 	struct rte_port *port;
2168 
2169 	RTE_ETH_FOREACH_DEV(pid) {
2170 		port = &ports[pid];
2171 		port->dev_conf.fdir_conf = fdir_conf;
2172 		if (nb_rxq > 1) {
2173 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2174 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2175 		} else {
2176 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2177 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2178 		}
2179 
2180 		if (port->dcb_flag == 0) {
2181 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2182 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2183 			else
2184 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2185 		}
2186 
2187 		rxtx_port_config(port);
2188 
2189 		rte_eth_macaddr_get(pid, &port->eth_addr);
2190 
2191 		map_port_queue_stats_mapping_registers(pid, port);
2192 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2193 		rte_pmd_ixgbe_bypass_init(pid);
2194 #endif
2195 
2196 		if (lsc_interrupt &&
2197 		    (rte_eth_devices[pid].data->dev_flags &
2198 		     RTE_ETH_DEV_INTR_LSC))
2199 			port->dev_conf.intr_conf.lsc = 1;
2200 		if (rmv_interrupt &&
2201 		    (rte_eth_devices[pid].data->dev_flags &
2202 		     RTE_ETH_DEV_INTR_RMV))
2203 			port->dev_conf.intr_conf.rmv = 1;
2204 
2205 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2206 		/* Detect softnic port */
2207 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2208 			port->softnic_enable = 1;
2209 			memset(&port->softport, 0, sizeof(struct softnic_port));
2210 
2211 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2212 				port->softport.tm_flag = 1;
2213 		}
2214 #endif
2215 	}
2216 }
2217 
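/*
 * Mark, clear and query the bonding-slave flag of a port; slave
 * ports are skipped by the start/stop/close/reset operations above.
 */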
2218 void set_port_slave_flag(portid_t slave_pid)
2219 {
2220 	struct rte_port *port;
2221 
2222 	port = &ports[slave_pid];
2223 	port->slave_flag = 1;
2224 }
2225 
2226 void clear_port_slave_flag(portid_t slave_pid)
2227 {
2228 	struct rte_port *port;
2229 
2230 	port = &ports[slave_pid];
2231 	port->slave_flag = 0;
2232 }
2233 
2234 uint8_t port_is_bonding_slave(portid_t slave_pid)
2235 {
2236 	struct rte_port *port;
2237 
2238 	port = &ports[slave_pid];
2239 	return port->slave_flag;
2240 }
2241 
2242 const uint16_t vlan_tags[] = {
2243 		0,  1,  2,  3,  4,  5,  6,  7,
2244 		8,  9, 10, 11,  12, 13, 14, 15,
2245 		16, 17, 18, 19, 20, 21, 22, 23,
2246 		24, 25, 26, 27, 28, 29, 30, 31
2247 };
2248 
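/*
 * Fill an Ethernet configuration for DCB, either combined with
 * virtualization (VMDQ+DCB) or as plain DCB with RSS.
 */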
2249 static int
2250 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2251 		 enum dcb_mode_enable dcb_mode,
2252 		 enum rte_eth_nb_tcs num_tcs,
2253 		 uint8_t pfc_en)
2254 {
2255 	uint8_t i;
2256 
2257 	/*
2258 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2259 	 * given above, and the number of traffic classes available for use.
2260 	 */
2261 	if (dcb_mode == DCB_VT_ENABLED) {
2262 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2263 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2264 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2265 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2266 
2267 		/* VMDQ+DCB RX and TX configurations */
2268 		vmdq_rx_conf->enable_default_pool = 0;
2269 		vmdq_rx_conf->default_pool = 0;
2270 		vmdq_rx_conf->nb_queue_pools =
2271 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2272 		vmdq_tx_conf->nb_queue_pools =
2273 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2274 
2275 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2276 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2277 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2278 			vmdq_rx_conf->pool_map[i].pools =
2279 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2280 		}
2281 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2282 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2283 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2284 		}
2285 
2286 		/* set DCB mode of RX and TX of multiple queues */
2287 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2288 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2289 	} else {
2290 		struct rte_eth_dcb_rx_conf *rx_conf =
2291 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2292 		struct rte_eth_dcb_tx_conf *tx_conf =
2293 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2294 
2295 		rx_conf->nb_tcs = num_tcs;
2296 		tx_conf->nb_tcs = num_tcs;
2297 
2298 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2299 			rx_conf->dcb_tc[i] = i % num_tcs;
2300 			tx_conf->dcb_tc[i] = i % num_tcs;
2301 		}
2302 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2303 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2304 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2305 	}
2306 
2307 	if (pfc_en)
2308 		eth_conf->dcb_capability_en =
2309 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2310 	else
2311 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2312 
2313 	return 0;
2314 }
2315 
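/*
 * Switch a port to DCB operation: build the DCB configuration,
 * derive the number of RX/TX queues from the device capabilities
 * and install a VLAN filter for each entry of vlan_tags[].
 */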
2316 int
2317 init_port_dcb_config(portid_t pid,
2318 		     enum dcb_mode_enable dcb_mode,
2319 		     enum rte_eth_nb_tcs num_tcs,
2320 		     uint8_t pfc_en)
2321 {
2322 	struct rte_eth_conf port_conf;
2323 	struct rte_port *rte_port;
2324 	int retval;
2325 	uint16_t i;
2326 
2327 	rte_port = &ports[pid];
2328 
2329 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2330 	/* Enter DCB configuration status */
2331 	dcb_config = 1;
2332 
2333 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
2334 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2335 	if (retval < 0)
2336 		return retval;
2337 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2338 
2339 	/**
2340 	 * Write the configuration into the device.
2341 	 * Set the numbers of RX & TX queues to 0, so
2342 	 * the RX & TX queues will not be setup.
2343 	 */
2344 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2345 
2346 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2347 
2348 	/* If dev_info.vmdq_pool_base is greater than 0,
2349 	 * the queue ids of the vmdq pools start after the pf queues.
2350 	 */
2351 	if (dcb_mode == DCB_VT_ENABLED &&
2352 	    rte_port->dev_info.vmdq_pool_base > 0) {
2353 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2354 			" for port %d.", pid);
2355 		return -1;
2356 	}
2357 
2358 	/* Assume the ports in testpmd have the same dcb capability
2359 	 * and the same number of rxq and txq in dcb mode
2360 	 */
2361 	if (dcb_mode == DCB_VT_ENABLED) {
2362 		if (rte_port->dev_info.max_vfs > 0) {
2363 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2364 			nb_txq = rte_port->dev_info.nb_tx_queues;
2365 		} else {
2366 			nb_rxq = rte_port->dev_info.max_rx_queues;
2367 			nb_txq = rte_port->dev_info.max_tx_queues;
2368 		}
2369 	} else {
2370 		/* if VT is disabled, use all PF queues */
2371 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2372 			nb_rxq = rte_port->dev_info.max_rx_queues;
2373 			nb_txq = rte_port->dev_info.max_tx_queues;
2374 		} else {
2375 			nb_rxq = (queueid_t)num_tcs;
2376 			nb_txq = (queueid_t)num_tcs;
2377 
2378 		}
2379 	}
2380 	rx_free_thresh = 64;
2381 
2382 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2383 
2384 	rxtx_port_config(rte_port);
2385 	/* VLAN filter */
2386 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2387 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2388 		rx_vft_set(pid, vlan_tags[i], 1);
2389 
2390 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2391 	map_port_queue_stats_mapping_registers(pid, rte_port);
2392 
2393 	rte_port->dcb_flag = 1;
2394 
2395 	return 0;
2396 }
2397 
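/* Allocate and zero the port structures of all possible ports. */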
2398 static void
2399 init_port(void)
2400 {
2401 	/* Configuration of Ethernet ports. */
2402 	ports = rte_zmalloc("testpmd: ports",
2403 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2404 			    RTE_CACHE_LINE_SIZE);
2405 	if (ports == NULL) {
2406 		rte_exit(EXIT_FAILURE,
2407 				"rte_zmalloc(%d struct rte_port) failed\n",
2408 				RTE_MAX_ETHPORTS);
2409 	}
2410 }
2411 
2412 static void
2413 force_quit(void)
2414 {
2415 	pmd_test_exit();
2416 	prompt_exit();
2417 }
2418 
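/* Clear the screen and display the statistics of all forwarding ports. */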
2419 static void
2420 print_stats(void)
2421 {
2422 	uint8_t i;
2423 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2424 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2425 
2426 	/* Clear screen and move to top left */
2427 	printf("%s%s", clr, top_left);
2428 
2429 	printf("\nPort statistics ====================================");
2430 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2431 		nic_stats_display(fwd_ports_ids[i]);
2432 }
2433 
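/*
 * On SIGINT/SIGTERM: uninitialize the capture and latency-stats
 * frameworks, close everything and re-raise the signal with its
 * default handler so the exit status reflects the termination cause.
 */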
2434 static void
2435 signal_handler(int signum)
2436 {
2437 	if (signum == SIGINT || signum == SIGTERM) {
2438 		printf("\nSignal %d received, preparing to exit...\n",
2439 				signum);
2440 #ifdef RTE_LIBRTE_PDUMP
2441 		/* uninitialize packet capture framework */
2442 		rte_pdump_uninit();
2443 #endif
2444 #ifdef RTE_LIBRTE_LATENCY_STATS
2445 		rte_latencystats_uninit();
2446 #endif
2447 		force_quit();
2448 		/* Set flag to indicate the force termination. */
2449 		f_quit = 1;
2450 		/* exit with the expected status */
2451 		signal(signum, SIG_DFL);
2452 		kill(getpid(), signum);
2453 	}
2454 }
2455 
2456 int
2457 main(int argc, char** argv)
2458 {
2459 	int  diag;
2460 	portid_t port_id;
2461 
2462 	signal(SIGINT, signal_handler);
2463 	signal(SIGTERM, signal_handler);
2464 
2465 	diag = rte_eal_init(argc, argv);
2466 	if (diag < 0)
2467 		rte_panic("Cannot init EAL\n");
2468 
2469 	testpmd_logtype = rte_log_register("testpmd");
2470 	if (testpmd_logtype < 0)
2471 		rte_panic("Cannot register log type");
2472 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2473 
2474 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2475 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2476 			strerror(errno));
2477 	}
2478 
2479 #ifdef RTE_LIBRTE_PDUMP
2480 	/* initialize packet capture framework */
2481 	rte_pdump_init(NULL);
2482 #endif
2483 
2484 	nb_ports = (portid_t) rte_eth_dev_count();
2485 	if (nb_ports == 0)
2486 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2487 
2488 	/* allocate port structures, and init them */
2489 	init_port();
2490 
2491 	set_def_fwd_config();
2492 	if (nb_lcores == 0)
2493 		rte_panic("Empty set of forwarding logical cores - check the "
2494 			  "core mask supplied in the command parameters\n");
2495 
2496 	/* Bitrate/latency stats disabled by default */
2497 #ifdef RTE_LIBRTE_BITRATE
2498 	bitrate_enabled = 0;
2499 #endif
2500 #ifdef RTE_LIBRTE_LATENCY_STATS
2501 	latencystats_enabled = 0;
2502 #endif
2503 
2504 	argc -= diag;
2505 	argv += diag;
2506 	if (argc > 1)
2507 		launch_args_parse(argc, argv);
2508 
2509 	if (tx_first && interactive)
2510 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2511 				"interactive mode.\n");
2512 
2513 	if (tx_first && lsc_interrupt) {
2514 		printf("Warning: lsc_interrupt needs to be off when "
2515 				" using tx_first. Disabling.\n");
2516 		lsc_interrupt = 0;
2517 	}
2518 
2519 	if (!nb_rxq && !nb_txq)
2520 		printf("Warning: Either rx or tx queues should be non-zero\n");
2521 
2522 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2523 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2524 		       "but nb_txq=%d will prevent to fully test it.\n",
2525 		       nb_rxq, nb_txq);
2526 
2527 	init_config();
2528 	if (start_port(RTE_PORT_ALL) != 0)
2529 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2530 
2531 	/* set all ports to promiscuous mode by default */
2532 	RTE_ETH_FOREACH_DEV(port_id)
2533 		rte_eth_promiscuous_enable(port_id);
2534 
2535 	/* Init metrics library */
2536 	rte_metrics_init(rte_socket_id());
2537 
2538 #ifdef RTE_LIBRTE_LATENCY_STATS
2539 	if (latencystats_enabled != 0) {
2540 		int ret = rte_latencystats_init(1, NULL);
2541 		if (ret)
2542 			printf("Warning: latencystats init()"
2543 				" returned error %d\n",	ret);
2544 		printf("Latencystats running on lcore %d\n",
2545 			latencystats_lcore_id);
2546 	}
2547 #endif
2548 
2549 	/* Setup bitrate stats */
2550 #ifdef RTE_LIBRTE_BITRATE
2551 	if (bitrate_enabled != 0) {
2552 		bitrate_data = rte_stats_bitrate_create();
2553 		if (bitrate_data == NULL)
2554 			rte_exit(EXIT_FAILURE,
2555 				"Could not allocate bitrate data.\n");
2556 		rte_stats_bitrate_reg(bitrate_data);
2557 	}
2558 #endif
2559 
2560 #ifdef RTE_LIBRTE_CMDLINE
2561 	if (strlen(cmdline_filename) != 0)
2562 		cmdline_read_from_file(cmdline_filename);
2563 
2564 	if (interactive == 1) {
2565 		if (auto_start) {
2566 			printf("Start automatic packet forwarding\n");
2567 			start_packet_forwarding(0);
2568 		}
2569 		prompt();
2570 		pmd_test_exit();
2571 	} else
2572 #endif
2573 	{
2574 		char c;
2575 		int rc;
2576 
2577 		f_quit = 0;
2578 
2579 		printf("No command line given, starting packet forwarding\n");
2580 		start_packet_forwarding(tx_first);
2581 		if (stats_period != 0) {
2582 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2583 			uint64_t timer_period;
2584 
2585 			/* Convert to number of cycles */
2586 			timer_period = stats_period * rte_get_timer_hz();
2587 
2588 			while (f_quit == 0) {
2589 				cur_time = rte_get_timer_cycles();
2590 				diff_time += cur_time - prev_time;
2591 
2592 				if (diff_time >= timer_period) {
2593 					print_stats();
2594 					/* Reset the timer */
2595 					diff_time = 0;
2596 				}
2597 				/* Sleep to avoid unnecessary checks */
2598 				prev_time = cur_time;
2599 				sleep(1);
2600 			}
2601 		}
2602 
2603 		printf("Press enter to exit\n");
2604 		rc = read(0, &c, 1);
2605 		pmd_test_exit();
2606 		if (rc < 0)
2607 			return 1;
2608 	}
2609 
2610 	return 0;
2611 }
2612