/*
 * xref: /dpdk/app/test-pmd/testpmd.c
 * (revision 0c00abfd85448449d73acc09cc6fe445dba37b50)
 */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, NUMA support dispatches the allocation of the RX and TX memory
 * rings, and of the DMA memory buffers (mbufs) for the probed ports, across
 * CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the specified sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
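
/*
 * Illustrative sketch (not part of testpmd): since the array above is
 * NULL-terminated, a forwarding engine can be looked up by its mode name:
 *
 *	struct fwd_engine *engine;
 *	unsigned int i;
 *
 *	for (i = 0; (engine = fwd_engines[i]) != NULL; i++)
 *		if (strcmp(engine->fwd_mode_name, "io") == 0)
 *			break;
 */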

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated from the outside. Set a flag to exit the stats-period loop
 * once SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask Ethernet events.
 * Defaults to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
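
/*
 * Illustrative sketch: an event type is displayed when its bit is set in
 * the mask above, e.g.:
 *
 *	if (event_print_mask & (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC))
 *		printf("LSC events are displayed\n");
 */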
/*
 * Decide whether all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

struct vxlan_encap_conf vxlan_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.vni = "\x00\x00\x00",
	.udp_src = 0,
	.udp_dst = RTE_BE16(4789),
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};
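
/*
 * The defaults above encode VNI 0, the IANA-assigned VXLAN UDP destination
 * port 4789, an IPv4 loopback source and a broadcast destination; they are
 * intended to be overridden at run time before building an encap action.
 */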

struct nvgre_encap_conf nvgre_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.tni = "\x00\x00\x00",
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				enum rte_dev_event_type type,
				void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket has not been discovered yet.
 * If it is new, return a positive value. If already known, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
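
/*
 * Illustrative usage (hypothetical caller; mirrors
 * set_default_fwd_lcores_config() below):
 *
 *	unsigned int sock = rte_lcore_to_socket_id(lcore_id);
 *
 *	if (new_socket_id(sock))
 *		socket_ids[num_sockets++] = sock;
 */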

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
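
/*
 * Illustrative call (mirrors init_config() below): with NUMA support on,
 * one pool is created per discovered socket, e.g.:
 *
 *	mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, socket_ids[i]);
 */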

/*
 * Check whether the given socket id is valid in NUMA mode.
 * If valid, return 0; otherwise return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id that has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * The input rxq is valid if it does not exceed the maximum number of
 * RX queues of any port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}
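
/*
 * Illustrative usage (hypothetical), e.g. while parsing a --rxq argument:
 *
 *	if (check_nb_rxq(n) != 0)
 *		rte_exit(EXIT_FAILURE, "invalid rxq value\n");
 */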

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id that has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * The input txq is valid if it does not exceed the maximum number of
 * TX queues of any port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.rx_offload_capa &
					DEV_RX_OFFLOAD_CRC_STRIP))
			port->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_CRC_STRIP;
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single mbuf pool in
	 * socket 0 memory by default.
	 * Otherwise, create an mbuf pool in the memory of each discovered
	 * socket.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
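
	/*
	 * Worked example with illustrative values (the real constants come
	 * from testpmd.h): RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * 4 lcores, mb_mempool_cache = 250 and MAX_PKT_BURST = 512 give
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, scaled by RTE_MAX_ETHPORTS.
	 */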

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif
}

void
reconfig(portid_t new_port_id, unsigned int socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}

int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
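
/*
 * Example: with nb_ports = 2, nb_rxq = 4 and nb_txq = 2,
 * RTE_MAX(nb_rxq, nb_txq) = 4, so 2 * 4 = 8 forwarding streams are
 * allocated by init_fwd_streams() above.
 */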

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
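
/*
 * Worked example for the percentages above: out of 100 recorded bursts,
 * if 70 carried 32 packets and 20 carried 16 packets, then
 * burst_percent[0] = 70, burst_percent[1] = 20 and burst_percent[2] = 10,
 * printed as "[70% of 32 pkts + 20% of 16 pkts + 10% of others]".
 */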

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i],
			       stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets, so a timer is
				 * used to exit the loop once the 1-second
				 * timeout expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
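
/*
 * Note: copying the lcore context and presetting stopped = 1 above makes
 * the do { ... } while (!fc->stopped) loop in run_pkt_fwd_on_lcore()
 * execute exactly once, i.e. each stream sends a single burst.
 */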

/*
 * Launch packet forwarding:
 *     - Set up the per-port forwarding context.
 *     - Launch the logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}
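
/*
 * Example: fwd_ports_ids = {0, 2, 5} where port 2 has become invalid,
 * with new_pid = 7, compacts to {0, 5, 7} and nb_fwd_ports = 3.
 */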

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nFailed to set link up.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nFailed to set link down.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Failed to set up tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Failed to set up rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start port, set status back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to set up event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1821 
1822 void
1823 stop_port(portid_t pid)
1824 {
1825 	portid_t pi;
1826 	struct rte_port *port;
1827 	int need_check_link_status = 0;
1828 
1829 	if (dcb_test) {
1830 		dcb_test = 0;
1831 		dcb_config = 0;
1832 	}
1833 
1834 	if (port_id_is_invalid(pid, ENABLED_WARN))
1835 		return;
1836 
1837 	printf("Stopping ports...\n");
1838 
1839 	RTE_ETH_FOREACH_DEV(pi) {
1840 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1841 			continue;
1842 
1843 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1844 			printf("Please remove port %d from forwarding configuration.\n", pi);
1845 			continue;
1846 		}
1847 
1848 		if (port_is_bonding_slave(pi)) {
1849 			printf("Please remove port %d from bonded device.\n", pi);
1850 			continue;
1851 		}
1852 
1853 		port = &ports[pi];
1854 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1855 						RTE_PORT_HANDLING) == 0)
1856 			continue;
1857 
1858 		rte_eth_dev_stop(pi);
1859 
1860 		if (rte_atomic16_cmpset(&(port->port_status),
1861 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1862 			printf("Port %d cannot be set to stopped\n", pi);
1863 		need_check_link_status = 1;
1864 	}
1865 	if (need_check_link_status && !no_link_check)
1866 		check_all_ports_link_status(RTE_PORT_ALL);
1867 
1868 	printf("Done\n");
1869 }
1870 
1871 void
1872 close_port(portid_t pid)
1873 {
1874 	portid_t pi;
1875 	struct rte_port *port;
1876 
1877 	if (port_id_is_invalid(pid, ENABLED_WARN))
1878 		return;
1879 
1880 	printf("Closing ports...\n");
1881 
1882 	RTE_ETH_FOREACH_DEV(pi) {
1883 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1884 			continue;
1885 
1886 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1887 			printf("Please remove port %d from forwarding configuration.\n", pi);
1888 			continue;
1889 		}
1890 
1891 		if (port_is_bonding_slave(pi)) {
1892 			printf("Please remove port %d from bonded device.\n", pi);
1893 			continue;
1894 		}
1895 
1896 		port = &ports[pi];
1897 		if (rte_atomic16_cmpset(&(port->port_status),
1898 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1899 			printf("Port %d is already closed\n", pi);
1900 			continue;
1901 		}
1902 
1903 		if (rte_atomic16_cmpset(&(port->port_status),
1904 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1905 			printf("Port %d is not stopped\n", pi);
1906 			continue;
1907 		}
1908 
1909 		if (port->flow_list)
1910 			port_flow_flush(pi);
1911 		rte_eth_dev_close(pi);
1912 
1913 		if (rte_atomic16_cmpset(&(port->port_status),
1914 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1915 			printf("Port %d cannot be set to closed\n", pi);
1916 	}
1917 
1918 	printf("Done\n");
1919 }
1920 
1921 void
1922 reset_port(portid_t pid)
1923 {
1924 	int diag;
1925 	portid_t pi;
1926 	struct rte_port *port;
1927 
1928 	if (port_id_is_invalid(pid, ENABLED_WARN))
1929 		return;
1930 
1931 	printf("Resetting ports...\n");
1932 
1933 	RTE_ETH_FOREACH_DEV(pi) {
1934 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1935 			continue;
1936 
1937 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1938 			printf("Please remove port %d from forwarding "
1939 			       "configuration.\n", pi);
1940 			continue;
1941 		}
1942 
1943 		if (port_is_bonding_slave(pi)) {
1944 			printf("Please remove port %d from bonded device.\n",
1945 			       pi);
1946 			continue;
1947 		}
1948 
1949 		diag = rte_eth_dev_reset(pi);
1950 		if (diag == 0) {
1951 			port = &ports[pi];
1952 			port->need_reconfig = 1;
1953 			port->need_reconfig_queues = 1;
1954 		} else {
1955 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1956 		}
1957 	}
1958 
1959 	printf("Done\n");
1960 }
1961 
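/*
 * Device (hotplug) event plumbing. Passing a NULL device name to
 * rte_dev_event_callback_register() subscribes eth_dev_event_callback()
 * to events from all devices rather than a single named one.
 */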
1962 static int
1963 eth_dev_event_callback_register(void)
1964 {
1965 	int ret;
1966 
1967 	/* register the device event callback */
1968 	ret = rte_dev_event_callback_register(NULL,
1969 		eth_dev_event_callback, NULL);
1970 	if (ret) {
1971 		printf("Failed to register device event callback\n");
1972 		return -1;
1973 	}
1974 
1975 	return 0;
1976 }
1977 
1978 
1979 static int
1980 eth_dev_event_callback_unregister(void)
1981 {
1982 	int ret;
1983 
1984 	/* unregister the device event callback */
1985 	ret = rte_dev_event_callback_unregister(NULL,
1986 		eth_dev_event_callback, NULL);
1987 	if (ret < 0) {
1988 		printf("Failed to unregister device event callback\n");
1989 		return -1;
1990 	}
1991 
1992 	return 0;
1993 }
1994 
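/*
 * Attach a port at runtime. The identifier is a device string accepted by
 * rte_eth_dev_attach(), e.g. a PCI address like "0000:02:00.0" or a vdev
 * spec like "net_tap0,iface=tap0" (illustrative values, not taken from
 * this file); interactively this is reached via "port attach <identifier>".
 */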
1995 void
1996 attach_port(char *identifier)
1997 {
1998 	portid_t pi = 0;
1999 	unsigned int socket_id;
2000 
2001 	printf("Attaching a new port...\n");
2002 
2003 	if (identifier == NULL) {
2004 		printf("Invalid parameters specified\n");
2005 		return;
2006 	}
2007 
2008 	if (rte_eth_dev_attach(identifier, &pi))
2009 		return;
2010 
2011 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2012 	/* if socket_id is invalid, set to 0 */
2013 	if (check_socket_id(socket_id) < 0)
2014 		socket_id = 0;
2015 	reconfig(pi, socket_id);
2016 	rte_eth_promiscuous_enable(pi);
2017 
2018 	ports_ids[nb_ports] = pi;
2019 	nb_ports = rte_eth_dev_count_avail();
2020 
2021 	ports[pi].port_status = RTE_PORT_STOPPED;
2022 
2023 	update_fwd_ports(pi);
2024 
2025 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
2026 	printf("Done\n");
2027 }
2028 
2029 void
2030 detach_port(portid_t port_id)
2031 {
2032 	char name[RTE_ETH_NAME_MAX_LEN];
2033 	uint16_t i;
2034 
2035 	printf("Detaching a port...\n");
2036 
2037 	if (!port_is_closed(port_id)) {
2038 		printf("Please close port first\n");
2039 		return;
2040 	}
2041 
2042 	if (ports[port_id].flow_list)
2043 		port_flow_flush(port_id);
2044 
2045 	if (rte_eth_dev_detach(port_id, name)) {
2046 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2047 		return;
2048 	}
2049 
2050 	for (i = 0; i < nb_ports; i++) {
2051 		if (ports_ids[i] == port_id) {
2052 			ports_ids[i] = ports_ids[nb_ports-1];
2053 			ports_ids[nb_ports-1] = 0;
2054 			break;
2055 		}
2056 	}
2057 	nb_ports = rte_eth_dev_count_avail();
2058 
2059 	update_fwd_ports(RTE_MAX_ETHPORTS);
2060 
2061 	printf("Port %u is detached. Total number of ports is now %d\n",
2062 			port_id, nb_ports);
2063 	printf("Done\n");
2065 }
2066 
2067 void
2068 pmd_test_exit(void)
2069 {
2070 	struct rte_device *device;
2071 	portid_t pt_id;
2072 	int ret;
2073 
2074 	if (test_done == 0)
2075 		stop_packet_forwarding();
2076 
2077 	if (ports != NULL) {
2078 		no_link_check = 1;
2079 		RTE_ETH_FOREACH_DEV(pt_id) {
2080 			printf("\nShutting down port %d...\n", pt_id);
2081 			fflush(stdout);
2082 			stop_port(pt_id);
2083 			close_port(pt_id);
2084 
2085 			/*
2086 			 * This is a workaround to fix a virtio-user issue that
2087 			 * requires calling a clean-up routine to remove the
2088 			 * existing socket.
2089 			 * This workaround is valid only for testpmd; a fix
2090 			 * valid for all applications is needed.
2091 			 * TODO: Implement proper resource cleanup
2092 			 */
2093 			device = rte_eth_devices[pt_id].device;
2094 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2095 				detach_port(pt_id);
2096 		}
2097 	}
2098 
2099 	if (hot_plug) {
2100 		ret = rte_dev_event_monitor_stop();
2101 		if (ret)
2102 			RTE_LOG(ERR, EAL,
2103 				"Failed to stop the device event monitor.\n");
2104 
2105 		ret = eth_dev_event_callback_unregister();
2106 		if (ret)
2107 			RTE_LOG(ERR, EAL,
2108 				"Failed to unregister all event callbacks.\n");
2109 	}
2110 
2111 	printf("\nBye...\n");
2112 }
2113 
2114 typedef void (*cmd_func_t)(void);
2115 struct pmd_test_command {
2116 	const char *cmd_name;
2117 	cmd_func_t cmd_func;
2118 };
2119 
2120 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2121 
2122 /* Check the link status of all ports for up to 9 s, and print it at the end */
2123 static void
2124 check_all_ports_link_status(uint32_t port_mask)
2125 {
2126 #define CHECK_INTERVAL 100 /* 100ms */
2127 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2128 	portid_t portid;
2129 	uint8_t count, all_ports_up, print_flag = 0;
2130 	struct rte_eth_link link;
2131 
2132 	printf("Checking link statuses...\n");
2133 	fflush(stdout);
2134 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2135 		all_ports_up = 1;
2136 		RTE_ETH_FOREACH_DEV(portid) {
2137 			if ((port_mask & (1 << portid)) == 0)
2138 				continue;
2139 			memset(&link, 0, sizeof(link));
2140 			rte_eth_link_get_nowait(portid, &link);
2141 			/* print link status if flag set */
2142 			if (print_flag == 1) {
2143 				if (link.link_status)
2144 					printf(
2145 					"Port %d Link Up. speed %u Mbps - %s\n",
2146 					portid, link.link_speed,
2147 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2148 					"full-duplex" : "half-duplex");
2149 				else
2150 					printf("Port %d Link Down\n", portid);
2151 				continue;
2152 			}
2153 			/* clear all_ports_up flag if any link down */
2154 			if (link.link_status == ETH_LINK_DOWN) {
2155 				all_ports_up = 0;
2156 				break;
2157 			}
2158 		}
2159 		/* after finally printing all link status, get out */
2160 		if (print_flag == 1)
2161 			break;
2162 
2163 		if (all_ports_up == 0) {
2164 			fflush(stdout);
2165 			rte_delay_ms(CHECK_INTERVAL);
2166 		}
2167 
2168 		/* set the print_flag if all ports up or timeout */
2169 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2170 			print_flag = 1;
2171 		}
2172 
2173 		if (lsc_interrupt)
2174 			break;
2175 	}
2176 }
2177 
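/*
 * Deferred device-removal handler, scheduled from eth_event_callback()
 * through rte_eal_alarm_set() so the teardown runs outside the interrupt
 * thread. Link checking is suppressed while stopping the port because the
 * underlying device is already gone.
 */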
2178 static void
2179 rmv_event_callback(void *arg)
2180 {
2181 	int need_to_start = 0;
2182 	int org_no_link_check = no_link_check;
2183 	portid_t port_id = (intptr_t)arg;
2184 
2185 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2186 
2187 	if (!test_done && port_is_forwarding(port_id)) {
2188 		need_to_start = 1;
2189 		stop_packet_forwarding();
2190 	}
2191 	no_link_check = 1;
2192 	stop_port(port_id);
2193 	no_link_check = org_no_link_check;
2194 	close_port(port_id);
2195 	detach_port(port_id);
2196 	if (need_to_start)
2197 		start_packet_forwarding(0);
2198 }
2199 
2200 /* This function is used by the interrupt thread */
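/*
 * An event is printed only when its bit is set in event_print_mask
 * (normally controlled by the --print-event/--mask-event options; option
 * names assumed from testpmd usage). RTE_ETH_EVENT_INTR_RMV is not handled
 * inline: removal is deferred by 100 ms via rte_eal_alarm_set() so that
 * stop/close/detach run outside the interrupt thread.
 */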
2201 static int
2202 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2203 		  void *ret_param)
2204 {
2205 	static const char * const event_desc[] = {
2206 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2207 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2208 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2209 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2210 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2211 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2212 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2213 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2214 		[RTE_ETH_EVENT_NEW] = "device probed",
2215 		[RTE_ETH_EVENT_DESTROY] = "device released",
2216 		[RTE_ETH_EVENT_MAX] = NULL,
2217 	};
2218 
2219 	RTE_SET_USED(param);
2220 	RTE_SET_USED(ret_param);
2221 
2222 	if (type >= RTE_ETH_EVENT_MAX) {
2223 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2224 			port_id, __func__, type);
2225 		fflush(stderr);
2226 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2227 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2228 			event_desc[type]);
2229 		fflush(stdout);
2230 	}
2231 
2232 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2233 		return 0;
2234 
2235 	switch (type) {
2236 	case RTE_ETH_EVENT_INTR_RMV:
2237 		if (rte_eal_alarm_set(100000,
2238 				rmv_event_callback, (void *)(intptr_t)port_id))
2239 			fprintf(stderr, "Could not set up deferred device removal\n");
2240 		break;
2241 	default:
2242 		break;
2243 	}
2244 	return 0;
2245 }
2246 
2247 /* This function is used by the interrupt thread */
2248 static void
2249 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2250 			     __rte_unused void *arg)
2251 {
2252 	if (type >= RTE_DEV_EVENT_MAX) {
2253 		fprintf(stderr, "%s called upon invalid event %d\n",
2254 			__func__, type);
2255 		fflush(stderr);
2256 	}
2257 
2258 	switch (type) {
2259 	case RTE_DEV_EVENT_REMOVE:
2260 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2261 			device_name);
2262 		/* TODO: After the failure is handled, stop packet
2263 		 * forwarding, then stop, close and detach the port.
2264 		 */
2265 		break;
2266 	case RTE_DEV_EVENT_ADD:
2267 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2268 			device_name);
2269 		/* TODO: Once the kernel driver binding finishes,
2270 		 * attach the port.
2271 		 */
2272 		break;
2273 	default:
2274 		break;
2275 	}
2276 }
2277 
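/*
 * Queue stats mapping: NICs with a limited set of statistics counters let
 * rte_eth_dev_set_tx_queue_stats_mapping() bind a Tx queue to a specific
 * counter. The (port, queue, counter) tuples come from the command line
 * (testpmd's --tx-queue-stats-mapping option) and are applied per port here.
 */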
2278 static int
2279 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2280 {
2281 	uint16_t i;
2282 	int diag;
2283 	uint8_t mapping_found = 0;
2284 
2285 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2286 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2287 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2288 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2289 					tx_queue_stats_mappings[i].queue_id,
2290 					tx_queue_stats_mappings[i].stats_counter_id);
2291 			if (diag != 0)
2292 				return diag;
2293 			mapping_found = 1;
2294 		}
2295 	}
2296 	if (mapping_found)
2297 		port->tx_queue_stats_mapping_enabled = 1;
2298 	return 0;
2299 }
2300 
2301 static int
2302 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2303 {
2304 	uint16_t i;
2305 	int diag;
2306 	uint8_t mapping_found = 0;
2307 
2308 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2309 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2310 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2311 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2312 					rx_queue_stats_mappings[i].queue_id,
2313 					rx_queue_stats_mappings[i].stats_counter_id);
2314 			if (diag != 0)
2315 				return diag;
2316 			mapping_found = 1;
2317 		}
2318 	}
2319 	if (mapping_found)
2320 		port->rx_queue_stats_mapping_enabled = 1;
2321 	return 0;
2322 }
2323 
2324 static void
2325 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2326 {
2327 	int diag = 0;
2328 
2329 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2330 	if (diag != 0) {
2331 		if (diag == -ENOTSUP) {
2332 			port->tx_queue_stats_mapping_enabled = 0;
2333 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
2334 		} else
2336 			rte_exit(EXIT_FAILURE,
2337 					"set_tx_queue_stats_mapping_registers "
2338 					"failed for port id=%d diag=%d\n",
2339 					pi, diag);
2340 	}
2341 
2342 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2343 	if (diag != 0) {
2344 		if (diag == -ENOTSUP) {
2345 			port->rx_queue_stats_mapping_enabled = 0;
2346 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
2347 		} else
2349 			rte_exit(EXIT_FAILURE,
2350 					"set_rx_queue_stats_mapping_registers "
2351 					"failed for port id=%d diag=%d\n",
2352 					pi, diag);
2353 	}
2354 }
2355 
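/*
 * Per-queue Rx/Tx setup starts from the PMD defaults advertised in
 * dev_info.default_rxconf/default_txconf; a field is overridden only when
 * the user supplied a value, with RTE_PMD_PARAM_UNSET marking parameters
 * that were never given on the command line.
 */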
2356 static void
2357 rxtx_port_config(struct rte_port *port)
2358 {
2359 	uint16_t qid;
2360 
2361 	for (qid = 0; qid < nb_rxq; qid++) {
2362 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2363 
2364 		/* Check if any Rx parameters have been passed */
2365 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2366 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2367 
2368 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2369 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2370 
2371 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2372 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2373 
2374 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2375 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2376 
2377 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2378 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2379 
2380 		port->nb_rx_desc[qid] = nb_rxd;
2381 	}
2382 
2383 	for (qid = 0; qid < nb_txq; qid++) {
2384 		port->tx_conf[qid] = port->dev_info.default_txconf;
2385 
2386 		/* Check if any Tx parameters have been passed */
2387 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2388 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2389 
2390 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2391 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2392 
2393 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2394 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2395 
2396 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2397 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2398 
2399 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2400 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2401 
2402 		port->nb_tx_desc[qid] = nb_txd;
2403 	}
2404 }
2405 
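/*
 * Default port configuration: RSS is enabled only when more than one Rx
 * queue is requested, and the requested hash functions are masked by what
 * the device supports (flow_type_rss_offloads). LSC/RMV interrupts are
 * enabled only if the device advertises the matching RTE_ETH_DEV_INTR_*
 * flag.
 */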
2406 void
2407 init_port_config(void)
2408 {
2409 	portid_t pid;
2410 	struct rte_port *port;
2411 
2412 	RTE_ETH_FOREACH_DEV(pid) {
2413 		port = &ports[pid];
2414 		port->dev_conf.fdir_conf = fdir_conf;
2415 		rte_eth_dev_info_get(pid, &port->dev_info);
2416 		if (nb_rxq > 1) {
2417 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2418 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2419 				rss_hf & port->dev_info.flow_type_rss_offloads;
2420 		} else {
2421 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2422 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2423 		}
2424 
2425 		if (port->dcb_flag == 0) {
2426 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2427 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2428 			else
2429 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2430 		}
2431 
2432 		rxtx_port_config(port);
2433 
2434 		rte_eth_macaddr_get(pid, &port->eth_addr);
2435 
2436 		map_port_queue_stats_mapping_registers(pid, port);
2437 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2438 		rte_pmd_ixgbe_bypass_init(pid);
2439 #endif
2440 
2441 		if (lsc_interrupt &&
2442 		    (rte_eth_devices[pid].data->dev_flags &
2443 		     RTE_ETH_DEV_INTR_LSC))
2444 			port->dev_conf.intr_conf.lsc = 1;
2445 		if (rmv_interrupt &&
2446 		    (rte_eth_devices[pid].data->dev_flags &
2447 		     RTE_ETH_DEV_INTR_RMV))
2448 			port->dev_conf.intr_conf.rmv = 1;
2449 	}
2450 }
2451 
2452 void set_port_slave_flag(portid_t slave_pid)
2453 {
2454 	struct rte_port *port;
2455 
2456 	port = &ports[slave_pid];
2457 	port->slave_flag = 1;
2458 }
2459 
2460 void clear_port_slave_flag(portid_t slave_pid)
2461 {
2462 	struct rte_port *port;
2463 
2464 	port = &ports[slave_pid];
2465 	port->slave_flag = 0;
2466 }
2467 
2468 uint8_t port_is_bonding_slave(portid_t slave_pid)
2469 {
2470 	struct rte_port *port;
2471 
2472 	port = &ports[slave_pid];
2473 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2474 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2475 		return 1;
2476 	return 0;
2477 }
2478 
2479 const uint16_t vlan_tags[] = {
2480 		0,  1,  2,  3,  4,  5,  6,  7,
2481 		8,  9, 10, 11,  12, 13, 14, 15,
2482 		16, 17, 18, 19, 20, 21, 22, 23,
2483 		24, 25, 26, 27, 28, 29, 30, 31
2484 };
2485 
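/*
 * In both DCB modes below, traffic classes are assigned to the 8 user
 * priorities round-robin, i.e. dcb_tc[i] = i % num_tcs: with 4 TCs,
 * priorities 0..7 map to TCs 0,1,2,3,0,1,2,3; with 8 TCs the mapping is
 * the identity.
 */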
2486 static int
2487 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2488 		 enum dcb_mode_enable dcb_mode,
2489 		 enum rte_eth_nb_tcs num_tcs,
2490 		 uint8_t pfc_en)
2491 {
2492 	uint8_t i;
2493 
2494 	/*
2495 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2496 	 * given above, and the number of traffic classes available for use.
2497 	 */
2498 	if (dcb_mode == DCB_VT_ENABLED) {
2499 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2500 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2501 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2502 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2503 
2504 		/* VMDQ+DCB RX and TX configurations */
2505 		vmdq_rx_conf->enable_default_pool = 0;
2506 		vmdq_rx_conf->default_pool = 0;
2507 		vmdq_rx_conf->nb_queue_pools =
2508 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2509 		vmdq_tx_conf->nb_queue_pools =
2510 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2511 
2512 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2513 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2514 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2515 			vmdq_rx_conf->pool_map[i].pools =
2516 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2517 		}
2518 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2519 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2520 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2521 		}
2522 
2523 		/* set DCB mode of RX and TX of multiple queues */
2524 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2525 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2526 	} else {
2527 		struct rte_eth_dcb_rx_conf *rx_conf =
2528 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2529 		struct rte_eth_dcb_tx_conf *tx_conf =
2530 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2531 
2532 		rx_conf->nb_tcs = num_tcs;
2533 		tx_conf->nb_tcs = num_tcs;
2534 
2535 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2536 			rx_conf->dcb_tc[i] = i % num_tcs;
2537 			tx_conf->dcb_tc[i] = i % num_tcs;
2538 		}
2539 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2540 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2541 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2542 	}
2543 
2544 	if (pfc_en)
2545 		eth_conf->dcb_capability_en =
2546 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2547 	else
2548 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2549 
2550 	return 0;
2551 }
2552 
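/*
 * Configure a (stopped) port for DCB testing; interactively this is
 * reached via "port config <port> dcb vt (on|off) <num_tcs> pfc (on|off)"
 * (command shape as documented for testpmd). The number of Rx/Tx queues
 * is re-derived from the device capabilities and the number of TCs.
 */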
2553 int
2554 init_port_dcb_config(portid_t pid,
2555 		     enum dcb_mode_enable dcb_mode,
2556 		     enum rte_eth_nb_tcs num_tcs,
2557 		     uint8_t pfc_en)
2558 {
2559 	struct rte_eth_conf port_conf;
2560 	struct rte_port *rte_port;
2561 	int retval;
2562 	uint16_t i;
2563 
2564 	rte_port = &ports[pid];
2565 
2566 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2567 	/* Enter DCB configuration status */
2568 	dcb_config = 1;
2569 
2570 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2571 	port_conf.txmode = rte_port->dev_conf.txmode;
2572 
2573 	/* Set the configuration of DCB in VT mode and DCB in non-VT mode */
2574 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2575 	if (retval < 0)
2576 		return retval;
2577 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2578 
2579 	/* re-configure the device */
2580 	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2581 
2582 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2583 
2584 	/* If dev_info.vmdq_pool_base is greater than 0,
2585 	 * the queue id of vmdq pools is started after pf queues.
2586 	 */
2587 	if (dcb_mode == DCB_VT_ENABLED &&
2588 	    rte_port->dev_info.vmdq_pool_base > 0) {
2589 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2590 			" for port %d.\n", pid);
2591 		return -1;
2592 	}
2593 
2594 	/* Assume the ports in testpmd have the same dcb capability
2595 	 * and the same number of rxq and txq in dcb mode
2596 	 */
2597 	if (dcb_mode == DCB_VT_ENABLED) {
2598 		if (rte_port->dev_info.max_vfs > 0) {
2599 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2600 			nb_txq = rte_port->dev_info.nb_tx_queues;
2601 		} else {
2602 			nb_rxq = rte_port->dev_info.max_rx_queues;
2603 			nb_txq = rte_port->dev_info.max_tx_queues;
2604 		}
2605 	} else {
2606 		/* If VT is disabled, use all PF queues */
2607 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2608 			nb_rxq = rte_port->dev_info.max_rx_queues;
2609 			nb_txq = rte_port->dev_info.max_tx_queues;
2610 		} else {
2611 			nb_rxq = (queueid_t)num_tcs;
2612 			nb_txq = (queueid_t)num_tcs;
2614 		}
2615 	}
2616 	rx_free_thresh = 64;
2617 
2618 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2619 
2620 	rxtx_port_config(rte_port);
2621 	/* VLAN filter */
2622 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2623 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2624 		rx_vft_set(pid, vlan_tags[i], 1);
2625 
2626 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2627 	map_port_queue_stats_mapping_registers(pid, rte_port);
2628 
2629 	rte_port->dcb_flag = 1;
2630 
2631 	return 0;
2632 }
2633 
2634 static void
2635 init_port(void)
2636 {
2637 	/* Configuration of Ethernet ports. */
2638 	ports = rte_zmalloc("testpmd: ports",
2639 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2640 			    RTE_CACHE_LINE_SIZE);
2641 	if (ports == NULL) {
2642 		rte_exit(EXIT_FAILURE,
2643 				"rte_zmalloc(%d struct rte_port) failed\n",
2644 				RTE_MAX_ETHPORTS);
2645 	}
2646 }
2647 
2648 static void
2649 force_quit(void)
2650 {
2651 	pmd_test_exit();
2652 	prompt_exit();
2653 }
2654 
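/*
 * The byte arrays below spell out ANSI escape sequences: clr is "\033[2J"
 * (clear the screen) and top_left is "\033[1;1H" (move the cursor to row 1,
 * column 1), giving a simple full-screen refresh on VT100-style terminals.
 */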
2655 static void
2656 print_stats(void)
2657 {
2658 	uint8_t i;
2659 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2660 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2661 
2662 	/* Clear screen and move to top left */
2663 	printf("%s%s", clr, top_left);
2664 
2665 	printf("\nPort statistics ====================================");
2666 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2667 		nic_stats_display(fwd_ports_ids[i]);
2668 }
2669 
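/*
 * On SIGINT/SIGTERM: uninitialize the optional capture/latency frameworks,
 * run the normal exit path, then re-raise the signal with the default
 * handler restored so the process terminates with the conventional
 * signal exit status.
 */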
2670 static void
2671 signal_handler(int signum)
2672 {
2673 	if (signum == SIGINT || signum == SIGTERM) {
2674 		printf("\nSignal %d received, preparing to exit...\n",
2675 				signum);
2676 #ifdef RTE_LIBRTE_PDUMP
2677 		/* uninitialize packet capture framework */
2678 		rte_pdump_uninit();
2679 #endif
2680 #ifdef RTE_LIBRTE_LATENCY_STATS
2681 		rte_latencystats_uninit();
2682 #endif
2683 		force_quit();
2684 		/* Set flag to indicate the force termination. */
2685 		f_quit = 1;
2686 		/* exit with the expected status */
2687 		signal(signum, SIG_DFL);
2688 		kill(getpid(), signum);
2689 	}
2690 }
2691 
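/*
 * Startup sequence: initialize the EAL, register the testpmd log type,
 * discover the probed ports, allocate the port array, parse the testpmd
 * arguments, build the forwarding configuration, start all ports, and
 * finally run either the interactive prompt or non-interactive forwarding
 * with an optional periodic statistics display.
 */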
2692 int
2693 main(int argc, char** argv)
2694 {
2695 	int diag;
2696 	portid_t port_id;
2697 	uint16_t count;
2698 	int ret;
2699 
2700 	signal(SIGINT, signal_handler);
2701 	signal(SIGTERM, signal_handler);
2702 
2703 	diag = rte_eal_init(argc, argv);
2704 	if (diag < 0)
2705 		rte_panic("Cannot init EAL\n");
2706 
2707 	testpmd_logtype = rte_log_register("testpmd");
2708 	if (testpmd_logtype < 0)
2709 		rte_panic("Cannot register log type");
2710 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2711 
2712 #ifdef RTE_LIBRTE_PDUMP
2713 	/* initialize packet capture framework */
2714 	rte_pdump_init(NULL);
2715 #endif
2716 
2717 	count = 0;
2718 	RTE_ETH_FOREACH_DEV(port_id) {
2719 		ports_ids[count] = port_id;
2720 		count++;
2721 	}
2722 	nb_ports = (portid_t) count;
2723 	if (nb_ports == 0)
2724 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2725 
2726 	/* allocate port structures, and init them */
2727 	init_port();
2728 
2729 	set_def_fwd_config();
2730 	if (nb_lcores == 0)
2731 		rte_panic("Empty set of forwarding logical cores - check the "
2732 			  "core mask supplied in the command parameters\n");
2733 
2734 	/* Bitrate/latency stats disabled by default */
2735 #ifdef RTE_LIBRTE_BITRATE
2736 	bitrate_enabled = 0;
2737 #endif
2738 #ifdef RTE_LIBRTE_LATENCY_STATS
2739 	latencystats_enabled = 0;
2740 #endif
2741 
2742 	/* on FreeBSD, mlockall() is disabled by default */
2743 #ifdef RTE_EXEC_ENV_BSDAPP
2744 	do_mlockall = 0;
2745 #else
2746 	do_mlockall = 1;
2747 #endif
2748 
2749 	argc -= diag;
2750 	argv += diag;
2751 	if (argc > 1)
2752 		launch_args_parse(argc, argv);
2753 
2754 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2755 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2756 			strerror(errno));
2757 	}
2758 
2759 	if (tx_first && interactive)
2760 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2761 				"interactive mode.\n");
2762 
2763 	if (tx_first && lsc_interrupt) {
2764 		printf("Warning: lsc_interrupt needs to be off when "
2765 				"using tx_first. Disabling.\n");
2766 		lsc_interrupt = 0;
2767 	}
2768 
2769 	if (!nb_rxq && !nb_txq)
2770 		printf("Warning: Either rx or tx queues should be non-zero\n");
2771 
2772 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2773 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2774 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2775 		       nb_rxq, nb_txq);
2776 
2777 	init_config();
2778 
2779 	if (hot_plug) {
2780 		/* enable hot plug monitoring */
2781 		ret = rte_dev_event_monitor_start();
2782 		if (ret) {
2783 			rte_errno = EINVAL;
2784 			return -1;
2785 		}
2786 		eth_dev_event_callback_register();
2787 
2788 	}
2789 
2790 	if (start_port(RTE_PORT_ALL) != 0)
2791 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2792 
2793 	/* set all ports to promiscuous mode by default */
2794 	RTE_ETH_FOREACH_DEV(port_id)
2795 		rte_eth_promiscuous_enable(port_id);
2796 
2797 	/* Init metrics library */
2798 	rte_metrics_init(rte_socket_id());
2799 
2800 #ifdef RTE_LIBRTE_LATENCY_STATS
2801 	if (latencystats_enabled != 0) {
2802 		int ret = rte_latencystats_init(1, NULL);
2803 		if (ret)
2804 			printf("Warning: latencystats init()"
2805 				" returned error %d\n", ret);
2806 		printf("Latencystats running on lcore %d\n",
2807 			latencystats_lcore_id);
2808 	}
2809 #endif
2810 
2811 	/* Setup bitrate stats */
2812 #ifdef RTE_LIBRTE_BITRATE
2813 	if (bitrate_enabled != 0) {
2814 		bitrate_data = rte_stats_bitrate_create();
2815 		if (bitrate_data == NULL)
2816 			rte_exit(EXIT_FAILURE,
2817 				"Could not allocate bitrate data.\n");
2818 		rte_stats_bitrate_reg(bitrate_data);
2819 	}
2820 #endif
2821 
2822 #ifdef RTE_LIBRTE_CMDLINE
2823 	if (strlen(cmdline_filename) != 0)
2824 		cmdline_read_from_file(cmdline_filename);
2825 
2826 	if (interactive == 1) {
2827 		if (auto_start) {
2828 			printf("Start automatic packet forwarding\n");
2829 			start_packet_forwarding(0);
2830 		}
2831 		prompt();
2832 		pmd_test_exit();
2833 	} else
2834 #endif
2835 	{
2836 		char c;
2837 		int rc;
2838 
2839 		f_quit = 0;
2840 
2841 		printf("No command line given, starting packet forwarding\n");
2842 		start_packet_forwarding(tx_first);
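		/*
		 * stats_period (seconds, from the --stats-period option) is
		 * converted to TSC cycles once; the loop below accumulates
		 * elapsed cycles and redraws the statistics each time a full
		 * period has passed.
		 */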
2843 		if (stats_period != 0) {
2844 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2845 			uint64_t timer_period;
2846 
2847 			/* Convert to number of cycles */
2848 			timer_period = stats_period * rte_get_timer_hz();
2849 
2850 			while (f_quit == 0) {
2851 				cur_time = rte_get_timer_cycles();
2852 				diff_time += cur_time - prev_time;
2853 
2854 				if (diff_time >= timer_period) {
2855 					print_stats();
2856 					/* Reset the timer */
2857 					diff_time = 0;
2858 				}
2859 				/* Sleep to avoid unnecessary checks */
2860 				prev_time = cur_time;
2861 				sleep(1);
2862 			}
2863 		}
2864 
2865 		printf("Press enter to exit\n");
2866 		rc = read(0, &c, 1);
2867 		pmd_test_exit();
2868 		if (rc < 0)
2869 			return 1;
2870 	}
2871 
2872 	return 0;
2873 }
2874