xref: /dpdk/app/test-pmd/testpmd.c (revision 1960be7d32f85f07099e9642e3aba2a3c837b598)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */
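
/*
 * For illustration only (option names assumed from the testpmd user
 * guide): NUMA behaviour is normally driven from the command line,
 * e.g. --no-numa clears numa_support, while --port-numa-config and
 * --ring-numa-config pin ports and rings to specific sockets.
 */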

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by each port
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by each port
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, a process running with the 'stats-period' option cannot
 * be terminated normally. Set a flag to exit the stats-period loop after
 * SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
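
/*
 * A worked example (using the documented --txpkts option or the
 * "set txpkts" runtime command): "--txpkts=64,128" would yield
 * tx_pkt_nb_segs = 2, tx_pkt_seg_lengths = {64, 128} and
 * tx_pkt_length = 192.
 */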

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
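
/*
 * Example (assuming the standard testpmd options): "--rxd=512 --txd=512"
 * overrides the zero defaults above; leaving them at 0 keeps the ring
 * sizes recommended by the driver through ethdev.
 */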

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
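
/*
 * Sketch (for illustration; the exact check lives in the event callback
 * declared below): an event of type "type" is printed only when
 * (event_print_mask & (UINT32_C(1) << type)) != 0. RTE_ETH_EVENT_VF_MBOX
 * is the only standard event left out of the default mask above.
 */
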
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

struct vxlan_encap_conf vxlan_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.vni = "\x00\x00\x00",
	.udp_src = 0,
	.udp_dst = RTE_BE16(4789),
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};
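
/*
 * Notes on the defaults above: 4789 is the IANA-assigned VXLAN UDP
 * destination port; the VNI is zero, the IPv4 source defaults to the
 * loopback address and the destination to broadcast, as does the
 * Ethernet destination address.
 */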

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				enum rte_dev_event_type type,
				void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check if a socket is newly discovered.
 * If yes, return positive value. If not, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check if the given socket ID is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID with the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check if the input rxq is valid.
 * It is valid if it does not exceed the maximum number of
 * RX queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID with the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check if the input txq is valid.
 * It is valid if it does not exceed the maximum number of
 * TX queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.rx_offload_capa &
					DEV_RX_OFFLOAD_CRC_STRIP))
			port->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_CRC_STRIP;
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
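
	/*
	 * For illustration, assuming the constants defined in testpmd.h
	 * (e.g. RTE_TEST_RX_DESC_MAX = 2048, RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512) with a 250-entry mempool cache on 4 lcores,
	 * the estimate above is 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs
	 * per port, then scaled by RTE_MAX_ETHPORTS for the worst case.
	 */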

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}

void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}

int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}

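/*
 * Worked example for the stream count above: with 2 forwarding ports,
 * nb_rxq = 4 and nb_txq = 1, init_fwd_streams() allocates
 * 2 * RTE_MAX(4, 1) = 8 forwarding streams.
 */
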
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets, so a timer is
				 * used to exit the loop after the 1-second
				 * timeout expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t idx_port;

	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				RTE_ETH_FOREACH_DEV(idx_port)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

1618 start_port(portid_t pid)
1619 {
1620 	int diag, need_check_link_status = -1;
1621 	portid_t pi;
1622 	queueid_t qi;
1623 	struct rte_port *port;
1624 	struct ether_addr mac_addr;
1625 	enum rte_eth_event_type event_type;
1626 
1627 	if (port_id_is_invalid(pid, ENABLED_WARN))
1628 		return 0;
1629 
1630 	if(dcb_config)
1631 		dcb_test = 1;
1632 	RTE_ETH_FOREACH_DEV(pi) {
1633 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1634 			continue;
1635 
1636 		need_check_link_status = 0;
1637 		port = &ports[pi];
1638 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1639 						 RTE_PORT_HANDLING) == 0) {
1640 			printf("Port %d is now not stopped\n", pi);
1641 			continue;
1642 		}
1643 
1644 		if (port->need_reconfig > 0) {
1645 			port->need_reconfig = 0;
1646 
1647 			if (flow_isolate_all) {
1648 				int ret = port_flow_isolate(pi, 1);
1649 				if (ret) {
1650 					printf("Failed to apply isolated"
1651 					       " mode on port %d\n", pi);
1652 					return -1;
1653 				}
1654 			}
1655 
1656 			printf("Configuring Port %d (socket %u)\n", pi,
1657 					port->socket_id);
1658 			/* configure port */
1659 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1660 						&(port->dev_conf));
1661 			if (diag != 0) {
1662 				if (rte_atomic16_cmpset(&(port->port_status),
1663 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1664 					printf("Port %d can not be set back "
1665 							"to stopped\n", pi);
1666 				printf("Fail to configure port %d\n", pi);
1667 				/* try to reconfigure port next time */
1668 				port->need_reconfig = 1;
1669 				return -1;
1670 			}
1671 		}
1672 		if (port->need_reconfig_queues > 0) {
1673 			port->need_reconfig_queues = 0;
1674 			/* setup tx queues */
1675 			for (qi = 0; qi < nb_txq; qi++) {
1676 				if ((numa_support) &&
1677 					(txring_numa[pi] != NUMA_NO_CONFIG))
1678 					diag = rte_eth_tx_queue_setup(pi, qi,
1679 						port->nb_tx_desc[qi],
1680 						txring_numa[pi],
1681 						&(port->tx_conf[qi]));
1682 				else
1683 					diag = rte_eth_tx_queue_setup(pi, qi,
1684 						port->nb_tx_desc[qi],
1685 						port->socket_id,
1686 						&(port->tx_conf[qi]));
1687 
1688 				if (diag == 0)
1689 					continue;
1690 
1691 				/* Fail to setup tx queue, return */
1692 				if (rte_atomic16_cmpset(&(port->port_status),
1693 							RTE_PORT_HANDLING,
1694 							RTE_PORT_STOPPED) == 0)
1695 					printf("Port %d can not be set back "
1696 							"to stopped\n", pi);
1697 				printf("Fail to configure port %d tx queues\n",
1698 				       pi);
1699 				/* try to reconfigure queues next time */
1700 				port->need_reconfig_queues = 1;
1701 				return -1;
1702 			}
1703 			for (qi = 0; qi < nb_rxq; qi++) {
1704 				/* setup rx queues */
1705 				if ((numa_support) &&
1706 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1707 					struct rte_mempool * mp =
1708 						mbuf_pool_find(rxring_numa[pi]);
1709 					if (mp == NULL) {
1710 						printf("Failed to setup RX queue:"
1711 							"No mempool allocation"
1712 							" on the socket %d\n",
1713 							rxring_numa[pi]);
1714 						return -1;
1715 					}
1716 
1717 					diag = rte_eth_rx_queue_setup(pi, qi,
1718 					     port->nb_rx_desc[qi],
1719 					     rxring_numa[pi],
1720 					     &(port->rx_conf[qi]),
1721 					     mp);
1722 				} else {
1723 					struct rte_mempool *mp =
1724 						mbuf_pool_find(port->socket_id);
1725 					if (mp == NULL) {
1726 						printf("Failed to setup RX queue:"
1727 							"No mempool allocation"
1728 							" on the socket %d\n",
1729 							port->socket_id);
1730 						return -1;
1731 					}
1732 					diag = rte_eth_rx_queue_setup(pi, qi,
1733 					     port->nb_rx_desc[qi],
1734 					     port->socket_id,
1735 					     &(port->rx_conf[qi]),
1736 					     mp);
1737 				}
1738 				if (diag == 0)
1739 					continue;
1740 
1741 				/* Fail to setup rx queue, return */
1742 				if (rte_atomic16_cmpset(&(port->port_status),
1743 							RTE_PORT_HANDLING,
1744 							RTE_PORT_STOPPED) == 0)
1745 					printf("Port %d can not be set back "
1746 							"to stopped\n", pi);
1747 				printf("Fail to configure port %d rx queues\n",
1748 				       pi);
1749 				/* try to reconfigure queues next time */
1750 				port->need_reconfig_queues = 1;
1751 				return -1;
1752 			}
1753 		}
1754 
1755 		/* start port */
1756 		if (rte_eth_dev_start(pi) < 0) {
1757 			printf("Fail to start port %d\n", pi);
1758 
1759 			/* Fail to setup rx queue, return */
1760 			if (rte_atomic16_cmpset(&(port->port_status),
1761 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1762 				printf("Port %d can not be set back to "
1763 							"stopped\n", pi);
1764 			continue;
1765 		}
1766 
1767 		if (rte_atomic16_cmpset(&(port->port_status),
1768 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1769 			printf("Port %d can not be set into started\n", pi);
1770 
1771 		rte_eth_macaddr_get(pi, &mac_addr);
1772 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1773 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1774 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1775 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1776 
1777 		/* at least one port started, need checking link status */
1778 		need_check_link_status = 1;
1779 	}
1780 
	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to setup event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

1804 void
1805 stop_port(portid_t pid)
1806 {
1807 	portid_t pi;
1808 	struct rte_port *port;
1809 	int need_check_link_status = 0;
1810 
1811 	if (dcb_test) {
1812 		dcb_test = 0;
1813 		dcb_config = 0;
1814 	}
1815 
1816 	if (port_id_is_invalid(pid, ENABLED_WARN))
1817 		return;
1818 
1819 	printf("Stopping ports...\n");
1820 
1821 	RTE_ETH_FOREACH_DEV(pi) {
1822 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1823 			continue;
1824 
1825 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1826 			printf("Please remove port %d from forwarding configuration.\n", pi);
1827 			continue;
1828 		}
1829 
1830 		if (port_is_bonding_slave(pi)) {
1831 			printf("Please remove port %d from bonded device.\n", pi);
1832 			continue;
1833 		}
1834 
1835 		port = &ports[pi];
1836 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1837 						RTE_PORT_HANDLING) == 0)
1838 			continue;
1839 
1840 		rte_eth_dev_stop(pi);
1841 
1842 		if (rte_atomic16_cmpset(&(port->port_status),
1843 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1844 			printf("Port %d can not be set into stopped\n", pi);
1845 		need_check_link_status = 1;
1846 	}
1847 	if (need_check_link_status && !no_link_check)
1848 		check_all_ports_link_status(RTE_PORT_ALL);
1849 
1850 	printf("Done\n");
1851 }
1852 
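/*
 * Close one port, or every probed port when pid == RTE_PORT_ALL.
 * A port must be stopped before it can be closed, so a minimal
 * teardown sketch for a single port is:
 *
 *	stop_port(0);
 *	close_port(0);
 */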
1853 void
1854 close_port(portid_t pid)
1855 {
1856 	portid_t pi;
1857 	struct rte_port *port;
1858 
1859 	if (port_id_is_invalid(pid, ENABLED_WARN))
1860 		return;
1861 
1862 	printf("Closing ports...\n");
1863 
1864 	RTE_ETH_FOREACH_DEV(pi) {
1865 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1866 			continue;
1867 
1868 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1869 			printf("Please remove port %d from forwarding configuration.\n", pi);
1870 			continue;
1871 		}
1872 
1873 		if (port_is_bonding_slave(pi)) {
1874 			printf("Please remove port %d from bonded device.\n", pi);
1875 			continue;
1876 		}
1877 
1878 		port = &ports[pi];
1879 		if (rte_atomic16_cmpset(&(port->port_status),
1880 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1881 			printf("Port %d is already closed\n", pi);
1882 			continue;
1883 		}
1884 
1885 		if (rte_atomic16_cmpset(&(port->port_status),
1886 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1887 			printf("Port %d is not stopped; stop it before closing\n", pi);
1888 			continue;
1889 		}
1890 
1891 		if (port->flow_list)
1892 			port_flow_flush(pi);
1893 		rte_eth_dev_close(pi);
1894 
1895 		if (rte_atomic16_cmpset(&(port->port_status),
1896 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1897 			printf("Port %d cannot be set to closed\n", pi);
1898 	}
1899 
1900 	printf("Done\n");
1901 }
1902 
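/*
 * Reset one port, or every probed port when pid == RTE_PORT_ALL.
 * On success the port is flagged for full reconfiguration, so the
 * next start_port() call redoes the device and queue setup.
 */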
1903 void
1904 reset_port(portid_t pid)
1905 {
1906 	int diag;
1907 	portid_t pi;
1908 	struct rte_port *port;
1909 
1910 	if (port_id_is_invalid(pid, ENABLED_WARN))
1911 		return;
1912 
1913 	printf("Resetting ports...\n");
1914 
1915 	RTE_ETH_FOREACH_DEV(pi) {
1916 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1917 			continue;
1918 
1919 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1920 			printf("Please remove port %d from forwarding "
1921 			       "configuration.\n", pi);
1922 			continue;
1923 		}
1924 
1925 		if (port_is_bonding_slave(pi)) {
1926 			printf("Please remove port %d from bonded device.\n",
1927 			       pi);
1928 			continue;
1929 		}
1930 
1931 		diag = rte_eth_dev_reset(pi);
1932 		if (diag == 0) {
1933 			port = &ports[pi];
1934 			port->need_reconfig = 1;
1935 			port->need_reconfig_queues = 1;
1936 		} else {
1937 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1938 		}
1939 	}
1940 
1941 	printf("Done\n");
1942 }
1943 
1944 static int
1945 eth_dev_event_callback_register(void)
1946 {
1947 	int ret;
1948 
1949 	/* register the device event callback */
1950 	ret = rte_dev_event_callback_register(NULL,
1951 		eth_dev_event_callback, NULL);
1952 	if (ret) {
1953 		printf("Failed to register device event callback\n");
1954 		return -1;
1955 	}
1956 
1957 	return 0;
1958 }
1959 
1961 static int
1962 eth_dev_event_callback_unregister(void)
1963 {
1964 	int ret;
1965 
1966 	/* unregister the device event callback */
1967 	ret = rte_dev_event_callback_unregister(NULL,
1968 		eth_dev_event_callback, NULL);
1969 	if (ret < 0) {
1970 		printf("Failed to unregister device event callback\n");
1971 		return -1;
1972 	}
1973 
1974 	return 0;
1975 }
1976 
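/*
 * Hot-plug a device and set it up as a new port. The identifier is a
 * bus-specific devargs string; two illustrative examples (their
 * availability depends on the build and the platform):
 *
 *	attach_port("0000:02:00.0");		a PCI device
 *	attach_port("net_tap0,iface=test0");	a virtual device
 */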
1977 void
1978 attach_port(char *identifier)
1979 {
1980 	portid_t pi = 0;
1981 	unsigned int socket_id;
1982 
1983 	printf("Attaching a new port...\n");
1984 
1985 	if (identifier == NULL) {
1986 		printf("Invalid parameter specified\n");
1987 		return;
1988 	}
1989 
1990 	if (rte_eth_dev_attach(identifier, &pi))
1991 		return;
1992 
1993 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1994 	/* if socket_id is invalid, set to 0 */
1995 	if (check_socket_id(socket_id) < 0)
1996 		socket_id = 0;
1997 	reconfig(pi, socket_id);
1998 	rte_eth_promiscuous_enable(pi);
1999 
2000 	nb_ports = rte_eth_dev_count_avail();
2001 
2002 	ports[pi].port_status = RTE_PORT_STOPPED;
2003 
2004 	update_fwd_ports(pi);
2005 
2006 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
2007 	printf("Done\n");
2008 }
2009 
2010 void
2011 detach_port(portid_t port_id)
2012 {
2013 	char name[RTE_ETH_NAME_MAX_LEN];
2014 
2015 	printf("Detaching a port...\n");
2016 
2017 	if (!port_is_closed(port_id)) {
2018 		printf("Please close port first\n");
2019 		return;
2020 	}
2021 
2022 	if (ports[port_id].flow_list)
2023 		port_flow_flush(port_id);
2024 
2025 	if (rte_eth_dev_detach(port_id, name)) {
2026 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2027 		return;
2028 	}
2029 
2030 	nb_ports = rte_eth_dev_count_avail();
2031 
2032 	update_fwd_ports(RTE_MAX_ETHPORTS);
2033 
2034 	printf("Port %u is detached. Total number of ports is now %d\n",
2035 			port_id, nb_ports);
2036 	printf("Done\n");
2038 }
2039 
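/*
 * Application exit path: stop forwarding if it is still running, then
 * stop and close every port, and shut down the optional hot-plug
 * monitoring before testpmd terminates.
 */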
2040 void
2041 pmd_test_exit(void)
2042 {
2043 	struct rte_device *device;
2044 	portid_t pt_id;
2045 	int ret;
2046 
2047 	if (test_done == 0)
2048 		stop_packet_forwarding();
2049 
2050 	if (ports != NULL) {
2051 		no_link_check = 1;
2052 		RTE_ETH_FOREACH_DEV(pt_id) {
2053 			printf("\nShutting down port %d...\n", pt_id);
2054 			fflush(stdout);
2055 			stop_port(pt_id);
2056 			close_port(pt_id);
2057 
2058 			/*
2059 			 * This is a workaround for a virtio-user issue that
2060 			 * requires calling the clean-up routine to remove the
2061 			 * existing socket.
2062 			 * The workaround is valid only for testpmd; a fix
2063 			 * valid for all applications is needed.
2064 			 * TODO: Implement proper resource cleanup
2065 			 */
2066 			device = rte_eth_devices[pt_id].device;
2067 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2068 				detach_port(pt_id);
2069 		}
2070 	}
2071 
2072 	if (hot_plug) {
2073 		ret = rte_dev_event_monitor_stop();
2074 		if (ret)
2075 			RTE_LOG(ERR, EAL,
2076 				"failed to stop the device event monitor\n");
2077 
2078 		ret = eth_dev_event_callback_unregister();
2079 		if (ret)
2080 			RTE_LOG(ERR, EAL,
2081 				"failed to unregister all event callbacks\n");
2082 	}
2083 
2084 	printf("\nBye...\n");
2085 }
2086 
2087 typedef void (*cmd_func_t)(void);
2088 struct pmd_test_command {
2089 	const char *cmd_name;
2090 	cmd_func_t cmd_func;
2091 };
2092 
2093 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2094 
2095 /* Check the link status of all ports for up to 9 s, and print the final status */
2096 static void
2097 check_all_ports_link_status(uint32_t port_mask)
2098 {
2099 #define CHECK_INTERVAL 100 /* 100ms */
2100 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2101 	portid_t portid;
2102 	uint8_t count, all_ports_up, print_flag = 0;
2103 	struct rte_eth_link link;
2104 
2105 	printf("Checking link statuses...\n");
2106 	fflush(stdout);
2107 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2108 		all_ports_up = 1;
2109 		RTE_ETH_FOREACH_DEV(portid) {
2110 			if ((port_mask & (1 << portid)) == 0)
2111 				continue;
2112 			memset(&link, 0, sizeof(link));
2113 			rte_eth_link_get_nowait(portid, &link);
2114 			/* print link status if flag set */
2115 			if (print_flag == 1) {
2116 				if (link.link_status)
2117 					printf(
2118 					"Port %d Link Up. speed %u Mbps - %s\n",
2119 					portid, link.link_speed,
2120 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2121 					"full-duplex" : "half-duplex");
2122 				else
2123 					printf("Port %d Link Down\n", portid);
2124 				continue;
2125 			}
2126 			/* clear all_ports_up flag if any link down */
2127 			if (link.link_status == ETH_LINK_DOWN) {
2128 				all_ports_up = 0;
2129 				break;
2130 			}
2131 		}
2132 		/* after finally printing all link status, get out */
2133 		if (print_flag == 1)
2134 			break;
2135 
2136 		if (all_ports_up == 0) {
2137 			fflush(stdout);
2138 			rte_delay_ms(CHECK_INTERVAL);
2139 		}
2140 
2141 		/* set the print_flag if all ports up or timeout */
2142 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2143 			print_flag = 1;
2144 		}
2145 
2146 		if (lsc_interrupt)
2147 			break;
2148 	}
2149 }
2150 
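/*
 * Deferred handler for a device-removal interrupt: stop forwarding if
 * the removed port is in use, then stop, close and detach the port,
 * and restart forwarding afterwards if it had to be interrupted.
 */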
2151 static void
2152 rmv_event_callback(void *arg)
2153 {
2154 	int need_to_start = 0;
2155 	int org_no_link_check = no_link_check;
2156 	portid_t port_id = (intptr_t)arg;
2157 
2158 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2159 
2160 	if (!test_done && port_is_forwarding(port_id)) {
2161 		need_to_start = 1;
2162 		stop_packet_forwarding();
2163 	}
2164 	no_link_check = 1;
2165 	stop_port(port_id);
2166 	no_link_check = org_no_link_check;
2167 	close_port(port_id);
2168 	detach_port(port_id);
2169 	if (need_to_start)
2170 		start_packet_forwarding(0);
2171 }
2172 
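/*
 * eth_event_callback() below only prints an event when the matching
 * bit is set in event_print_mask (configurable from the testpmd
 * command line). For instance, printing only link-state-change and
 * removal events corresponds to a mask of:
 *
 *	(UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
 *	(UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV)
 */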
2173 /* This function is used by the interrupt thread */
2174 static int
2175 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2176 		  void *ret_param)
2177 {
2178 	static const char * const event_desc[] = {
2179 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2180 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2181 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2182 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2183 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2184 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2185 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2186 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2187 		[RTE_ETH_EVENT_NEW] = "device probed",
2188 		[RTE_ETH_EVENT_DESTROY] = "device released",
2189 		[RTE_ETH_EVENT_MAX] = NULL,
2190 	};
2191 
2192 	RTE_SET_USED(param);
2193 	RTE_SET_USED(ret_param);
2194 
2195 	if (type >= RTE_ETH_EVENT_MAX) {
2196 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2197 			port_id, __func__, type);
2198 		fflush(stderr);
2199 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2200 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2201 			event_desc[type]);
2202 		fflush(stdout);
2203 	}
2204 
2205 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2206 		return 0;
2207 
2208 	switch (type) {
2209 	case RTE_ETH_EVENT_INTR_RMV:
2210 		if (rte_eal_alarm_set(100000,
2211 				rmv_event_callback, (void *)(intptr_t)port_id))
2212 			fprintf(stderr, "Could not set up deferred device removal\n");
2213 		break;
2214 	default:
2215 		break;
2216 	}
2217 	return 0;
2218 }
2219 
2220 /* This function is used by the interrupt thread */
2221 static void
2222 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2223 			     __rte_unused void *arg)
2224 {
2225 	if (type >= RTE_DEV_EVENT_MAX) {
2226 		fprintf(stderr, "%s called upon invalid event %d\n",
2227 			__func__, type);
2228 		fflush(stderr);
2229 	}
2230 
2231 	switch (type) {
2232 	case RTE_DEV_EVENT_REMOVE:
2233 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2234 			device_name);
2235 		/* TODO: Once failure handling is finished, stop packet
2236 		 * forwarding, then stop, close and detach the port.
2237 		 */
2238 		break;
2239 	case RTE_DEV_EVENT_ADD:
2240 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2241 			device_name);
2242 		/* TODO: Once kernel driver binding is finished,
2243 		 * attach the port.
2244 		 */
2245 		break;
2246 	default:
2247 		break;
2248 	}
2249 }
2250 
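/*
 * The two helpers below program the optional per-queue statistics
 * registers. Each mapping entry ties a (port, queue) pair to one of
 * the limited hardware counters; e.g. a command line containing
 * --tx-queue-stats-mapping=(0,0,0),(0,1,1) yields entries roughly
 * equivalent to:
 *
 *	{ .port_id = 0, .queue_id = 0, .stats_counter_id = 0 }
 *	{ .port_id = 0, .queue_id = 1, .stats_counter_id = 1 }
 */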
2251 static int
2252 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2253 {
2254 	uint16_t i;
2255 	int diag;
2256 	uint8_t mapping_found = 0;
2257 
2258 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2259 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2260 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2261 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2262 					tx_queue_stats_mappings[i].queue_id,
2263 					tx_queue_stats_mappings[i].stats_counter_id);
2264 			if (diag != 0)
2265 				return diag;
2266 			mapping_found = 1;
2267 		}
2268 	}
2269 	if (mapping_found)
2270 		port->tx_queue_stats_mapping_enabled = 1;
2271 	return 0;
2272 }
2273 
2274 static int
2275 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2276 {
2277 	uint16_t i;
2278 	int diag;
2279 	uint8_t mapping_found = 0;
2280 
2281 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2282 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2283 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2284 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2285 					rx_queue_stats_mappings[i].queue_id,
2286 					rx_queue_stats_mappings[i].stats_counter_id);
2287 			if (diag != 0)
2288 				return diag;
2289 			mapping_found = 1;
2290 		}
2291 	}
2292 	if (mapping_found)
2293 		port->rx_queue_stats_mapping_enabled = 1;
2294 	return 0;
2295 }
2296 
2297 static void
2298 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2299 {
2300 	int diag = 0;
2301 
2302 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2303 	if (diag != 0) {
2304 		if (diag == -ENOTSUP) {
2305 			port->tx_queue_stats_mapping_enabled = 0;
2306 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
2307 		} else
2309 			rte_exit(EXIT_FAILURE,
2310 					"set_tx_queue_stats_mapping_registers "
2311 					"failed for port id=%d diag=%d\n",
2312 					pi, diag);
2313 	}
2314 
2315 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2316 	if (diag != 0) {
2317 		if (diag == -ENOTSUP) {
2318 			port->rx_queue_stats_mapping_enabled = 0;
2319 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
2320 		} else
2322 			rte_exit(EXIT_FAILURE,
2323 					"set_rx_queue_stats_mapping_registers "
2324 					"failed for port id=%d diag=%d\n",
2325 					pi, diag);
2326 	}
2327 }
2328 
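/*
 * Apply the command-line Rx/Tx ring parameters on top of the driver
 * defaults. Every knob starts out as RTE_PMD_PARAM_UNSET and is only
 * written when the user overrode it; e.g. running with --rxfreet=32
 * leaves all thresholds at the PMD defaults and merely sets
 * rx_conf[qid].rx_free_thresh = 32 on every Rx queue.
 */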
2329 static void
2330 rxtx_port_config(struct rte_port *port)
2331 {
2332 	uint16_t qid;
2333 
2334 	for (qid = 0; qid < nb_rxq; qid++) {
2335 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2336 
2337 		/* Check if any Rx parameters have been passed */
2338 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2339 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2340 
2341 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2342 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2343 
2344 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2345 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2346 
2347 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2348 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2349 
2350 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2351 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2352 
2353 		port->nb_rx_desc[qid] = nb_rxd;
2354 	}
2355 
2356 	for (qid = 0; qid < nb_txq; qid++) {
2357 		port->tx_conf[qid] = port->dev_info.default_txconf;
2358 
2359 		/* Check if any Tx parameters have been passed */
2360 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2361 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2362 
2363 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2364 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2365 
2366 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2367 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2368 
2369 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2370 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2371 
2372 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2373 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2374 
2375 		port->nb_tx_desc[qid] = nb_txd;
2376 	}
2377 }
2378 
2379 void
2380 init_port_config(void)
2381 {
2382 	portid_t pid;
2383 	struct rte_port *port;
2384 
2385 	RTE_ETH_FOREACH_DEV(pid) {
2386 		port = &ports[pid];
2387 		port->dev_conf.fdir_conf = fdir_conf;
2388 		rte_eth_dev_info_get(pid, &port->dev_info);
2389 		if (nb_rxq > 1) {
2390 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2391 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2392 				rss_hf & port->dev_info.flow_type_rss_offloads;
2393 		} else {
2394 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2395 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2396 		}
2397 
2398 		if (port->dcb_flag == 0) {
2399 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2400 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2401 			else
2402 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2403 		}
2404 
2405 		rxtx_port_config(port);
2406 
2407 		rte_eth_macaddr_get(pid, &port->eth_addr);
2408 
2409 		map_port_queue_stats_mapping_registers(pid, port);
2410 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2411 		rte_pmd_ixgbe_bypass_init(pid);
2412 #endif
2413 
2414 		if (lsc_interrupt &&
2415 		    (rte_eth_devices[pid].data->dev_flags &
2416 		     RTE_ETH_DEV_INTR_LSC))
2417 			port->dev_conf.intr_conf.lsc = 1;
2418 		if (rmv_interrupt &&
2419 		    (rte_eth_devices[pid].data->dev_flags &
2420 		     RTE_ETH_DEV_INTR_RMV))
2421 			port->dev_conf.intr_conf.rmv = 1;
2422 	}
2423 }
2424 
2425 void set_port_slave_flag(portid_t slave_pid)
2426 {
2427 	struct rte_port *port;
2428 
2429 	port = &ports[slave_pid];
2430 	port->slave_flag = 1;
2431 }
2432 
2433 void clear_port_slave_flag(portid_t slave_pid)
2434 {
2435 	struct rte_port *port;
2436 
2437 	port = &ports[slave_pid];
2438 	port->slave_flag = 0;
2439 }
2440 
2441 uint8_t port_is_bonding_slave(portid_t slave_pid)
2442 {
2443 	struct rte_port *port;
2444 
2445 	port = &ports[slave_pid];
2446 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2447 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2448 		return 1;
2449 	return 0;
2450 }
2451 
2452 const uint16_t vlan_tags[] = {
2453 		0,  1,  2,  3,  4,  5,  6,  7,
2454 		8,  9, 10, 11,  12, 13, 14, 15,
2455 		16, 17, 18, 19, 20, 21, 22, 23,
2456 		24, 25, 26, 27, 28, 29, 30, 31
2457 };
2458 
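/*
 * Build an rte_eth_conf for DCB. The dcb_tc[] tables map the eight
 * user priorities round-robin onto the traffic classes, so with
 * num_tcs == ETH_4_TCS priorities 0..7 land on TCs 0,1,2,3,0,1,2,3.
 */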
2459 static int
2460 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2461 		 enum dcb_mode_enable dcb_mode,
2462 		 enum rte_eth_nb_tcs num_tcs,
2463 		 uint8_t pfc_en)
2464 {
2465 	uint8_t i;
2466 
2467 	/*
2468 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2469 	 * given above, and the number of traffic classes available for use.
2470 	 */
2471 	if (dcb_mode == DCB_VT_ENABLED) {
2472 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2473 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2474 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2475 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2476 
2477 		/* VMDQ+DCB RX and TX configurations */
2478 		vmdq_rx_conf->enable_default_pool = 0;
2479 		vmdq_rx_conf->default_pool = 0;
2480 		vmdq_rx_conf->nb_queue_pools =
2481 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2482 		vmdq_tx_conf->nb_queue_pools =
2483 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2484 
2485 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2486 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2487 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2488 			vmdq_rx_conf->pool_map[i].pools =
2489 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2490 		}
2491 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2492 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2493 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2494 		}
2495 
2496 		/* set DCB mode of RX and TX of multiple queues */
2497 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2498 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2499 	} else {
2500 		struct rte_eth_dcb_rx_conf *rx_conf =
2501 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2502 		struct rte_eth_dcb_tx_conf *tx_conf =
2503 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2504 
2505 		rx_conf->nb_tcs = num_tcs;
2506 		tx_conf->nb_tcs = num_tcs;
2507 
2508 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2509 			rx_conf->dcb_tc[i] = i % num_tcs;
2510 			tx_conf->dcb_tc[i] = i % num_tcs;
2511 		}
2512 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2513 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2514 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2515 	}
2516 
2517 	if (pfc_en)
2518 		eth_conf->dcb_capability_en =
2519 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2520 	else
2521 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2522 
2523 	return 0;
2524 }
2525 
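/*
 * Reconfigure a port for DCB. This is what the interactive command
 * "port config <port> dcb vt (on|off) <num_tcs> pfc (on|off)" ends up
 * calling; e.g. "port config 0 dcb vt off 4 pfc on" maps to roughly:
 *
 *	init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1);
 */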
2526 int
2527 init_port_dcb_config(portid_t pid,
2528 		     enum dcb_mode_enable dcb_mode,
2529 		     enum rte_eth_nb_tcs num_tcs,
2530 		     uint8_t pfc_en)
2531 {
2532 	struct rte_eth_conf port_conf;
2533 	struct rte_port *rte_port;
2534 	int retval;
2535 	uint16_t i;
2536 
2537 	rte_port = &ports[pid];
2538 
2539 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2540 	/* Enter DCB configuration status */
2541 	dcb_config = 1;
2542 
2543 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2544 	port_conf.txmode = rte_port->dev_conf.txmode;
2545 
2546 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2547 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2548 	if (retval < 0)
2549 		return retval;
2550 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2551 
2552 	/* Re-configure the device. */
2553 	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2554 
2555 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2556 
2557 	/* If dev_info.vmdq_pool_base is greater than 0,
2558 	 * the queue id of vmdq pools is started after pf queues.
2559 	 */
2560 	if (dcb_mode == DCB_VT_ENABLED &&
2561 	    rte_port->dev_info.vmdq_pool_base > 0) {
2562 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2563 			" for port %d.\n", pid);
2564 		return -1;
2565 	}
2566 
2567 	/* Assume the ports in testpmd have the same DCB capability
2568 	 * and the same number of rxq and txq in DCB mode
2569 	 */
2570 	if (dcb_mode == DCB_VT_ENABLED) {
2571 		if (rte_port->dev_info.max_vfs > 0) {
2572 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2573 			nb_txq = rte_port->dev_info.nb_tx_queues;
2574 		} else {
2575 			nb_rxq = rte_port->dev_info.max_rx_queues;
2576 			nb_txq = rte_port->dev_info.max_tx_queues;
2577 		}
2578 	} else {
2579 		/* If VT is disabled, use all PF queues */
2580 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2581 			nb_rxq = rte_port->dev_info.max_rx_queues;
2582 			nb_txq = rte_port->dev_info.max_tx_queues;
2583 		} else {
2584 			nb_rxq = (queueid_t)num_tcs;
2585 			nb_txq = (queueid_t)num_tcs;
2587 		}
2588 	}
2589 	rx_free_thresh = 64;
2590 
2591 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2592 
2593 	rxtx_port_config(rte_port);
2594 	/* VLAN filter */
2595 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2596 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2597 		rx_vft_set(pid, vlan_tags[i], 1);
2598 
2599 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2600 	map_port_queue_stats_mapping_registers(pid, rte_port);
2601 
2602 	rte_port->dcb_flag = 1;
2603 
2604 	return 0;
2605 }
2606 
2607 static void
2608 init_port(void)
2609 {
2610 	/* Configuration of Ethernet ports. */
2611 	ports = rte_zmalloc("testpmd: ports",
2612 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2613 			    RTE_CACHE_LINE_SIZE);
2614 	if (ports == NULL) {
2615 		rte_exit(EXIT_FAILURE,
2616 				"rte_zmalloc(%d struct rte_port) failed\n",
2617 				RTE_MAX_ETHPORTS);
2618 	}
2619 }
2620 
2621 static void
2622 force_quit(void)
2623 {
2624 	pmd_test_exit();
2625 	prompt_exit();
2626 }
2627 
2628 static void
2629 print_stats(void)
2630 {
2631 	uint8_t i;
2632 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2633 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2634 
2635 	/* Clear screen and move to top left */
2636 	printf("%s%s", clr, top_left);
2637 
2638 	printf("\nPort statistics ====================================");
2639 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2640 		nic_stats_display(fwd_ports_ids[i]);
2641 }
2642 
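/*
 * On SIGINT/SIGTERM: clean up, then re-raise the signal with the
 * default handler restored so that the process reports the expected
 * termination status to its parent (the usual POSIX re-raise idiom).
 */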
2643 static void
2644 signal_handler(int signum)
2645 {
2646 	if (signum == SIGINT || signum == SIGTERM) {
2647 		printf("\nSignal %d received, preparing to exit...\n",
2648 				signum);
2649 #ifdef RTE_LIBRTE_PDUMP
2650 		/* uninitialize packet capture framework */
2651 		rte_pdump_uninit();
2652 #endif
2653 #ifdef RTE_LIBRTE_LATENCY_STATS
2654 		rte_latencystats_uninit();
2655 #endif
2656 		force_quit();
2657 		/* Set flag to indicate the force termination. */
2658 		f_quit = 1;
2659 		/* exit with the expected status */
2660 		signal(signum, SIG_DFL);
2661 		kill(getpid(), signum);
2662 	}
2663 }
2664 
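/*
 * Entry point. The start-up order matters: EAL initialization comes
 * first, then option parsing (launch_args_parse), init_config() to
 * build the forwarding configuration, and start_port(RTE_PORT_ALL)
 * before any packet forwarding begins.
 */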
2665 int
2666 main(int argc, char** argv)
2667 {
2668 	int diag;
2669 	portid_t port_id;
2670 	int ret;
2671 
2672 	signal(SIGINT, signal_handler);
2673 	signal(SIGTERM, signal_handler);
2674 
2675 	diag = rte_eal_init(argc, argv);
2676 	if (diag < 0)
2677 		rte_panic("Cannot init EAL\n");
2678 
2679 	testpmd_logtype = rte_log_register("testpmd");
2680 	if (testpmd_logtype < 0)
2681 		rte_panic("Cannot register log type");
2682 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2683 
2684 #ifdef RTE_LIBRTE_PDUMP
2685 	/* initialize packet capture framework */
2686 	rte_pdump_init(NULL);
2687 #endif
2688 
2689 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2690 	if (nb_ports == 0)
2691 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2692 
2693 	/* allocate port structures, and init them */
2694 	init_port();
2695 
2696 	set_def_fwd_config();
2697 	if (nb_lcores == 0)
2698 		rte_panic("Empty set of forwarding logical cores - check the "
2699 			  "core mask supplied in the command parameters\n");
2700 
2701 	/* Bitrate/latency stats disabled by default */
2702 #ifdef RTE_LIBRTE_BITRATE
2703 	bitrate_enabled = 0;
2704 #endif
2705 #ifdef RTE_LIBRTE_LATENCY_STATS
2706 	latencystats_enabled = 0;
2707 #endif
2708 
2709 	/* on FreeBSD, mlockall() is disabled by default */
2710 #ifdef RTE_EXEC_ENV_BSDAPP
2711 	do_mlockall = 0;
2712 #else
2713 	do_mlockall = 1;
2714 #endif
2715 
2716 	argc -= diag;
2717 	argv += diag;
2718 	if (argc > 1)
2719 		launch_args_parse(argc, argv);
2720 
2721 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2722 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2723 			strerror(errno));
2724 	}
2725 
2726 	if (tx_first && interactive)
2727 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2728 				"interactive mode.\n");
2729 
2730 	if (tx_first && lsc_interrupt) {
2731 		printf("Warning: lsc_interrupt needs to be off when "
2732 				"using tx_first. Disabling.\n");
2733 		lsc_interrupt = 0;
2734 	}
2735 
2736 	if (!nb_rxq && !nb_txq)
2737 		printf("Warning: Either the rx or tx queue count should be non-zero\n");
2738 
2739 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2740 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2741 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2742 		       nb_rxq, nb_txq);
2743 
2744 	init_config();
2745 
2746 	if (hot_plug) {
2747 		/* enable hot plug monitoring */
2748 		ret = rte_dev_event_monitor_start();
2749 		if (ret) {
2750 			rte_errno = EINVAL;
2751 			return -1;
2752 		}
2753 		eth_dev_event_callback_register();
2754 
2755 	}
2756 
2757 	if (start_port(RTE_PORT_ALL) != 0)
2758 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2759 
2760 	/* set all ports to promiscuous mode by default */
2761 	RTE_ETH_FOREACH_DEV(port_id)
2762 		rte_eth_promiscuous_enable(port_id);
2763 
2764 	/* Init metrics library */
2765 	rte_metrics_init(rte_socket_id());
2766 
2767 #ifdef RTE_LIBRTE_LATENCY_STATS
2768 	if (latencystats_enabled != 0) {
2769 		int ret = rte_latencystats_init(1, NULL);
2770 		if (ret)
2771 			printf("Warning: latencystats init()"
2772 				" returned error %d\n", ret);
2773 		printf("Latencystats running on lcore %d\n",
2774 			latencystats_lcore_id);
2775 	}
2776 #endif
2777 
2778 	/* Setup bitrate stats */
2779 #ifdef RTE_LIBRTE_BITRATE
2780 	if (bitrate_enabled != 0) {
2781 		bitrate_data = rte_stats_bitrate_create();
2782 		if (bitrate_data == NULL)
2783 			rte_exit(EXIT_FAILURE,
2784 				"Could not allocate bitrate data.\n");
2785 		rte_stats_bitrate_reg(bitrate_data);
2786 	}
2787 #endif
2788 
2789 #ifdef RTE_LIBRTE_CMDLINE
2790 	if (strlen(cmdline_filename) != 0)
2791 		cmdline_read_from_file(cmdline_filename);
2792 
2793 	if (interactive == 1) {
2794 		if (auto_start) {
2795 			printf("Start automatic packet forwarding\n");
2796 			start_packet_forwarding(0);
2797 		}
2798 		prompt();
2799 		pmd_test_exit();
2800 	} else
2801 #endif
2802 	{
2803 		char c;
2804 		int rc;
2805 
2806 		f_quit = 0;
2807 
2808 		printf("No command-line core given, starting packet forwarding\n");
2809 		start_packet_forwarding(tx_first);
2810 		if (stats_period != 0) {
2811 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2812 			uint64_t timer_period;
2813 
2814 			/* Convert to number of cycles */
2815 			timer_period = stats_period * rte_get_timer_hz();
2816 
2817 			while (f_quit == 0) {
2818 				cur_time = rte_get_timer_cycles();
2819 				diff_time += cur_time - prev_time;
2820 
2821 				if (diff_time >= timer_period) {
2822 					print_stats();
2823 					/* Reset the timer */
2824 					diff_time = 0;
2825 				}
2826 				/* Sleep to avoid unnecessary checks */
2827 				prev_time = cur_time;
2828 				sleep(1);
2829 			}
2830 		}
2831 
2832 		printf("Press enter to exit\n");
2833 		rc = read(0, &c, 1);
2834 		pmd_test_exit();
2835 		if (rc < 0)
2836 			return 1;
2837 	}
2838 
2839 	return 0;
2840 }
2841