xref: /dpdk/app/test-pmd/testpmd.c (revision d9a42a69febf453cdb735e77fc0e01463ddf4acc)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX ring used by each port
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX ring used by each port
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option
 * cannot be terminated from the terminal. Set a flag to exit the
 * stats-period loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask Ethernet events.
 * Defaults to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
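/*
 * Illustrative use of the mask (not executed here): clearing a bit
 * silences the corresponding event, e.g.
 *   event_print_mask &= ~(UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC);
 */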

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	.ignore_offload_bitfield = 1,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
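/*
 * The all-ones masks above make flow director filters match on full
 * field values; drop_queue selects the RX queue that receives packets
 * matched by filters with the drop action.
 */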

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Hide zero values for xstats display; zeros are shown by default.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				enum rte_dev_event_type type,
				void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);


/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
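/* Default GSO segment size: maximum Ethernet frame length minus CRC. */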
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket has not been discovered yet.
 * Return a positive value if the socket is new, zero if already known.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

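	/* Total object size: the mbuf header followed by the data room. */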
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
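/*
 * Example (illustrative values): with mbuf_seg_size = 2048 and
 * nb_mbuf = 8192 on socket 0, this creates a single pool named after
 * socket 0 holding 8192 mbufs with 2048 bytes of data room each.
 */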

/*
 * Check whether the given socket ID is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID that has the minimal value of
 * max_rx_queues of all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the requested number of RX queues is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID that has the minimal value of
 * max_tx_queues of all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the requested number of TX queues is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
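	/*
	 * Sizing sketch (constants are build-time values): the pool must
	 * cover a full RX ring, a full TX ring, one burst in flight, plus
	 * one mempool cache per lcore, for every possible port.
	 */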

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Record which mbuf pool each logical core uses, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* Set the socket ID according to whether NUMA is used. */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* Clear the old forwarding streams. */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* Initialize the new forwarding streams. */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
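/*
 * For example, with 2 ports, nb_rxq = 4 and nb_txq = 2, q = 4 and
 * 8 forwarding streams are allocated.
 */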

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
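	/*
	 * Note: burst_stats[1] only ever receives a displaced running
	 * maximum, so a burst size that never became the maximum is not
	 * tracked as the second-highest entry.
	 */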
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

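	/*
	 * Drain each RX queue twice, pausing 10 ms in between, so that
	 * packets still in flight during the first pass are also flushed.
	 */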
	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * packets. A timer is therefore used to exit
				 * the loop after a 1-second timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t idx_port;

	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				RTE_ETH_FOREACH_DEV(idx_port)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
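	/*
	 * Mark the local copy as stopped up front: the do-while loop in
	 * run_pkt_fwd_on_lcore() then makes exactly one pass over the
	 * streams, i.e. sends a single burst.
	 */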
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Set up the per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
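		/*
		 * Report only the delta since forwarding started: subtract
		 * the snapshot taken in start_packet_forwarding() and reset
		 * the saved counters.
		 */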
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All (non-slave) ports are started. */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
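		/*
		 * Atomically take ownership of the port by moving it from
		 * STOPPED to HANDLING; if the transition fails, the port is
		 * not stopped (or is handled elsewhere) and is skipped.
		 */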
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
			/* Apply Tx offloads configuration */
			port->tx_conf.offloads = port->dev_conf.txmode.offloads;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Failed to set up the TX queue; return. */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* Apply Rx offloads configuration */
			port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocation"
							" on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocation"
							" on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Failed to set up the RX queue; return. */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start the port; restore the stopped state. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* At least one port started; the link status needs checking. */
		need_check_link_status = 1;
	}

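	/*
	 * Register the same callback for every event type on all ports;
	 * which events are actually displayed is filtered via
	 * event_print_mask.
	 */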
1700 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
1701 	     event_type < RTE_ETH_EVENT_MAX;
1702 	     event_type++) {
1703 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1704 						event_type,
1705 						eth_event_callback,
1706 						NULL);
1707 		if (diag) {
1708 			printf("Failed to setup even callback for event %d\n",
1709 				event_type);
1710 			return -1;
1711 		}
1712 	}
1713 
1714 	if (need_check_link_status == 1 && !no_link_check)
1715 		check_all_ports_link_status(RTE_PORT_ALL);
1716 	else if (need_check_link_status == 0)
1717 		printf("Please stop the ports first\n");
1718 
1719 	printf("Done\n");
1720 	return 0;
1721 }
1722 
1723 void
1724 stop_port(portid_t pid)
1725 {
1726 	portid_t pi;
1727 	struct rte_port *port;
1728 	int need_check_link_status = 0;
1729 
1730 	if (dcb_test) {
1731 		dcb_test = 0;
1732 		dcb_config = 0;
1733 	}
1734 
1735 	if (port_id_is_invalid(pid, ENABLED_WARN))
1736 		return;
1737 
1738 	printf("Stopping ports...\n");
1739 
1740 	RTE_ETH_FOREACH_DEV(pi) {
1741 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1742 			continue;
1743 
1744 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1745 			printf("Please remove port %d from forwarding configuration.\n", pi);
1746 			continue;
1747 		}
1748 
1749 		if (port_is_bonding_slave(pi)) {
1750 			printf("Please remove port %d from bonded device.\n", pi);
1751 			continue;
1752 		}
1753 
1754 		port = &ports[pi];
1755 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1756 						RTE_PORT_HANDLING) == 0)
1757 			continue;
1758 
1759 		rte_eth_dev_stop(pi);
1760 
1761 		if (rte_atomic16_cmpset(&(port->port_status),
1762 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1763 			printf("Port %d can not be set into stopped\n", pi);
1764 		need_check_link_status = 1;
1765 	}
1766 	if (need_check_link_status && !no_link_check)
1767 		check_all_ports_link_status(RTE_PORT_ALL);
1768 
1769 	printf("Done\n");
1770 }
1771 
1772 void
1773 close_port(portid_t pid)
1774 {
1775 	portid_t pi;
1776 	struct rte_port *port;
1777 
1778 	if (port_id_is_invalid(pid, ENABLED_WARN))
1779 		return;
1780 
1781 	printf("Closing ports...\n");
1782 
1783 	RTE_ETH_FOREACH_DEV(pi) {
1784 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1785 			continue;
1786 
1787 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1788 			printf("Please remove port %d from forwarding configuration.\n", pi);
1789 			continue;
1790 		}
1791 
1792 		if (port_is_bonding_slave(pi)) {
1793 			printf("Please remove port %d from bonded device.\n", pi);
1794 			continue;
1795 		}
1796 
1797 		port = &ports[pi];
1798 		if (rte_atomic16_cmpset(&(port->port_status),
1799 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1800 			printf("Port %d is already closed\n", pi);
1801 			continue;
1802 		}
1803 
1804 		if (rte_atomic16_cmpset(&(port->port_status),
1805 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1806 			printf("Port %d is now not stopped\n", pi);
1807 			continue;
1808 		}
1809 
1810 		if (port->flow_list)
1811 			port_flow_flush(pi);
1812 		rte_eth_dev_close(pi);
1813 
1814 		if (rte_atomic16_cmpset(&(port->port_status),
1815 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1816 			printf("Port %d cannot be set to closed\n", pi);
1817 	}
1818 
1819 	printf("Done\n");
1820 }
1821 
1822 void
1823 reset_port(portid_t pid)
1824 {
1825 	int diag;
1826 	portid_t pi;
1827 	struct rte_port *port;
1828 
1829 	if (port_id_is_invalid(pid, ENABLED_WARN))
1830 		return;
1831 
1832 	printf("Resetting ports...\n");
1833 
1834 	RTE_ETH_FOREACH_DEV(pi) {
1835 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1836 			continue;
1837 
1838 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1839 			printf("Please remove port %d from forwarding "
1840 			       "configuration.\n", pi);
1841 			continue;
1842 		}
1843 
1844 		if (port_is_bonding_slave(pi)) {
1845 			printf("Please remove port %d from bonded device.\n",
1846 			       pi);
1847 			continue;
1848 		}
1849 
1850 		diag = rte_eth_dev_reset(pi);
1851 		if (diag == 0) {
1852 			port = &ports[pi];
1853 			port->need_reconfig = 1;
1854 			port->need_reconfig_queues = 1;
1855 		} else {
1856 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1857 		}
1858 	}
1859 
1860 	printf("Done\n");
1861 }
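
/*
 * Editor's note: illustrative sketch, not part of testpmd. A plausible
 * recovery flow when a PMD raises RTE_ETH_EVENT_INTR_RESET, assuming the
 * application quiesces the port before resetting it:
 */
#if 0
	stop_port(pid);		/* stop RX/TX first */
	reset_port(pid);	/* sets need_reconfig / need_reconfig_queues */
	start_port(pid);	/* reconfigures the device and restarts it */
#endif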
1862 
1863 static int
1864 eth_dev_event_callback_register(void)
1865 {
1866 	int ret;
1867 
1868 	/* register the device event callback */
1869 	ret = rte_dev_event_callback_register(NULL,
1870 		eth_dev_event_callback, NULL);
1871 	if (ret) {
1872 		printf("Failed to register device event callback\n");
1873 		return -1;
1874 	}
1875 
1876 	return 0;
1877 }
1878 
1879 
1880 static int
1881 eth_dev_event_callback_unregister(void)
1882 {
1883 	int ret;
1884 
1885 	/* unregister the device event callback */
1886 	ret = rte_dev_event_callback_unregister(NULL,
1887 		eth_dev_event_callback, NULL);
1888 	if (ret < 0) {
1889 		printf("Failed to unregister device event callback\n");
1890 		return -1;
1891 	}
1892 
1893 	return 0;
1894 }
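
/*
 * Editor's note: illustrative sketch, not part of testpmd. The NULL first
 * argument in the two helpers above subscribes to events from all devices;
 * a single device can be selected by name instead (the PCI address below is
 * hypothetical):
 */
#if 0
	ret = rte_dev_event_callback_register("0000:03:00.0",
			eth_dev_event_callback, NULL);
#endif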
1895 
1896 void
1897 attach_port(char *identifier)
1898 {
1899 	portid_t pi = 0;
1900 	unsigned int socket_id;
1901 
1902 	printf("Attaching a new port...\n");
1903 
1904 	if (identifier == NULL) {
1905 		printf("Invalid parameter specified\n");
1906 		return;
1907 	}
1908 
1909 	if (rte_eth_dev_attach(identifier, &pi))
1910 		return;
1911 
1912 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1913 	/* if socket_id is invalid, set to 0 */
1914 	if (check_socket_id(socket_id) < 0)
1915 		socket_id = 0;
1916 	reconfig(pi, socket_id);
1917 	rte_eth_promiscuous_enable(pi);
1918 
1919 	nb_ports = rte_eth_dev_count_avail();
1920 
1921 	ports[pi].port_status = RTE_PORT_STOPPED;
1922 
1923 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1924 	printf("Done\n");
1925 }
1926 
1927 void
1928 detach_port(portid_t port_id)
1929 {
1930 	char name[RTE_ETH_NAME_MAX_LEN];
1931 
1932 	printf("Detaching a port...\n");
1933 
1934 	if (!port_is_closed(port_id)) {
1935 		printf("Please close port first\n");
1936 		return;
1937 	}
1938 
1939 	if (ports[port_id].flow_list)
1940 		port_flow_flush(port_id);
1941 
1942 	if (rte_eth_dev_detach(port_id, name)) {
1943 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1944 		return;
1945 	}
1946 
1947 	nb_ports = rte_eth_dev_count_avail();
1948 
1949 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1950 			name, nb_ports);
1951 	printf("Done\n");
1952 	return;
1953 }
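
/*
 * Editor's note: illustrative sketch, not part of testpmd. A full hotplug
 * round trip built from the helpers above; the vdev name is only an example:
 */
#if 0
	portid_t pi = 0;	/* set to the id printed by attach_port() */

	attach_port("net_null0");	/* probe and configure a new port */
	stop_port(pi);
	close_port(pi);			/* detach_port() needs a closed port */
	detach_port(pi);
#endif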
1954 
1955 void
1956 pmd_test_exit(void)
1957 {
1958 	portid_t pt_id;
1959 	int ret;
1960 
1961 	if (test_done == 0)
1962 		stop_packet_forwarding();
1963 
1964 	if (ports != NULL) {
1965 		no_link_check = 1;
1966 		RTE_ETH_FOREACH_DEV(pt_id) {
1967 			printf("\nShutting down port %d...\n", pt_id);
1968 			fflush(stdout);
1969 			stop_port(pt_id);
1970 			close_port(pt_id);
1971 		}
1972 	}
1973 
1974 	if (hot_plug) {
1975 		ret = rte_dev_event_monitor_stop();
1976 		if (ret)
1977 			RTE_LOG(ERR, EAL,
1978 		printf("Failed to register device event callback\n");
1979 
1980 		ret = eth_dev_event_callback_unregister();
1981 		if (ret)
1982 			RTE_LOG(ERR, EAL,
1983 		printf("Failed to unregister device event callback\n");
1984 	}
1985 
1986 	printf("\nBye...\n");
1987 }
1988 
1989 typedef void (*cmd_func_t)(void);
1990 struct pmd_test_command {
1991 	const char *cmd_name;
1992 	cmd_func_t cmd_func;
1993 };
1994 
1995 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1996 
1997 /* Poll the link status of all ports for up to 9s, then print the final status of each port */
1998 static void
1999 check_all_ports_link_status(uint32_t port_mask)
2000 {
2001 #define CHECK_INTERVAL 100 /* 100ms */
2002 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2003 	portid_t portid;
2004 	uint8_t count, all_ports_up, print_flag = 0;
2005 	struct rte_eth_link link;
2006 
2007 	printf("Checking link statuses...\n");
2008 	fflush(stdout);
2009 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2010 		all_ports_up = 1;
2011 		RTE_ETH_FOREACH_DEV(portid) {
2012 			if ((port_mask & (1 << portid)) == 0)
2013 				continue;
2014 			memset(&link, 0, sizeof(link));
2015 			rte_eth_link_get_nowait(portid, &link);
2016 			/* print link status if flag set */
2017 			if (print_flag == 1) {
2018 				if (link.link_status)
2019 					printf(
2020 					"Port %d Link Up. speed %u Mbps - %s\n",
2021 					portid, link.link_speed,
2022 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2023 					"full-duplex" : "half-duplex");
2024 				else
2025 					printf("Port %d Link Down\n", portid);
2026 				continue;
2027 			}
2028 			/* clear all_ports_up flag if any link down */
2029 			if (link.link_status == ETH_LINK_DOWN) {
2030 				all_ports_up = 0;
2031 				break;
2032 			}
2033 		}
2034 		/* After the final link status of all ports is printed, exit */
2035 		if (print_flag == 1)
2036 			break;
2037 
2038 		if (all_ports_up == 0) {
2039 			fflush(stdout);
2040 			rte_delay_ms(CHECK_INTERVAL);
2041 		}
2042 
2043 		/* set the print_flag if all ports up or timeout */
2044 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2045 			print_flag = 1;
2046 		}
2047 
2048 		if (lsc_interrupt)
2049 			break;
2050 	}
2051 }
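
/*
 * Editor's note: illustrative sketch, not part of testpmd. The non-blocking
 * query used by the polling loop above, reduced to a single port:
 */
#if 0
	struct rte_eth_link link;
	portid_t port_id = 0;	/* any valid port */

	memset(&link, 0, sizeof(link));
	rte_eth_link_get_nowait(port_id, &link);	/* never sleeps */
	if (link.link_status == ETH_LINK_UP)
		printf("Port %u up, %u Mbps\n", port_id, link.link_speed);
	else
		printf("Port %u down\n", port_id);
#endif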
2052 
2053 static void
2054 rmv_event_callback(void *arg)
2055 {
2056 	struct rte_eth_dev *dev;
2057 	portid_t port_id = (intptr_t)arg;
2058 
2059 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2060 	dev = &rte_eth_devices[port_id];
2061 
2062 	stop_port(port_id);
2063 	close_port(port_id);
2064 	printf("Removing device %s\n", dev->device->name);
2065 	if (rte_eal_dev_detach(dev->device))
2066 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2067 			dev->device->name);
2068 }
2069 
2070 /* This function is used by the interrupt thread */
2071 static int
2072 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2073 		  void *ret_param)
2074 {
2075 	static const char * const event_desc[] = {
2076 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2077 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2078 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2079 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2080 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2081 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2082 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2083 		[RTE_ETH_EVENT_NEW] = "device probed",
2084 		[RTE_ETH_EVENT_DESTROY] = "device released",
2085 		[RTE_ETH_EVENT_MAX] = NULL,
2086 	};
2087 
2088 	RTE_SET_USED(param);
2089 	RTE_SET_USED(ret_param);
2090 
2091 	if (type >= RTE_ETH_EVENT_MAX) {
2092 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2093 			port_id, __func__, type);
2094 		fflush(stderr);
2095 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2096 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2097 			event_desc[type]);
2098 		fflush(stdout);
2099 	}
2100 
2101 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2102 		return 0;
2103 
2104 	switch (type) {
2105 	case RTE_ETH_EVENT_INTR_RMV:
2106 		if (rte_eal_alarm_set(100000,
2107 				rmv_event_callback, (void *)(intptr_t)port_id))
2108 			fprintf(stderr, "Could not set up deferred device removal\n");
2109 		break;
2110 	default:
2111 		break;
2112 	}
2113 	return 0;
2114 }
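
/*
 * Editor's note: illustrative sketch, not part of testpmd. This handler is
 * registered elsewhere in this file, one registration per event type,
 * roughly as follows:
 */
#if 0
	enum rte_eth_event_type ev;

	for (ev = RTE_ETH_EVENT_UNKNOWN; ev < RTE_ETH_EVENT_MAX; ev++)
		rte_eth_dev_callback_register(RTE_ETH_ALL, ev,
				eth_event_callback, NULL);
#endif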
2115 
2116 /* This function is used by the interrupt thread */
2117 static void
2118 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2119 			     __rte_unused void *arg)
2120 {
2121 	if (type >= RTE_DEV_EVENT_MAX) {
2122 		fprintf(stderr, "%s called upon invalid event %d\n",
2123 			__func__, type);
2124 		fflush(stderr);
2125 	}
2126 
2127 	switch (type) {
2128 	case RTE_DEV_EVENT_REMOVE:
2129 		RTE_LOG(ERR, EAL, "Device %s has been removed!\n",
2130 			device_name);
2131 		/* TODO: once failure handling is complete, stop packet
2132 		 * forwarding, then stop, close and detach the port.
2133 		 */
2134 		break;
2135 	case RTE_DEV_EVENT_ADD:
2136 		RTE_LOG(ERR, EAL, "Device %s has been added!\n",
2137 			device_name);
2138 		/* TODO: once kernel driver binding is complete,
2139 		 * attach the port.
2140 		 */
2141 		break;
2142 	default:
2143 		break;
2144 	}
2145 }
2146 
2147 static int
2148 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2149 {
2150 	uint16_t i;
2151 	int diag;
2152 	uint8_t mapping_found = 0;
2153 
2154 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2155 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2156 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2157 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2158 					tx_queue_stats_mappings[i].queue_id,
2159 					tx_queue_stats_mappings[i].stats_counter_id);
2160 			if (diag != 0)
2161 				return diag;
2162 			mapping_found = 1;
2163 		}
2164 	}
2165 	if (mapping_found)
2166 		port->tx_queue_stats_mapping_enabled = 1;
2167 	return 0;
2168 }
2169 
2170 static int
2171 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2172 {
2173 	uint16_t i;
2174 	int diag;
2175 	uint8_t mapping_found = 0;
2176 
2177 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2178 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2179 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2180 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2181 					rx_queue_stats_mappings[i].queue_id,
2182 					rx_queue_stats_mappings[i].stats_counter_id);
2183 			if (diag != 0)
2184 				return diag;
2185 			mapping_found = 1;
2186 		}
2187 	}
2188 	if (mapping_found)
2189 		port->rx_queue_stats_mapping_enabled = 1;
2190 	return 0;
2191 }
2192 
2193 static void
2194 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2195 {
2196 	int diag = 0;
2197 
2198 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2199 	if (diag != 0) {
2200 		if (diag == -ENOTSUP) {
2201 			port->tx_queue_stats_mapping_enabled = 0;
2202 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
2203 		} else {
2204 			rte_exit(EXIT_FAILURE,
2205 				"set_tx_queue_stats_mapping_registers "
2206 				"failed for port id=%d diag=%d\n",
2207 				pi, diag);
2208 		}
2209 	}
2210 
2211 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2212 	if (diag != 0) {
2213 		if (diag == -ENOTSUP) {
2214 			port->rx_queue_stats_mapping_enabled = 0;
2215 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
2216 		} else {
2217 			rte_exit(EXIT_FAILURE,
2218 				"set_rx_queue_stats_mapping_registers "
2219 				"failed for port id=%d diag=%d\n",
2220 				pi, diag);
2221 		}
2222 	}
2223 }
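
/*
 * Editor's note: illustrative sketch, not part of testpmd. One entry of the
 * mapping tables walked above (field names as declared in testpmd.h); it
 * routes the counters of one queue into one of the device's limited stats
 * registers:
 */
#if 0
	struct queue_stats_mappings map = {
		.port_id = 0,		/* port whose queue is mapped */
		.queue_id = 2,		/* RX or TX queue on that port */
		.stats_counter_id = 5,	/* stats register to accumulate into */
	};
#endif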
2224 
2225 static void
2226 rxtx_port_config(struct rte_port *port)
2227 {
2228 	port->rx_conf = port->dev_info.default_rxconf;
2229 	port->tx_conf = port->dev_info.default_txconf;
2230 
2231 	/* Check if any RX/TX parameters have been passed */
2232 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2233 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2234 
2235 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2236 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2237 
2238 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2239 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2240 
2241 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2242 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2243 
2244 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2245 		port->rx_conf.rx_drop_en = rx_drop_en;
2246 
2247 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2248 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2249 
2250 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2251 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2252 
2253 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2254 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2255 
2256 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2257 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2258 
2259 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2260 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2261 }
2262 
2263 void
2264 init_port_config(void)
2265 {
2266 	portid_t pid;
2267 	struct rte_port *port;
2268 
2269 	RTE_ETH_FOREACH_DEV(pid) {
2270 		port = &ports[pid];
2271 		port->dev_conf.fdir_conf = fdir_conf;
2272 		if (nb_rxq > 1) {
2273 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2274 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2275 		} else {
2276 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2277 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2278 		}
2279 
2280 		if (port->dcb_flag == 0) {
2281 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2282 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2283 			else
2284 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2285 		}
2286 
2287 		rxtx_port_config(port);
2288 
2289 		rte_eth_macaddr_get(pid, &port->eth_addr);
2290 
2291 		map_port_queue_stats_mapping_registers(pid, port);
2292 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2293 		rte_pmd_ixgbe_bypass_init(pid);
2294 #endif
2295 
2296 		if (lsc_interrupt &&
2297 		    (rte_eth_devices[pid].data->dev_flags &
2298 		     RTE_ETH_DEV_INTR_LSC))
2299 			port->dev_conf.intr_conf.lsc = 1;
2300 		if (rmv_interrupt &&
2301 		    (rte_eth_devices[pid].data->dev_flags &
2302 		     RTE_ETH_DEV_INTR_RMV))
2303 			port->dev_conf.intr_conf.rmv = 1;
2304 
2305 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2306 		/* Detect softnic port */
2307 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2308 			port->softnic_enable = 1;
2309 			memset(&port->softport, 0, sizeof(struct softnic_port));
2310 
2311 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2312 				port->softport.tm_flag = 1;
2313 		}
2314 #endif
2315 	}
2316 }
2317 
2318 void set_port_slave_flag(portid_t slave_pid)
2319 {
2320 	struct rte_port *port;
2321 
2322 	port = &ports[slave_pid];
2323 	port->slave_flag = 1;
2324 }
2325 
2326 void clear_port_slave_flag(portid_t slave_pid)
2327 {
2328 	struct rte_port *port;
2329 
2330 	port = &ports[slave_pid];
2331 	port->slave_flag = 0;
2332 }
2333 
2334 uint8_t port_is_bonding_slave(portid_t slave_pid)
2335 {
2336 	struct rte_port *port;
2337 
2338 	port = &ports[slave_pid];
2339 	return port->slave_flag;
2340 }
2341 
2342 const uint16_t vlan_tags[] = {
2343 		0,  1,  2,  3,  4,  5,  6,  7,
2344 		8,  9, 10, 11,  12, 13, 14, 15,
2345 		16, 17, 18, 19, 20, 21, 22, 23,
2346 		24, 25, 26, 27, 28, 29, 30, 31
2347 };
2348 
2349 static int
2350 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2351 		 enum dcb_mode_enable dcb_mode,
2352 		 enum rte_eth_nb_tcs num_tcs,
2353 		 uint8_t pfc_en)
2354 {
2355 	uint8_t i;
2356 
2357 	/*
2358 	 * Build the correct configuration for DCB+VT, based on the VLAN tags
2359 	 * array given above and the number of traffic classes available for use.
2360 	 */
2361 	if (dcb_mode == DCB_VT_ENABLED) {
2362 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2363 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2364 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2365 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2366 
2367 		/* VMDQ+DCB RX and TX configurations */
2368 		vmdq_rx_conf->enable_default_pool = 0;
2369 		vmdq_rx_conf->default_pool = 0;
2370 		vmdq_rx_conf->nb_queue_pools =
2371 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2372 		vmdq_tx_conf->nb_queue_pools =
2373 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2374 
2375 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2376 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2377 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2378 			vmdq_rx_conf->pool_map[i].pools =
2379 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2380 		}
2381 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2382 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2383 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2384 		}
2385 
2386 		/* set DCB mode of RX and TX of multiple queues */
2387 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2388 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2389 	} else {
2390 		struct rte_eth_dcb_rx_conf *rx_conf =
2391 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2392 		struct rte_eth_dcb_tx_conf *tx_conf =
2393 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2394 
2395 		rx_conf->nb_tcs = num_tcs;
2396 		tx_conf->nb_tcs = num_tcs;
2397 
2398 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2399 			rx_conf->dcb_tc[i] = i % num_tcs;
2400 			tx_conf->dcb_tc[i] = i % num_tcs;
2401 		}
2402 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2403 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2404 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2405 	}
2406 
2407 	if (pfc_en)
2408 		eth_conf->dcb_capability_en =
2409 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2410 	else
2411 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2412 
2413 	return 0;
2414 }
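
/*
 * Editor's note: illustrative sketch, not part of testpmd. Building a DCB
 * configuration with the helper above -- four traffic classes, PFC enabled,
 * virtualization (VT) disabled:
 */
#if 0
	struct rte_eth_conf conf;
	portid_t port_id = 0;	/* any valid, stopped port */

	memset(&conf, 0, sizeof(conf));
	if (get_eth_dcb_conf(&conf, DCB_ENABLED, ETH_4_TCS, 1) == 0)
		(void)rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
#endif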
2415 
2416 int
2417 init_port_dcb_config(portid_t pid,
2418 		     enum dcb_mode_enable dcb_mode,
2419 		     enum rte_eth_nb_tcs num_tcs,
2420 		     uint8_t pfc_en)
2421 {
2422 	struct rte_eth_conf port_conf;
2423 	struct rte_port *rte_port;
2424 	int retval;
2425 	uint16_t i;
2426 
2427 	rte_port = &ports[pid];
2428 
2429 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2430 	/* Enter DCB configuration status */
2431 	dcb_config = 1;
2432 
2433 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2434 	port_conf.txmode = rte_port->dev_conf.txmode;
2435 
2436 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2437 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2438 	if (retval < 0)
2439 		return retval;
2440 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2441 
2442 	/*
2443 	 * Write the configuration into the device.
2444 	 * Set the number of RX & TX queues to 0, so
2445 	 * the RX & TX queues will not be set up.
2446 	 */
2447 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2448 
2449 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2450 
2451 	/* If dev_info.vmdq_pool_base is greater than 0,
2452 	 * the queue id of vmdq pools is started after pf queues.
2453 	 */
2454 	if (dcb_mode == DCB_VT_ENABLED &&
2455 	    rte_port->dev_info.vmdq_pool_base > 0) {
2456 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2457 			" for port %d.\n", pid);
2458 		return -1;
2459 	}
2460 
2461 	/* Assume the ports in testpmd have the same DCB capability
2462 	 * and the same number of rxq and txq in DCB mode
2463 	 */
2464 	if (dcb_mode == DCB_VT_ENABLED) {
2465 		if (rte_port->dev_info.max_vfs > 0) {
2466 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2467 			nb_txq = rte_port->dev_info.nb_tx_queues;
2468 		} else {
2469 			nb_rxq = rte_port->dev_info.max_rx_queues;
2470 			nb_txq = rte_port->dev_info.max_tx_queues;
2471 		}
2472 	} else {
2473 		/* If VT is disabled, use all PF queues */
2474 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2475 			nb_rxq = rte_port->dev_info.max_rx_queues;
2476 			nb_txq = rte_port->dev_info.max_tx_queues;
2477 		} else {
2478 			nb_rxq = (queueid_t)num_tcs;
2479 			nb_txq = (queueid_t)num_tcs;
2480 
2481 		}
2482 	}
2483 	rx_free_thresh = 64;
2484 
2485 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2486 
2487 	rxtx_port_config(rte_port);
2488 	/* VLAN filter */
2489 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2490 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2491 		rx_vft_set(pid, vlan_tags[i], 1);
2492 
2493 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2494 	map_port_queue_stats_mapping_registers(pid, rte_port);
2495 
2496 	rte_port->dcb_flag = 1;
2497 
2498 	return 0;
2499 }
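
/*
 * Editor's note: in interactive mode the command
 *	testpmd> port config <port> dcb vt off 4 pfc on
 * ends up calling the function above, roughly as sketched below; the port
 * must be stopped before it can be reconfigured:
 */
#if 0
	stop_port(pid);
	if (init_port_dcb_config(pid, DCB_ENABLED, ETH_4_TCS, 1) == 0)
		start_port(pid);
#endif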
2500 
2501 static void
2502 init_port(void)
2503 {
2504 	/* Configuration of Ethernet ports. */
2505 	ports = rte_zmalloc("testpmd: ports",
2506 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2507 			    RTE_CACHE_LINE_SIZE);
2508 	if (ports == NULL) {
2509 		rte_exit(EXIT_FAILURE,
2510 				"rte_zmalloc(%d struct rte_port) failed\n",
2511 				RTE_MAX_ETHPORTS);
2512 	}
2513 }
2514 
2515 static void
2516 force_quit(void)
2517 {
2518 	pmd_test_exit();
2519 	prompt_exit();
2520 }
2521 
2522 static void
2523 print_stats(void)
2524 {
2525 	uint8_t i;
2526 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2527 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2528 
2529 	/* Clear screen and move to top left */
2530 	printf("%s%s", clr, top_left);
2531 
2532 	printf("\nPort statistics ====================================");
2533 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2534 		nic_stats_display(fwd_ports_ids[i]);
2535 }
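
/*
 * Editor's note: clr and top_left above are raw VT100/ANSI escape sequences
 * (27 is ESC): ESC[2J clears the screen and ESC[1;1H homes the cursor. An
 * equivalent one-liner:
 */
#if 0
	printf("\033[2J\033[1;1H");	/* clear screen, cursor to top-left */
#endif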
2536 
2537 static void
2538 signal_handler(int signum)
2539 {
2540 	if (signum == SIGINT || signum == SIGTERM) {
2541 		printf("\nSignal %d received, preparing to exit...\n",
2542 				signum);
2543 #ifdef RTE_LIBRTE_PDUMP
2544 		/* uninitialize packet capture framework */
2545 		rte_pdump_uninit();
2546 #endif
2547 #ifdef RTE_LIBRTE_LATENCY_STATS
2548 		rte_latencystats_uninit();
2549 #endif
2550 		force_quit();
2551 		/* Set flag to indicate the force termination. */
2552 		/* Set flag to indicate forced termination. */
2553 		/* exit with the expected status */
2554 		signal(signum, SIG_DFL);
2555 		kill(getpid(), signum);
2556 	}
2557 }
2558 
2559 int
2560 main(int argc, char** argv)
2561 {
2562 	int diag;
2563 	portid_t port_id;
2564 	int ret;
2565 
2566 	signal(SIGINT, signal_handler);
2567 	signal(SIGTERM, signal_handler);
2568 
2569 	diag = rte_eal_init(argc, argv);
2570 	if (diag < 0)
2571 		rte_panic("Cannot init EAL\n");
2572 
2573 	testpmd_logtype = rte_log_register("testpmd");
2574 	if (testpmd_logtype < 0)
2575 		rte_panic("Cannot register log type");
2576 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2577 
2578 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2579 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2580 			strerror(errno));
2581 	}
2582 
2583 #ifdef RTE_LIBRTE_PDUMP
2584 	/* initialize packet capture framework */
2585 	rte_pdump_init(NULL);
2586 #endif
2587 
2588 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2589 	if (nb_ports == 0)
2590 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2591 
2592 	/* allocate port structures, and init them */
2593 	init_port();
2594 
2595 	set_def_fwd_config();
2596 	if (nb_lcores == 0)
2597 		rte_panic("Empty set of forwarding logical cores - check the "
2598 			  "core mask supplied in the command parameters\n");
2599 
2600 	/* Bitrate/latency stats disabled by default */
2601 #ifdef RTE_LIBRTE_BITRATE
2602 	bitrate_enabled = 0;
2603 #endif
2604 #ifdef RTE_LIBRTE_LATENCY_STATS
2605 	latencystats_enabled = 0;
2606 #endif
2607 
2608 	argc -= diag;
2609 	argv += diag;
2610 	if (argc > 1)
2611 		launch_args_parse(argc, argv);
2612 
2613 	if (tx_first && interactive)
2614 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2615 				"interactive mode.\n");
2616 
2617 	if (tx_first && lsc_interrupt) {
2618 		printf("Warning: lsc_interrupt needs to be off when "
2619 				"using tx_first. Disabling.\n");
2620 		lsc_interrupt = 0;
2621 	}
2622 
2623 	if (!nb_rxq && !nb_txq)
2624 		printf("Warning: Either the RX or TX queue count should be non-zero\n");
2625 
2626 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2627 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2628 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2629 		       nb_rxq, nb_txq);
2630 
2631 	init_config();
2632 
2633 	if (hot_plug) {
2634 		/* enable hot plug monitoring */
2635 		ret = rte_dev_event_monitor_start();
2636 		if (ret) {
2637 			rte_errno = EINVAL;
2638 			return -1;
2639 		}
2640 		eth_dev_event_callback_register();
2641 
2642 	}
2643 
2644 	if (start_port(RTE_PORT_ALL) != 0)
2645 		rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2646 
2647 	/* set all ports to promiscuous mode by default */
2648 	RTE_ETH_FOREACH_DEV(port_id)
2649 		rte_eth_promiscuous_enable(port_id);
2650 
2651 	/* Init metrics library */
2652 	rte_metrics_init(rte_socket_id());
2653 
2654 #ifdef RTE_LIBRTE_LATENCY_STATS
2655 	if (latencystats_enabled != 0) {
2656 		int ret = rte_latencystats_init(1, NULL);
2657 		if (ret)
2658 			printf("Warning: latencystats init()"
2659 				" returned error %d\n", ret);
2660 		printf("Latencystats running on lcore %d\n",
2661 			latencystats_lcore_id);
2662 	}
2663 #endif
2664 
2665 	/* Setup bitrate stats */
2666 #ifdef RTE_LIBRTE_BITRATE
2667 	if (bitrate_enabled != 0) {
2668 		bitrate_data = rte_stats_bitrate_create();
2669 		if (bitrate_data == NULL)
2670 			rte_exit(EXIT_FAILURE,
2671 				"Could not allocate bitrate data.\n");
2672 		rte_stats_bitrate_reg(bitrate_data);
2673 	}
2674 #endif
2675 
2676 #ifdef RTE_LIBRTE_CMDLINE
2677 	if (strlen(cmdline_filename) != 0)
2678 		cmdline_read_from_file(cmdline_filename);
2679 
2680 	if (interactive == 1) {
2681 		if (auto_start) {
2682 			printf("Start automatic packet forwarding\n");
2683 			start_packet_forwarding(0);
2684 		}
2685 		prompt();
2686 		pmd_test_exit();
2687 	} else
2688 #endif
2689 	{
2690 		char c;
2691 		int rc;
2692 
2693 		f_quit = 0;
2694 
2695 		printf("No command line given, starting packet forwarding\n");
2696 		start_packet_forwarding(tx_first);
2697 		if (stats_period != 0) {
2698 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2699 			uint64_t timer_period;
2700 
2701 			/* Convert to number of cycles */
2702 			timer_period = stats_period * rte_get_timer_hz();
2703 
2704 			while (f_quit == 0) {
2705 				cur_time = rte_get_timer_cycles();
2706 				diff_time += cur_time - prev_time;
2707 
2708 				if (diff_time >= timer_period) {
2709 					print_stats();
2710 					/* Reset the timer */
2711 					diff_time = 0;
2712 				}
2713 				/* Sleep to avoid unnecessary checks */
2714 				prev_time = cur_time;
2715 				sleep(1);
2716 			}
2717 		}
2718 
2719 		printf("Press enter to exit\n");
2720 		rc = read(0, &c, 1);
2721 		pmd_test_exit();
2722 		if (rc < 0)
2723 			return 1;
2724 	}
2725 
2726 	return 0;
2727 }
2728