xref: /dpdk/app/test-pmd/testpmd.c (revision dbfb8ec7094c7115c6d620929de2aedfc9e440aa)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30af75078fSIntel #include <rte_memory.h>
31af75078fSIntel #include <rte_memcpy.h>
32af75078fSIntel #include <rte_launch.h>
33af75078fSIntel #include <rte_eal.h>
34284c908cSGaetan Rivet #include <rte_alarm.h>
35af75078fSIntel #include <rte_per_lcore.h>
36af75078fSIntel #include <rte_lcore.h>
37af75078fSIntel #include <rte_atomic.h>
38af75078fSIntel #include <rte_branch_prediction.h>
39af75078fSIntel #include <rte_mempool.h>
40af75078fSIntel #include <rte_malloc.h>
41af75078fSIntel #include <rte_mbuf.h>
420e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
43af75078fSIntel #include <rte_interrupts.h>
44af75078fSIntel #include <rte_pci.h>
45af75078fSIntel #include <rte_ether.h>
46af75078fSIntel #include <rte_ethdev.h>
47edab33b1STetsuya Mukawa #include <rte_dev.h>
48af75078fSIntel #include <rte_string_fns.h>
49e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
50e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
51e261265eSRadu Nicolau #endif
52102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
53102b7329SReshma Pattan #include <rte_pdump.h>
54102b7329SReshma Pattan #endif
55938a184aSAdrien Mazarguil #include <rte_flow.h>
567e4441c8SRemy Horton #include <rte_metrics.h>
577e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
587e4441c8SRemy Horton #include <rte_bitrate.h>
597e4441c8SRemy Horton #endif
6062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6162d3216dSReshma Pattan #include <rte_latencystats.h>
6262d3216dSReshma Pattan #endif
63af75078fSIntel 
64af75078fSIntel #include "testpmd.h"
65af75078fSIntel 
66af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
67285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
68af75078fSIntel 
69af75078fSIntel /* Use the master core for the command line? */
70af75078fSIntel uint8_t interactive = 0;
71ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
7299cabef0SPablo de Lara uint8_t tx_first;
7381ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
74af75078fSIntel 
75af75078fSIntel /*
76af75078fSIntel  * NUMA support configuration.
77af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
78af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
80af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
81af75078fSIntel  */
82999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
83af75078fSIntel 
84af75078fSIntel /*
85b6ea6408SIntel  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
86b6ea6408SIntel  * not configured.
87b6ea6408SIntel  */
88b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
89b6ea6408SIntel 
90b6ea6408SIntel /*
91148f963fSBruce Richardson  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
92148f963fSBruce Richardson  */
93148f963fSBruce Richardson uint8_t mp_anon = 0;
94148f963fSBruce Richardson 
95148f963fSBruce Richardson /*
9663531389SGeorgios Katsikas  * Store the specified sockets on which the memory pools used by the ports
9763531389SGeorgios Katsikas  * are allocated.
9863531389SGeorgios Katsikas  */
9963531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
10063531389SGeorgios Katsikas 
10163531389SGeorgios Katsikas /*
10263531389SGeorgios Katsikas  * Store the specified sockets on which the RX rings used by the ports
10363531389SGeorgios Katsikas  * are allocated.
10463531389SGeorgios Katsikas  */
10563531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
10663531389SGeorgios Katsikas 
10763531389SGeorgios Katsikas /*
10863531389SGeorgios Katsikas  * Store the specified sockets on which the TX rings used by the ports
10963531389SGeorgios Katsikas  * are allocated.
11063531389SGeorgios Katsikas  */
11163531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
11263531389SGeorgios Katsikas 
11363531389SGeorgios Katsikas /*
114af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
115af75078fSIntel  * forwarded.
116547d946cSNirmoy Das  * Must be instantiated with the Ethernet addresses of peer traffic generator
117af75078fSIntel  * ports.
118af75078fSIntel  */
119af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120af75078fSIntel portid_t nb_peer_eth_addrs = 0;
121af75078fSIntel 
122af75078fSIntel /*
123af75078fSIntel  * Probed Target Environment.
124af75078fSIntel  */
125af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
126af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
127af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
129af75078fSIntel 
1304918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1314918a357SXiaoyun Li 
132af75078fSIntel /*
133af75078fSIntel  * Test Forwarding Configuration.
134af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
135af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
136af75078fSIntel  */
137af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
138af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
139af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
140af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
141af75078fSIntel 
142af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
143af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
144af75078fSIntel 
145af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
146af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
147af75078fSIntel 
148af75078fSIntel /*
149af75078fSIntel  * Forwarding engines.
150af75078fSIntel  */
151af75078fSIntel struct fwd_engine * fwd_engines[] = {
152af75078fSIntel 	&io_fwd_engine,
153af75078fSIntel 	&mac_fwd_engine,
154d47388f1SCyril Chemparathy 	&mac_swap_engine,
155e9e23a61SCyril Chemparathy 	&flow_gen_engine,
156af75078fSIntel 	&rx_only_engine,
157af75078fSIntel 	&tx_only_engine,
158af75078fSIntel 	&csum_fwd_engine,
159168dfa61SIvan Boule 	&icmp_echo_engine,
1600ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
1610ad778b3SJasvinder Singh 	&softnic_fwd_engine,
1625b590fbeSJasvinder Singh #endif
163af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
164af75078fSIntel 	&ieee1588_fwd_engine,
165af75078fSIntel #endif
166af75078fSIntel 	NULL,
167af75078fSIntel };
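/*
 * Illustrative sketch (assumption, not part of this file; the real lookup is
 * implemented elsewhere in testpmd): a forwarding mode name is resolved by
 * scanning the NULL-terminated fwd_engines[] array above, e.g.:
 *
 *	struct fwd_engine **e;
 *	for (e = fwd_engines; *e != NULL; e++)
 *		if (strcmp((*e)->fwd_mode_name, name) == 0)
 *			cur_fwd_eng = *e;
 *
 * "name" is a placeholder for a user-supplied forwarding mode string.
 */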
168af75078fSIntel 
169af75078fSIntel struct fwd_config cur_fwd_config;
170af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
171bf56fce1SZhihong Wang uint32_t retry_enabled;
172bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
173bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
174af75078fSIntel 
175af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
176c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
177c8798818SIntel                                       * specified on command-line. */
178cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
179d9a191a0SPhil Yang 
180d9a191a0SPhil Yang /*
181d9a191a0SPhil Yang  * In a container, the process running with the 'stats-period' option cannot be
182d9a191a0SPhil Yang  * terminated. Set a flag to exit the stats-period loop after receiving SIGINT/SIGTERM.
183d9a191a0SPhil Yang  */
184d9a191a0SPhil Yang uint8_t f_quit;
185d9a191a0SPhil Yang 
186af75078fSIntel /*
187af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
188af75078fSIntel  */
189af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
190af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
191af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
192af75078fSIntel };
193af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
194af75078fSIntel 
19579bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
19679bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
19779bec05bSKonstantin Ananyev 
198af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
199e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
200af75078fSIntel 
201900550deSIntel /* Whether the current configuration is in DCB mode; 0 means it is not. */
202900550deSIntel uint8_t dcb_config = 0;
203900550deSIntel 
204900550deSIntel /* Whether DCB is in testing status */
205900550deSIntel uint8_t dcb_test = 0;
206900550deSIntel 
207af75078fSIntel /*
208af75078fSIntel  * Configurable number of RX/TX queues.
209af75078fSIntel  */
210af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
211af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
212af75078fSIntel 
213af75078fSIntel /*
214af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2158599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
216af75078fSIntel  */
2178599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2188599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
219af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
220af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
221af75078fSIntel 
222f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
223af75078fSIntel /*
224af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
225af75078fSIntel  */
226af75078fSIntel 
227f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
228f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
229f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
230af75078fSIntel 
231f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
232f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
233f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
234af75078fSIntel 
235af75078fSIntel /*
236af75078fSIntel  * Configurable value of RX free threshold.
237af75078fSIntel  */
238f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
239af75078fSIntel 
240af75078fSIntel /*
241ce8d5614SIntel  * Configurable value of RX drop enable.
242ce8d5614SIntel  */
243f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
244ce8d5614SIntel 
245ce8d5614SIntel /*
246af75078fSIntel  * Configurable value of TX free threshold.
247af75078fSIntel  */
248f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
249af75078fSIntel 
250af75078fSIntel /*
251af75078fSIntel  * Configurable value of TX RS bit threshold.
252af75078fSIntel  */
253f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
254af75078fSIntel 
255af75078fSIntel /*
256af75078fSIntel  * Receive Side Scaling (RSS) configuration.
257af75078fSIntel  */
2588a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
259af75078fSIntel 
260af75078fSIntel /*
261af75078fSIntel  * Port topology configuration
262af75078fSIntel  */
263af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
264af75078fSIntel 
2657741e4cfSIntel /*
2667741e4cfSIntel  * Avoid flushing all the RX streams before starting forwarding.
2677741e4cfSIntel  */
2687741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
2697741e4cfSIntel 
270af75078fSIntel /*
2717ee3e944SVasily Philipov  * Flow API isolated mode.
2727ee3e944SVasily Philipov  */
2737ee3e944SVasily Philipov uint8_t flow_isolate_all;
2747ee3e944SVasily Philipov 
2757ee3e944SVasily Philipov /*
276bc202406SDavid Marchand  * Avoid checking the link status when starting/stopping a port.
277bc202406SDavid Marchand  */
278bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
279bc202406SDavid Marchand 
280bc202406SDavid Marchand /*
2818ea656f8SGaetan Rivet  * Enable link status change notification
2828ea656f8SGaetan Rivet  */
2838ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
2848ea656f8SGaetan Rivet 
2858ea656f8SGaetan Rivet /*
286284c908cSGaetan Rivet  * Enable device removal notification.
287284c908cSGaetan Rivet  */
288284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
289284c908cSGaetan Rivet 
290fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
291fb73e096SJeff Guo 
292284c908cSGaetan Rivet /*
2933af72783SGaetan Rivet  * Display or mask Ethernet events.
2943af72783SGaetan Rivet  * Defaults to all events except VF_MBOX.
2953af72783SGaetan Rivet  */
2963af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
2973af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
2983af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
2993af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
300badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3013af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3023af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
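/*
 * Illustrative sketch (assumption, not part of testpmd): an event of type
 * "type" is displayed when its bit is set in event_print_mask, e.g.:
 *
 *	if (event_print_mask & (UINT32_C(1) << type))
 *		printf("event of type %d received\n", (int)type);
 */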
303e505d84cSAnatoly Burakov /*
304e505d84cSAnatoly Burakov  * Decide if all memory is locked for performance.
305e505d84cSAnatoly Burakov  */
306e505d84cSAnatoly Burakov int do_mlockall = 0;
3073af72783SGaetan Rivet 
3083af72783SGaetan Rivet /*
3097b7e5ba7SIntel  * NIC bypass mode configuration options.
3107b7e5ba7SIntel  */
3117b7e5ba7SIntel 
31250c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3137b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
314e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
3157b7e5ba7SIntel #endif
3167b7e5ba7SIntel 
317e261265eSRadu Nicolau 
31862d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
31962d3216dSReshma Pattan 
32062d3216dSReshma Pattan /*
32162d3216dSReshma Pattan  * Set when latency stats are enabled on the command line.
32262d3216dSReshma Pattan  */
32362d3216dSReshma Pattan uint8_t latencystats_enabled;
32462d3216dSReshma Pattan 
32562d3216dSReshma Pattan /*
32662d3216dSReshma Pattan  * Lcore ID to serve latency statistics.
32762d3216dSReshma Pattan  */
32862d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
32962d3216dSReshma Pattan 
33062d3216dSReshma Pattan #endif
33162d3216dSReshma Pattan 
3327b7e5ba7SIntel /*
333af75078fSIntel  * Ethernet device configuration.
334af75078fSIntel  */
335af75078fSIntel struct rte_eth_rxmode rx_mode = {
336af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
337af75078fSIntel };
338af75078fSIntel 
33907e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
34007e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
34107e5f7bdSShahaf Shuler };
342fd8c20aaSShahaf Shuler 
343af75078fSIntel struct rte_fdir_conf fdir_conf = {
344af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
345af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
346af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
347d9d5e6f2SJingjing Wu 	.mask = {
34826f579aaSWei Zhao 		.vlan_tci_mask = 0xFFEF,
349d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
350d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
351d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
352d9d5e6f2SJingjing Wu 		},
353d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
354d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
355d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356d9d5e6f2SJingjing Wu 		},
357d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
358d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
35947b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
36047b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
36147b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
362d9d5e6f2SJingjing Wu 	},
363af75078fSIntel 	.drop_queue = 127,
364af75078fSIntel };
365af75078fSIntel 
3662950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
367af75078fSIntel 
368ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
369ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
370ed30d9b6SIntel 
371ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
372ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
373ed30d9b6SIntel 
374ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
375ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
376ed30d9b6SIntel 
377a4fd5eeeSElza Mathew /*
378a4fd5eeeSElza Mathew  * Display zero values by default for xstats
379a4fd5eeeSElza Mathew  */
380a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
381a4fd5eeeSElza Mathew 
382c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
383c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
3847acf894dSStephen Hurd 
385e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
3867e4441c8SRemy Horton /* Bitrate statistics */
3877e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
388e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
389e25e6c70SRemy Horton uint8_t bitrate_enabled;
390e25e6c70SRemy Horton #endif
3917e4441c8SRemy Horton 
392b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
393b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
394b40f8d78SJiayu Hu 
3951960be7dSNelio Laranjeiro struct vxlan_encap_conf vxlan_encap_conf = {
3961960be7dSNelio Laranjeiro 	.select_ipv4 = 1,
3971960be7dSNelio Laranjeiro 	.select_vlan = 0,
3981960be7dSNelio Laranjeiro 	.vni = "\x00\x00\x00",
3991960be7dSNelio Laranjeiro 	.udp_src = 0,
4001960be7dSNelio Laranjeiro 	.udp_dst = RTE_BE16(4789),
4011960be7dSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
4021960be7dSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
4031960be7dSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
4041960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
4051960be7dSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
4061960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
4071960be7dSNelio Laranjeiro 	.vlan_tci = 0,
4081960be7dSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
4091960be7dSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
4101960be7dSNelio Laranjeiro };
4111960be7dSNelio Laranjeiro 
412dcd962fcSNelio Laranjeiro struct nvgre_encap_conf nvgre_encap_conf = {
413dcd962fcSNelio Laranjeiro 	.select_ipv4 = 1,
414dcd962fcSNelio Laranjeiro 	.select_vlan = 0,
415dcd962fcSNelio Laranjeiro 	.tni = "\x00\x00\x00",
416dcd962fcSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
417dcd962fcSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
418dcd962fcSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
419dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
420dcd962fcSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
421dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
422dcd962fcSNelio Laranjeiro 	.vlan_tci = 0,
423dcd962fcSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
424dcd962fcSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
425dcd962fcSNelio Laranjeiro };
426dcd962fcSNelio Laranjeiro 
427ed30d9b6SIntel /* Forward function declarations */
42828caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
42928caa76aSZhiyong Yang 						   struct rte_port *port);
430edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
431f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
43276ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
433d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
434fb73e096SJeff Guo static void eth_dev_event_callback(char *device_name,
435fb73e096SJeff Guo 				enum rte_dev_event_type type,
436fb73e096SJeff Guo 				void *param);
437fb73e096SJeff Guo static int eth_dev_event_callback_register(void);
438fb73e096SJeff Guo static int eth_dev_event_callback_unregister(void);
439fb73e096SJeff Guo 
440ce8d5614SIntel 
441ce8d5614SIntel /*
442ce8d5614SIntel  * Check if all the ports are started.
443ce8d5614SIntel  * If yes, return positive value. If not, return zero.
444ce8d5614SIntel  */
445ce8d5614SIntel static int all_ports_started(void);
446ed30d9b6SIntel 
44752f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
44852f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
44952f38a20SJiayu Hu 
450af75078fSIntel /*
45198a7ea33SJerin Jacob  * Helper function to check if a socket has already been discovered.
452c9cafcc8SShahaf Shuler  * If it has not been seen yet, return a positive value; otherwise return zero.
453c9cafcc8SShahaf Shuler  */
454c9cafcc8SShahaf Shuler int
455c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
456c9cafcc8SShahaf Shuler {
457c9cafcc8SShahaf Shuler 	unsigned int i;
458c9cafcc8SShahaf Shuler 
459c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
460c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
461c9cafcc8SShahaf Shuler 			return 0;
462c9cafcc8SShahaf Shuler 	}
463c9cafcc8SShahaf Shuler 	return 1;
464c9cafcc8SShahaf Shuler }
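/*
 * Illustrative usage sketch (mirrors the loop in
 * set_default_fwd_lcores_config() below): record a socket the first time it
 * is seen.
 *
 *	unsigned int sock = rte_lcore_to_socket_id(lcore_id);
 *	if (new_socket_id(sock))
 *		socket_ids[num_sockets++] = sock;
 *
 * "lcore_id" is a placeholder here; the real code also checks the
 * RTE_MAX_NUMA_NODES bound before storing.
 */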
465c9cafcc8SShahaf Shuler 
466c9cafcc8SShahaf Shuler /*
467af75078fSIntel  * Setup default configuration.
468af75078fSIntel  */
469af75078fSIntel static void
470af75078fSIntel set_default_fwd_lcores_config(void)
471af75078fSIntel {
472af75078fSIntel 	unsigned int i;
473af75078fSIntel 	unsigned int nb_lc;
4747acf894dSStephen Hurd 	unsigned int sock_num;
475af75078fSIntel 
476af75078fSIntel 	nb_lc = 0;
477af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
478*dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
479*dbfb8ec7SPhil Yang 			continue;
480c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
481c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
482c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
483c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
484c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
485c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
486c9cafcc8SShahaf Shuler 			}
487c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
4887acf894dSStephen Hurd 		}
489f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
490f54fe5eeSStephen Hurd 			continue;
491f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
492af75078fSIntel 	}
493af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
494af75078fSIntel 	nb_cfg_lcores = nb_lcores;
495af75078fSIntel 	nb_fwd_lcores = 1;
496af75078fSIntel }
497af75078fSIntel 
498af75078fSIntel static void
499af75078fSIntel set_def_peer_eth_addrs(void)
500af75078fSIntel {
501af75078fSIntel 	portid_t i;
502af75078fSIntel 
503af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
504af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
505af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
506af75078fSIntel 	}
507af75078fSIntel }
508af75078fSIntel 
509af75078fSIntel static void
510af75078fSIntel set_default_fwd_ports_config(void)
511af75078fSIntel {
512af75078fSIntel 	portid_t pt_id;
51365a7360cSMatan Azrad 	int i = 0;
514af75078fSIntel 
51565a7360cSMatan Azrad 	RTE_ETH_FOREACH_DEV(pt_id)
51665a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
517af75078fSIntel 
518af75078fSIntel 	nb_cfg_ports = nb_ports;
519af75078fSIntel 	nb_fwd_ports = nb_ports;
520af75078fSIntel }
521af75078fSIntel 
522af75078fSIntel void
523af75078fSIntel set_def_fwd_config(void)
524af75078fSIntel {
525af75078fSIntel 	set_default_fwd_lcores_config();
526af75078fSIntel 	set_def_peer_eth_addrs();
527af75078fSIntel 	set_default_fwd_ports_config();
528af75078fSIntel }
529af75078fSIntel 
530af75078fSIntel /*
531af75078fSIntel  * Configuration initialisation done once at init time.
532af75078fSIntel  */
533af75078fSIntel static void
534af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
535af75078fSIntel 		 unsigned int socket_id)
536af75078fSIntel {
537af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
538bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
539af75078fSIntel 	uint32_t mb_size;
540af75078fSIntel 
541dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
542af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
543148f963fSBruce Richardson 
544285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
545d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
546d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
547d1eb542eSOlivier Matz 
548b19a0c75SOlivier Matz 	if (mp_anon != 0) {
549b19a0c75SOlivier Matz 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
550bece7b6cSChristian Ehrhardt 			mb_size, (unsigned) mb_mempool_cache,
551148f963fSBruce Richardson 			sizeof(struct rte_pktmbuf_pool_private),
552148f963fSBruce Richardson 			socket_id, 0);
55324427bb9SOlivier Matz 		if (rte_mp == NULL)
55424427bb9SOlivier Matz 			goto err;
555b19a0c75SOlivier Matz 
556b19a0c75SOlivier Matz 		if (rte_mempool_populate_anon(rte_mp) == 0) {
557b19a0c75SOlivier Matz 			rte_mempool_free(rte_mp);
558b19a0c75SOlivier Matz 			rte_mp = NULL;
55924427bb9SOlivier Matz 			goto err;
560b19a0c75SOlivier Matz 		}
561b19a0c75SOlivier Matz 		rte_pktmbuf_pool_init(rte_mp, NULL);
562b19a0c75SOlivier Matz 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
563b19a0c75SOlivier Matz 	} else {
564ea0c20eaSOlivier Matz 		/* wrapper to rte_mempool_create() */
5650e798567SPavan Nikhilesh 		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
5660e798567SPavan Nikhilesh 				rte_mbuf_best_mempool_ops());
567ea0c20eaSOlivier Matz 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
568ea0c20eaSOlivier Matz 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
569bece7b6cSChristian Ehrhardt 	}
570148f963fSBruce Richardson 
57124427bb9SOlivier Matz err:
572af75078fSIntel 	if (rte_mp == NULL) {
573d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
574d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
575d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
576148f963fSBruce Richardson 	} else if (verbose_level > 0) {
577591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
578af75078fSIntel 	}
579af75078fSIntel }
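/*
 * Illustrative usage sketch: create the default mbuf pool on socket 0 with
 * the default mbuf data size, as init_config() does when NUMA support is
 * disabled and no --socket-num was given:
 *
 *	mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
 */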
580af75078fSIntel 
58120a0286fSLiu Xiaofeng /*
58220a0286fSLiu Xiaofeng  * Check whether the given socket id is valid in NUMA mode;
58320a0286fSLiu Xiaofeng  * if valid, return 0, else return -1.
58420a0286fSLiu Xiaofeng  */
58520a0286fSLiu Xiaofeng static int
58620a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
58720a0286fSLiu Xiaofeng {
58820a0286fSLiu Xiaofeng 	static int warning_once = 0;
58920a0286fSLiu Xiaofeng 
590c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
59120a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
59220a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
59320a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
59420a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
59520a0286fSLiu Xiaofeng 			       " --numa.\n");
59620a0286fSLiu Xiaofeng 		warning_once = 1;
59720a0286fSLiu Xiaofeng 		return -1;
59820a0286fSLiu Xiaofeng 	}
59920a0286fSLiu Xiaofeng 	return 0;
60020a0286fSLiu Xiaofeng }
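/*
 * Illustrative usage sketch: callers fall back to socket 0 when the id is
 * not one of the discovered sockets (as done in init_config() and
 * init_fwd_streams()):
 *
 *	if (check_socket_id(socket_id) < 0)
 *		socket_id = 0;
 */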
60120a0286fSLiu Xiaofeng 
6023f7311baSWei Dai /*
6033f7311baSWei Dai  * Get the allowed maximum number of RX queues.
6043f7311baSWei Dai  * *pid returns the port id that has the minimal value of
6053f7311baSWei Dai  * max_rx_queues among all ports.
6063f7311baSWei Dai  */
6073f7311baSWei Dai queueid_t
6083f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
6093f7311baSWei Dai {
6103f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
6113f7311baSWei Dai 	portid_t pi;
6123f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
6133f7311baSWei Dai 
6143f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
6153f7311baSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
6163f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
6173f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
6183f7311baSWei Dai 			*pid = pi;
6193f7311baSWei Dai 		}
6203f7311baSWei Dai 	}
6213f7311baSWei Dai 	return allowed_max_rxq;
6223f7311baSWei Dai }
6233f7311baSWei Dai 
6243f7311baSWei Dai /*
6253f7311baSWei Dai  * Check whether the input rxq is valid.
6263f7311baSWei Dai  * If the input rxq does not exceed the maximum number of
6273f7311baSWei Dai  * RX queues of any port, it is valid;
6283f7311baSWei Dai  * if valid, return 0, else return -1.
6293f7311baSWei Dai  */
6303f7311baSWei Dai int
6313f7311baSWei Dai check_nb_rxq(queueid_t rxq)
6323f7311baSWei Dai {
6333f7311baSWei Dai 	queueid_t allowed_max_rxq;
6343f7311baSWei Dai 	portid_t pid = 0;
6353f7311baSWei Dai 
6363f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
6373f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
6383f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
6393f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
6403f7311baSWei Dai 		       rxq,
6413f7311baSWei Dai 		       allowed_max_rxq,
6423f7311baSWei Dai 		       pid);
6433f7311baSWei Dai 		return -1;
6443f7311baSWei Dai 	}
6453f7311baSWei Dai 	return 0;
6463f7311baSWei Dai }
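/*
 * Illustrative usage sketch (assumption; the real check is performed during
 * command-line parsing): reject an RX queue count that no port can honour.
 *
 *	if (check_nb_rxq(nb_rxq_requested) != 0)
 *		rte_exit(EXIT_FAILURE, "invalid rxq count %u\n",
 *			 nb_rxq_requested);
 *
 * "nb_rxq_requested" is a hypothetical variable used only for this example.
 */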
6473f7311baSWei Dai 
64836db4f6cSWei Dai /*
64936db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
65036db4f6cSWei Dai  * *pid returns the port id that has the minimal value of
65136db4f6cSWei Dai  * max_tx_queues among all ports.
65236db4f6cSWei Dai  */
65336db4f6cSWei Dai queueid_t
65436db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
65536db4f6cSWei Dai {
65636db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
65736db4f6cSWei Dai 	portid_t pi;
65836db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
65936db4f6cSWei Dai 
66036db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
66136db4f6cSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
66236db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
66336db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
66436db4f6cSWei Dai 			*pid = pi;
66536db4f6cSWei Dai 		}
66636db4f6cSWei Dai 	}
66736db4f6cSWei Dai 	return allowed_max_txq;
66836db4f6cSWei Dai }
66936db4f6cSWei Dai 
67036db4f6cSWei Dai /*
67136db4f6cSWei Dai  * Check whether the input txq is valid.
67236db4f6cSWei Dai  * If the input txq does not exceed the maximum number of
67336db4f6cSWei Dai  * TX queues of any port, it is valid;
67436db4f6cSWei Dai  * if valid, return 0, else return -1.
67536db4f6cSWei Dai  */
67636db4f6cSWei Dai int
67736db4f6cSWei Dai check_nb_txq(queueid_t txq)
67836db4f6cSWei Dai {
67936db4f6cSWei Dai 	queueid_t allowed_max_txq;
68036db4f6cSWei Dai 	portid_t pid = 0;
68136db4f6cSWei Dai 
68236db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
68336db4f6cSWei Dai 	if (txq > allowed_max_txq) {
68436db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
68536db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
68636db4f6cSWei Dai 		       txq,
68736db4f6cSWei Dai 		       allowed_max_txq,
68836db4f6cSWei Dai 		       pid);
68936db4f6cSWei Dai 		return -1;
69036db4f6cSWei Dai 	}
69136db4f6cSWei Dai 	return 0;
69236db4f6cSWei Dai }
69336db4f6cSWei Dai 
694af75078fSIntel static void
695af75078fSIntel init_config(void)
696af75078fSIntel {
697ce8d5614SIntel 	portid_t pid;
698af75078fSIntel 	struct rte_port *port;
699af75078fSIntel 	struct rte_mempool *mbp;
700af75078fSIntel 	unsigned int nb_mbuf_per_pool;
701af75078fSIntel 	lcoreid_t  lc_id;
7027acf894dSStephen Hurd 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
703b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
70452f38a20SJiayu Hu 	uint32_t gso_types;
705c73a9071SWei Dai 	int k;
706af75078fSIntel 
7077acf894dSStephen Hurd 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
708487f9a59SYulong Pei 
709487f9a59SYulong Pei 	if (numa_support) {
710487f9a59SYulong Pei 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
711487f9a59SYulong Pei 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
712487f9a59SYulong Pei 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
713487f9a59SYulong Pei 	}
714487f9a59SYulong Pei 
715af75078fSIntel 	/* Configuration of logical cores. */
716af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
717af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
718fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
719af75078fSIntel 	if (fwd_lcores == NULL) {
720ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
721ce8d5614SIntel 							"failed\n", nb_lcores);
722af75078fSIntel 	}
723af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
724af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
725af75078fSIntel 					       sizeof(struct fwd_lcore),
726fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
727af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
728ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
729ce8d5614SIntel 								"failed\n");
730af75078fSIntel 		}
731af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
732af75078fSIntel 	}
733af75078fSIntel 
7347d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
735ce8d5614SIntel 		port = &ports[pid];
7368b9bd0efSMoti Haimovsky 		/* Apply default TxRx configuration for all ports */
737fd8c20aaSShahaf Shuler 		port->dev_conf.txmode = tx_mode;
738384161e0SShahaf Shuler 		port->dev_conf.rxmode = rx_mode;
739ce8d5614SIntel 		rte_eth_dev_info_get(pid, &port->dev_info);
7407c45f6c0SFerruh Yigit 
74107e5f7bdSShahaf Shuler 		if (!(port->dev_info.tx_offload_capa &
74207e5f7bdSShahaf Shuler 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
74307e5f7bdSShahaf Shuler 			port->dev_conf.txmode.offloads &=
74407e5f7bdSShahaf Shuler 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
745b6ea6408SIntel 		if (numa_support) {
746b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
747b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
748b6ea6408SIntel 			else {
749b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
75020a0286fSLiu Xiaofeng 
75120a0286fSLiu Xiaofeng 				/* if socket_id is invalid, set to 0 */
75220a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
75320a0286fSLiu Xiaofeng 					socket_id = 0;
754b6ea6408SIntel 				port_per_socket[socket_id]++;
755b6ea6408SIntel 			}
756b6ea6408SIntel 		}
757b6ea6408SIntel 
758c73a9071SWei Dai 		/* Apply Rx offloads configuration */
759c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
760c73a9071SWei Dai 			port->rx_conf[k].offloads =
761c73a9071SWei Dai 				port->dev_conf.rxmode.offloads;
762c73a9071SWei Dai 		/* Apply Tx offloads configuration */
763c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
764c73a9071SWei Dai 			port->tx_conf[k].offloads =
765c73a9071SWei Dai 				port->dev_conf.txmode.offloads;
766c73a9071SWei Dai 
767ce8d5614SIntel 		/* set flag to initialize port/queue */
768ce8d5614SIntel 		port->need_reconfig = 1;
769ce8d5614SIntel 		port->need_reconfig_queues = 1;
770ce8d5614SIntel 	}
771ce8d5614SIntel 
7723ab64341SOlivier Matz 	/*
7733ab64341SOlivier Matz 	 * Create pools of mbufs.
7743ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbufs in
7753ab64341SOlivier Matz 	 * socket 0 memory by default.
7763ab64341SOlivier Matz 	 * Otherwise, create a pool of mbufs in the memory of each detected socket.
7773ab64341SOlivier Matz 	 *
7783ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
7793ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
7803ab64341SOlivier Matz 	 */
7813ab64341SOlivier Matz 	if (param_total_num_mbufs)
7823ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
7833ab64341SOlivier Matz 	else {
7843ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
7853ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
7863ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
7873ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
7883ab64341SOlivier Matz 	}
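	/*
	 * Worked example (illustrative, assuming the testpmd.h defaults
	 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, mb_mempool_cache = 250, and 4 lcores):
	 * 2048 + (4 * 250) + 2048 + 512 = 5608 mbufs, multiplied by
	 * RTE_MAX_ETHPORTS (32 by default) gives 179456 mbufs per pool.
	 */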
7893ab64341SOlivier Matz 
790b6ea6408SIntel 	if (numa_support) {
791b6ea6408SIntel 		uint8_t i;
792ce8d5614SIntel 
793c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
794c9cafcc8SShahaf Shuler 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
795c9cafcc8SShahaf Shuler 					 socket_ids[i]);
7963ab64341SOlivier Matz 	} else {
7973ab64341SOlivier Matz 		if (socket_num == UMA_NO_CONFIG)
7983ab64341SOlivier Matz 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
7993ab64341SOlivier Matz 		else
8003ab64341SOlivier Matz 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
8013ab64341SOlivier Matz 						 socket_num);
8023ab64341SOlivier Matz 	}
803b6ea6408SIntel 
804b6ea6408SIntel 	init_port_config();
8055886ae07SAdrien Mazarguil 
80652f38a20SJiayu Hu 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
807aaacd052SJiayu Hu 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
8085886ae07SAdrien Mazarguil 	/*
8095886ae07SAdrien Mazarguil 	 * Record which mbuf pool each logical core should use, if needed.
8105886ae07SAdrien Mazarguil 	 */
8115886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
8128fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
8138fd8bebcSAdrien Mazarguil 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
8148fd8bebcSAdrien Mazarguil 
8155886ae07SAdrien Mazarguil 		if (mbp == NULL)
8165886ae07SAdrien Mazarguil 			mbp = mbuf_pool_find(0);
8175886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
81852f38a20SJiayu Hu 		/* initialize GSO context */
81952f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
82052f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
82152f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
82252f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
82352f38a20SJiayu Hu 			ETHER_CRC_LEN;
82452f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
8255886ae07SAdrien Mazarguil 	}
8265886ae07SAdrien Mazarguil 
827ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
828ce8d5614SIntel 	if (init_fwd_streams() < 0)
829ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
8300c0db76fSBernard Iremonger 
8310c0db76fSBernard Iremonger 	fwd_config_setup();
832b7091f1dSJiayu Hu 
833b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
834b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
835b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
836b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
837b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
838b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
839b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
840b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
841b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
842b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
843b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
844b7091f1dSJiayu Hu 		}
845b7091f1dSJiayu Hu 	}
8460ad778b3SJasvinder Singh 
8470ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
8480ad778b3SJasvinder Singh 	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
8490ad778b3SJasvinder Singh 		RTE_ETH_FOREACH_DEV(pid) {
8500ad778b3SJasvinder Singh 			port = &ports[pid];
8510ad778b3SJasvinder Singh 			const char *driver = port->dev_info.driver_name;
8520ad778b3SJasvinder Singh 
8530ad778b3SJasvinder Singh 			if (strcmp(driver, "net_softnic") == 0)
8540ad778b3SJasvinder Singh 				port->softport.fwd_lcore_arg = fwd_lcores;
8550ad778b3SJasvinder Singh 		}
8560ad778b3SJasvinder Singh 	}
8570ad778b3SJasvinder Singh #endif
8580ad778b3SJasvinder Singh 
859ce8d5614SIntel }
860ce8d5614SIntel 
8612950a769SDeclan Doherty 
8622950a769SDeclan Doherty void
863a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
8642950a769SDeclan Doherty {
8652950a769SDeclan Doherty 	struct rte_port *port;
8662950a769SDeclan Doherty 
8672950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
8682950a769SDeclan Doherty 	port = &ports[new_port_id];
8692950a769SDeclan Doherty 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
8702950a769SDeclan Doherty 
8712950a769SDeclan Doherty 	/* set flag to initialize port/queue */
8722950a769SDeclan Doherty 	port->need_reconfig = 1;
8732950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
874a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
8752950a769SDeclan Doherty 
8762950a769SDeclan Doherty 	init_port_config();
8772950a769SDeclan Doherty }
8782950a769SDeclan Doherty 
8792950a769SDeclan Doherty 
880ce8d5614SIntel int
881ce8d5614SIntel init_fwd_streams(void)
882ce8d5614SIntel {
883ce8d5614SIntel 	portid_t pid;
884ce8d5614SIntel 	struct rte_port *port;
885ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
8865a8fb55cSReshma Pattan 	queueid_t q;
887ce8d5614SIntel 
888ce8d5614SIntel 	/* set socket id according to numa or not */
8897d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
890ce8d5614SIntel 		port = &ports[pid];
891ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
892ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
893ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
894ce8d5614SIntel 				port->dev_info.max_rx_queues);
895ce8d5614SIntel 			return -1;
896ce8d5614SIntel 		}
897ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
898ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
899ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
900ce8d5614SIntel 				port->dev_info.max_tx_queues);
901ce8d5614SIntel 			return -1;
902ce8d5614SIntel 		}
90320a0286fSLiu Xiaofeng 		if (numa_support) {
90420a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
90520a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
90620a0286fSLiu Xiaofeng 			else {
907b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
90820a0286fSLiu Xiaofeng 
90920a0286fSLiu Xiaofeng 				/* if socket_id is invalid, set to 0 */
91020a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
91120a0286fSLiu Xiaofeng 					port->socket_id = 0;
91220a0286fSLiu Xiaofeng 			}
91320a0286fSLiu Xiaofeng 		}
914b6ea6408SIntel 		else {
915b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
916af75078fSIntel 				port->socket_id = 0;
917b6ea6408SIntel 			else
918b6ea6408SIntel 				port->socket_id = socket_num;
919b6ea6408SIntel 		}
920af75078fSIntel 	}
921af75078fSIntel 
9225a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
9235a8fb55cSReshma Pattan 	if (q == 0) {
9245a8fb55cSReshma Pattan 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
9255a8fb55cSReshma Pattan 		return -1;
9265a8fb55cSReshma Pattan 	}
9275a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
928ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
929ce8d5614SIntel 		return 0;
930ce8d5614SIntel 	/* clear the old */
931ce8d5614SIntel 	if (fwd_streams != NULL) {
932ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
933ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
934ce8d5614SIntel 				continue;
935ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
936ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
937af75078fSIntel 		}
938ce8d5614SIntel 		rte_free(fwd_streams);
939ce8d5614SIntel 		fwd_streams = NULL;
940ce8d5614SIntel 	}
941ce8d5614SIntel 
942ce8d5614SIntel 	/* init new */
943ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
9441f84c469SMatan Azrad 	if (nb_fwd_streams) {
945ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
9461f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
9471f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
948ce8d5614SIntel 		if (fwd_streams == NULL)
9491f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
9501f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
9511f84c469SMatan Azrad 				 nb_fwd_streams);
952ce8d5614SIntel 
953af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
9541f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
9551f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
9561f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
957ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
9581f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
9591f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
9601f84c469SMatan Azrad 		}
961af75078fSIntel 	}
962ce8d5614SIntel 
963ce8d5614SIntel 	return 0;
964af75078fSIntel }
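/*
 * Worked example (illustrative): with 2 forwarding ports, nb_rxq = 4 and
 * nb_txq = 2, init_fwd_streams() allocates
 * nb_ports * RTE_MAX(nb_rxq, nb_txq) = 2 * 4 = 8 forwarding streams.
 */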
965af75078fSIntel 
966af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
967af75078fSIntel static void
968af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
969af75078fSIntel {
970af75078fSIntel 	unsigned int total_burst;
971af75078fSIntel 	unsigned int nb_burst;
972af75078fSIntel 	unsigned int burst_stats[3];
973af75078fSIntel 	uint16_t pktnb_stats[3];
974af75078fSIntel 	uint16_t nb_pkt;
975af75078fSIntel 	int burst_percent[3];
976af75078fSIntel 
977af75078fSIntel 	/*
978af75078fSIntel 	 * First compute the total number of packet bursts and the
979af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
980af75078fSIntel 	 */
981af75078fSIntel 	total_burst = 0;
982af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
983af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
984af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
985af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
986af75078fSIntel 		if (nb_burst == 0)
987af75078fSIntel 			continue;
988af75078fSIntel 		total_burst += nb_burst;
989af75078fSIntel 		if (nb_burst > burst_stats[0]) {
990af75078fSIntel 			burst_stats[1] = burst_stats[0];
991af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
992af75078fSIntel 			burst_stats[0] = nb_burst;
993af75078fSIntel 			pktnb_stats[0] = nb_pkt;
994fe613657SDaniel Shelepov 		} else if (nb_burst > burst_stats[1]) {
995fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
996fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
997af75078fSIntel 		}
998af75078fSIntel 	}
999af75078fSIntel 	if (total_burst == 0)
1000af75078fSIntel 		return;
1001af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1002af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1003af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
1004af75078fSIntel 	if (burst_stats[0] == total_burst) {
1005af75078fSIntel 		printf("]\n");
1006af75078fSIntel 		return;
1007af75078fSIntel 	}
1008af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1009af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
1010af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
1011af75078fSIntel 		return;
1012af75078fSIntel 	}
1013af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1014af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1015af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1016af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1017af75078fSIntel 		return;
1018af75078fSIntel 	}
1019af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1020af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1021af75078fSIntel }
1022af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
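/*
 * Worked example (illustrative): if 1000 bursts were recorded, 700 of them
 * with 32 packets and 200 with 16 packets, pkt_burst_stats_display() prints
 * something like:
 *
 *   RX-bursts : 1000 [70% of 32 pkts + 20% of 16 pkts + 10% of others]
 */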
1023af75078fSIntel 
1024af75078fSIntel static void
1025af75078fSIntel fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1026af75078fSIntel {
1027af75078fSIntel 	struct rte_port *port;
1028013af9b6SIntel 	uint8_t i;
1029af75078fSIntel 
1030af75078fSIntel 	static const char *fwd_stats_border = "----------------------";
1031af75078fSIntel 
1032af75078fSIntel 	port = &ports[port_id];
1033af75078fSIntel 	printf("\n  %s Forward statistics for port %-2d %s\n",
1034af75078fSIntel 	       fwd_stats_border, port_id, fwd_stats_border);
1035013af9b6SIntel 
1036013af9b6SIntel 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1037af75078fSIntel 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1038af75078fSIntel 		       "%-"PRIu64"\n",
103970bdb186SIvan Boule 		       stats->ipackets, stats->imissed,
104070bdb186SIvan Boule 		       (uint64_t) (stats->ipackets + stats->imissed));
1041af75078fSIntel 
1042af75078fSIntel 		if (cur_fwd_eng == &csum_fwd_engine)
1043af75078fSIntel 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
1044af75078fSIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
104586057c99SIgor Ryzhov 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1046f72a0fa6SStephen Hemminger 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
104770bdb186SIvan Boule 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
104870bdb186SIvan Boule 		}
1049af75078fSIntel 
1050af75078fSIntel 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1051af75078fSIntel 		       "%-"PRIu64"\n",
1052af75078fSIntel 		       stats->opackets, port->tx_dropped,
1053af75078fSIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
1054013af9b6SIntel 	}
1055013af9b6SIntel 	else {
1056013af9b6SIntel 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
1057013af9b6SIntel 		       "%14"PRIu64"\n",
105870bdb186SIvan Boule 		       stats->ipackets, stats->imissed,
105970bdb186SIvan Boule 		       (uint64_t) (stats->ipackets + stats->imissed));
1060013af9b6SIntel 
1061013af9b6SIntel 		if (cur_fwd_eng == &csum_fwd_engine)
1062013af9b6SIntel 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
1063013af9b6SIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
106486057c99SIgor Ryzhov 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1065f72a0fa6SStephen Hemminger 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
106670bdb186SIvan Boule 			printf("  RX-nombufs:             %14"PRIu64"\n",
106770bdb186SIvan Boule 			       stats->rx_nombuf);
106870bdb186SIvan Boule 		}
1069013af9b6SIntel 
1070013af9b6SIntel 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1071013af9b6SIntel 		       "%14"PRIu64"\n",
1072013af9b6SIntel 		       stats->opackets, port->tx_dropped,
1073013af9b6SIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
1074013af9b6SIntel 	}
1075e659b6b4SIvan Boule 
1076af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1077af75078fSIntel 	if (port->rx_stream)
1078013af9b6SIntel 		pkt_burst_stats_display("RX",
1079013af9b6SIntel 			&port->rx_stream->rx_burst_stats);
1080af75078fSIntel 	if (port->tx_stream)
1081013af9b6SIntel 		pkt_burst_stats_display("TX",
1082013af9b6SIntel 			&port->tx_stream->tx_burst_stats);
1083af75078fSIntel #endif
1084af75078fSIntel 
1085013af9b6SIntel 	if (port->rx_queue_stats_mapping_enabled) {
1086013af9b6SIntel 		printf("\n");
1087013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1088013af9b6SIntel 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1089013af9b6SIntel 			       "     RX-errors:%14"PRIu64
1090013af9b6SIntel 			       "    RX-bytes:%14"PRIu64"\n",
1091013af9b6SIntel 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1092013af9b6SIntel 		}
1093013af9b6SIntel 		printf("\n");
1094013af9b6SIntel 	}
1095013af9b6SIntel 	if (port->tx_queue_stats_mapping_enabled) {
1096013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1097013af9b6SIntel 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1098013af9b6SIntel 			       "                                 TX-bytes:%14"PRIu64"\n",
1099013af9b6SIntel 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1100013af9b6SIntel 		}
1101013af9b6SIntel 	}
1102013af9b6SIntel 
1103af75078fSIntel 	printf("  %s--------------------------------%s\n",
1104af75078fSIntel 	       fwd_stats_border, fwd_stats_border);
1105af75078fSIntel }
1106af75078fSIntel 
1107af75078fSIntel static void
1108af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1109af75078fSIntel {
1110af75078fSIntel 	struct fwd_stream *fs;
1111af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1112af75078fSIntel 
1113af75078fSIntel 	fs = fwd_streams[stream_id];
1114af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1115af75078fSIntel 	    (fs->fwd_dropped == 0))
1116af75078fSIntel 		return;
1117af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1118af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1119af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1120af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1121af75078fSIntel 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1122af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1123af75078fSIntel 
1124af75078fSIntel 	/* if checksum mode */
1125af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1126013af9b6SIntel 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1127013af9b6SIntel 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1128af75078fSIntel 	}
1129af75078fSIntel 
1130af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1131af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1132af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1133af75078fSIntel #endif
1134af75078fSIntel }
1135af75078fSIntel 
1136af75078fSIntel static void
11377741e4cfSIntel flush_fwd_rx_queues(void)
1138af75078fSIntel {
1139af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1140af75078fSIntel 	portid_t  rxp;
11417741e4cfSIntel 	portid_t port_id;
1142af75078fSIntel 	queueid_t rxq;
1143af75078fSIntel 	uint16_t  nb_rx;
1144af75078fSIntel 	uint16_t  i;
1145af75078fSIntel 	uint8_t   j;
1146f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1147594302c7SJames Poole 	uint64_t timer_period;
1148f487715fSReshma Pattan 
1149f487715fSReshma Pattan 	/* convert to number of cycles */
1150594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1151af75078fSIntel 
1152af75078fSIntel 	for (j = 0; j < 2; j++) {
11537741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1154af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
11557741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1156f487715fSReshma Pattan 				/**
1157f487715fSReshma Pattan 				* testpmd can get stuck in the do-while loop below
1158f487715fSReshma Pattan 				* if rte_eth_rx_burst() keeps returning nonzero
1159f487715fSReshma Pattan 				* packets, so a timer is added to exit this loop
1160f487715fSReshma Pattan 				* after a 1-second timeout.
1161f487715fSReshma Pattan 				*/
1162f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1163af75078fSIntel 				do {
11647741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1165013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1166af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1167af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1168f487715fSReshma Pattan 
1169f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1170f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1171f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1172f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1173f487715fSReshma Pattan 					(timer_tsc < timer_period));
1174f487715fSReshma Pattan 				timer_tsc = 0;
1175af75078fSIntel 			}
1176af75078fSIntel 		}
1177af75078fSIntel 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1178af75078fSIntel 	}
1179af75078fSIntel }
1180af75078fSIntel 
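/*
 * Main forwarding loop of a logical core: invoke the packet forwarding
 * function on each stream assigned to the lcore until it is told to stop.
 */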
1181af75078fSIntel static void
1182af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1183af75078fSIntel {
1184af75078fSIntel 	struct fwd_stream **fsm;
1185af75078fSIntel 	streamid_t nb_fs;
1186af75078fSIntel 	streamid_t sm_id;
11877e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
11887e4441c8SRemy Horton 	uint64_t tics_per_1sec;
11897e4441c8SRemy Horton 	uint64_t tics_datum;
11907e4441c8SRemy Horton 	uint64_t tics_current;
11914918a357SXiaoyun Li 	uint16_t i, cnt_ports;
1192af75078fSIntel 
11934918a357SXiaoyun Li 	cnt_ports = nb_ports;
11947e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
11957e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
11967e4441c8SRemy Horton #endif
1197af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
1198af75078fSIntel 	nb_fs = fc->stream_nb;
1199af75078fSIntel 	do {
1200af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1201af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
12027e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
1203e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
1204e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
12057e4441c8SRemy Horton 			tics_current = rte_rdtsc();
12067e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
12077e4441c8SRemy Horton 				/* Periodic bitrate calculation */
12084918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
1209e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
12104918a357SXiaoyun Li 						ports_ids[i]);
12117e4441c8SRemy Horton 				tics_datum = tics_current;
12127e4441c8SRemy Horton 			}
1213e25e6c70SRemy Horton 		}
12147e4441c8SRemy Horton #endif
121562d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
121665eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
121765eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
121862d3216dSReshma Pattan 			rte_latencystats_update();
121962d3216dSReshma Pattan #endif
122062d3216dSReshma Pattan 
1221af75078fSIntel 	} while (! fc->stopped);
1222af75078fSIntel }
1223af75078fSIntel 
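/* Entry point of a forwarding lcore launched by launch_packet_forwarding(). */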
1224af75078fSIntel static int
1225af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1226af75078fSIntel {
1227af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1228af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1229af75078fSIntel 	return 0;
1230af75078fSIntel }
1231af75078fSIntel 
1232af75078fSIntel /*
1233af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1234af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1235af75078fSIntel  */
1236af75078fSIntel static int
1237af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1238af75078fSIntel {
1239af75078fSIntel 	struct fwd_lcore *fwd_lc;
1240af75078fSIntel 	struct fwd_lcore tmp_lcore;
1241af75078fSIntel 
1242af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1243af75078fSIntel 	tmp_lcore = *fwd_lc;
1244af75078fSIntel 	tmp_lcore.stopped = 1;
1245af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1246af75078fSIntel 	return 0;
1247af75078fSIntel }
1248af75078fSIntel 
1249af75078fSIntel /*
1250af75078fSIntel  * Launch packet forwarding:
1251af75078fSIntel  *     - Set up the per-port forwarding context.
1252af75078fSIntel  *     - Launch the logical cores with their forwarding configuration.
1253af75078fSIntel  */
1254af75078fSIntel static void
1255af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1256af75078fSIntel {
1257af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1258af75078fSIntel 	unsigned int i;
1259af75078fSIntel 	unsigned int lc_id;
1260af75078fSIntel 	int diag;
1261af75078fSIntel 
1262af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1263af75078fSIntel 	if (port_fwd_begin != NULL) {
1264af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1265af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1266af75078fSIntel 	}
1267af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1268af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1269af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1270af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1271af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1272af75078fSIntel 						     fwd_lcores[i], lc_id);
1273af75078fSIntel 			if (diag != 0)
1274af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1275af75078fSIntel 				       lc_id, diag);
1276af75078fSIntel 		}
1277af75078fSIntel 	}
1278af75078fSIntel }
1279af75078fSIntel 
1280af75078fSIntel /*
128103ce2c53SMatan Azrad  * Update the forward ports list.
128203ce2c53SMatan Azrad  */
128303ce2c53SMatan Azrad void
128403ce2c53SMatan Azrad update_fwd_ports(portid_t new_pid)
128503ce2c53SMatan Azrad {
128603ce2c53SMatan Azrad 	unsigned int i;
128703ce2c53SMatan Azrad 	unsigned int new_nb_fwd_ports = 0;
128803ce2c53SMatan Azrad 	int move = 0;
128903ce2c53SMatan Azrad 
129003ce2c53SMatan Azrad 	for (i = 0; i < nb_fwd_ports; ++i) {
129103ce2c53SMatan Azrad 		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
129203ce2c53SMatan Azrad 			move = 1;
129303ce2c53SMatan Azrad 		else if (move)
129403ce2c53SMatan Azrad 			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
129503ce2c53SMatan Azrad 		else
129603ce2c53SMatan Azrad 			new_nb_fwd_ports++;
129703ce2c53SMatan Azrad 	}
129803ce2c53SMatan Azrad 	if (new_pid < RTE_MAX_ETHPORTS)
129903ce2c53SMatan Azrad 		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
130003ce2c53SMatan Azrad 
130103ce2c53SMatan Azrad 	nb_fwd_ports = new_nb_fwd_ports;
130203ce2c53SMatan Azrad 	nb_cfg_ports = new_nb_fwd_ports;
130303ce2c53SMatan Azrad }
130403ce2c53SMatan Azrad 
130503ce2c53SMatan Azrad /*
1306af75078fSIntel  * Set up the packet forwarding configuration and launch packet forwarding.
1307af75078fSIntel  */
1308af75078fSIntel void
1309af75078fSIntel start_packet_forwarding(int with_tx_first)
1310af75078fSIntel {
1311af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1312af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1313af75078fSIntel 	struct rte_port *port;
1314af75078fSIntel 	unsigned int i;
1315af75078fSIntel 	portid_t   pt_id;
1316af75078fSIntel 	streamid_t sm_id;
1317af75078fSIntel 
13185a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
13195a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
13205a8fb55cSReshma Pattan 
13215a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
13225a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
13235a8fb55cSReshma Pattan 
13245a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
13255a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
13265a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
13275a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
13285a8fb55cSReshma Pattan 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
13295a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
13305a8fb55cSReshma Pattan 
1331ce8d5614SIntel 	if (all_ports_started() == 0) {
1332ce8d5614SIntel 		printf("Not all ports were started\n");
1333ce8d5614SIntel 		return;
1334ce8d5614SIntel 	}
1335af75078fSIntel 	if (test_done == 0) {
1336af75078fSIntel 		printf("Packet forwarding already started\n");
1337af75078fSIntel 		return;
1338af75078fSIntel 	}
1339edf87b4aSBernard Iremonger 
1340edf87b4aSBernard Iremonger 
13417741e4cfSIntel 	if (dcb_test) {
13427741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
13437741e4cfSIntel 			pt_id = fwd_ports_ids[i];
13447741e4cfSIntel 			port = &ports[pt_id];
13457741e4cfSIntel 			if (!port->dcb_flag) {
13467741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
13477741e4cfSIntel 				       "be configured in this mode.\n");
1348013af9b6SIntel 				return;
1349013af9b6SIntel 			}
13507741e4cfSIntel 		}
13517741e4cfSIntel 		if (nb_fwd_lcores == 1) {
13527741e4cfSIntel 			printf("In DCB mode, the number of forwarding cores "
13537741e4cfSIntel 			       "should be larger than 1.\n");
13547741e4cfSIntel 			return;
13557741e4cfSIntel 		}
13567741e4cfSIntel 	}
1357af75078fSIntel 	test_done = 0;
13587741e4cfSIntel 
135947a767b2SMatan Azrad 	fwd_config_setup();
136047a767b2SMatan Azrad 
13617741e4cfSIntel 	if (!no_flush_rx)
13627741e4cfSIntel 		flush_fwd_rx_queues();
13637741e4cfSIntel 
1364933617d8SZhihong Wang 	pkt_fwd_config_display(&cur_fwd_config);
1365af75078fSIntel 	rxtx_config_display();
1366af75078fSIntel 
1367af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1368af75078fSIntel 		pt_id = fwd_ports_ids[i];
1369af75078fSIntel 		port = &ports[pt_id];
1370af75078fSIntel 		rte_eth_stats_get(pt_id, &port->stats);
1371af75078fSIntel 		port->tx_dropped = 0;
1372013af9b6SIntel 
1373013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
1374af75078fSIntel 	}
1375af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1376af75078fSIntel 		fwd_streams[sm_id]->rx_packets = 0;
1377af75078fSIntel 		fwd_streams[sm_id]->tx_packets = 0;
1378af75078fSIntel 		fwd_streams[sm_id]->fwd_dropped = 0;
1379af75078fSIntel 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1380af75078fSIntel 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1381af75078fSIntel 
1382af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1383af75078fSIntel 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1384af75078fSIntel 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1385af75078fSIntel 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1386af75078fSIntel 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1387af75078fSIntel #endif
1388af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1389af75078fSIntel 		fwd_streams[sm_id]->core_cycles = 0;
1390af75078fSIntel #endif
1391af75078fSIntel 	}
1392af75078fSIntel 	if (with_tx_first) {
1393af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1394af75078fSIntel 		if (port_fwd_begin != NULL) {
1395af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1396af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
1397af75078fSIntel 		}
1398acbf77a6SZhihong Wang 		while (with_tx_first--) {
1399acbf77a6SZhihong Wang 			launch_packet_forwarding(
1400acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
1401af75078fSIntel 			rte_eal_mp_wait_lcore();
1402acbf77a6SZhihong Wang 		}
1403af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
1404af75078fSIntel 		if (port_fwd_end != NULL) {
1405af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1406af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
1407af75078fSIntel 		}
1408af75078fSIntel 	}
1409af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
1410af75078fSIntel }
1411af75078fSIntel 
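/*
 * Stop packet forwarding: tell the forwarding lcores to stop, wait for them
 * to finish and display the per-port and accumulated forwarding statistics.
 */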
1412af75078fSIntel void
1413af75078fSIntel stop_packet_forwarding(void)
1414af75078fSIntel {
1415af75078fSIntel 	struct rte_eth_stats stats;
1416af75078fSIntel 	struct rte_port *port;
1417af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1418af75078fSIntel 	int i;
1419af75078fSIntel 	portid_t   pt_id;
1420af75078fSIntel 	streamid_t sm_id;
1421af75078fSIntel 	lcoreid_t  lc_id;
1422af75078fSIntel 	uint64_t total_recv;
1423af75078fSIntel 	uint64_t total_xmit;
1424af75078fSIntel 	uint64_t total_rx_dropped;
1425af75078fSIntel 	uint64_t total_tx_dropped;
1426af75078fSIntel 	uint64_t total_rx_nombuf;
1427af75078fSIntel 	uint64_t tx_dropped;
1428af75078fSIntel 	uint64_t rx_bad_ip_csum;
1429af75078fSIntel 	uint64_t rx_bad_l4_csum;
1430af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1431af75078fSIntel 	uint64_t fwd_cycles;
1432af75078fSIntel #endif
1433b7091f1dSJiayu Hu 
1434af75078fSIntel 	static const char *acc_stats_border = "+++++++++++++++";
1435af75078fSIntel 
1436af75078fSIntel 	if (test_done) {
1437af75078fSIntel 		printf("Packet forwarding not started\n");
1438af75078fSIntel 		return;
1439af75078fSIntel 	}
1440af75078fSIntel 	printf("Telling cores to stop...");
1441af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1442af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1443af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1444af75078fSIntel 	rte_eal_mp_wait_lcore();
1445af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1446af75078fSIntel 	if (port_fwd_end != NULL) {
1447af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1448af75078fSIntel 			pt_id = fwd_ports_ids[i];
1449af75078fSIntel 			(*port_fwd_end)(pt_id);
1450af75078fSIntel 		}
1451af75078fSIntel 	}
1452af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1453af75078fSIntel 	fwd_cycles = 0;
1454af75078fSIntel #endif
1455af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1456af75078fSIntel 		if (cur_fwd_config.nb_fwd_streams >
1457af75078fSIntel 		    cur_fwd_config.nb_fwd_ports) {
1458af75078fSIntel 			fwd_stream_stats_display(sm_id);
1459af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1460af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1461af75078fSIntel 		} else {
1462af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1463af75078fSIntel 				fwd_streams[sm_id];
1464af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1465af75078fSIntel 				fwd_streams[sm_id];
1466af75078fSIntel 		}
1467af75078fSIntel 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1468af75078fSIntel 		tx_dropped = (uint64_t) (tx_dropped +
1469af75078fSIntel 					 fwd_streams[sm_id]->fwd_dropped);
1470af75078fSIntel 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1471af75078fSIntel 
1472013af9b6SIntel 		rx_bad_ip_csum =
1473013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1474af75078fSIntel 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1475af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1476013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1477013af9b6SIntel 							rx_bad_ip_csum;
1478af75078fSIntel 
1479013af9b6SIntel 		rx_bad_l4_csum =
1480013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1481af75078fSIntel 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1482af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1483013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1484013af9b6SIntel 							rx_bad_l4_csum;
1485af75078fSIntel 
1486af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1487af75078fSIntel 		fwd_cycles = (uint64_t) (fwd_cycles +
1488af75078fSIntel 					 fwd_streams[sm_id]->core_cycles);
1489af75078fSIntel #endif
1490af75078fSIntel 	}
1491af75078fSIntel 	total_recv = 0;
1492af75078fSIntel 	total_xmit = 0;
1493af75078fSIntel 	total_rx_dropped = 0;
1494af75078fSIntel 	total_tx_dropped = 0;
1495af75078fSIntel 	total_rx_nombuf  = 0;
14967741e4cfSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1497af75078fSIntel 		pt_id = fwd_ports_ids[i];
1498af75078fSIntel 
1499af75078fSIntel 		port = &ports[pt_id];
1500af75078fSIntel 		rte_eth_stats_get(pt_id, &stats);
1501af75078fSIntel 		stats.ipackets -= port->stats.ipackets;
1502af75078fSIntel 		port->stats.ipackets = 0;
1503af75078fSIntel 		stats.opackets -= port->stats.opackets;
1504af75078fSIntel 		port->stats.opackets = 0;
1505af75078fSIntel 		stats.ibytes   -= port->stats.ibytes;
1506af75078fSIntel 		port->stats.ibytes = 0;
1507af75078fSIntel 		stats.obytes   -= port->stats.obytes;
1508af75078fSIntel 		port->stats.obytes = 0;
150970bdb186SIvan Boule 		stats.imissed  -= port->stats.imissed;
151070bdb186SIvan Boule 		port->stats.imissed = 0;
1511af75078fSIntel 		stats.oerrors  -= port->stats.oerrors;
1512af75078fSIntel 		port->stats.oerrors = 0;
1513af75078fSIntel 		stats.rx_nombuf -= port->stats.rx_nombuf;
1514af75078fSIntel 		port->stats.rx_nombuf = 0;
1515af75078fSIntel 
1516af75078fSIntel 		total_recv += stats.ipackets;
1517af75078fSIntel 		total_xmit += stats.opackets;
151870bdb186SIvan Boule 		total_rx_dropped += stats.imissed;
1519af75078fSIntel 		total_tx_dropped += port->tx_dropped;
1520af75078fSIntel 		total_rx_nombuf  += stats.rx_nombuf;
1521af75078fSIntel 
1522af75078fSIntel 		fwd_port_stats_display(pt_id, &stats);
1523af75078fSIntel 	}
1524b7091f1dSJiayu Hu 
1525af75078fSIntel 	printf("\n  %s Accumulated forward statistics for all ports"
1526af75078fSIntel 	       "%s\n",
1527af75078fSIntel 	       acc_stats_border, acc_stats_border);
1528af75078fSIntel 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1529af75078fSIntel 	       "%-"PRIu64"\n"
1530af75078fSIntel 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1531af75078fSIntel 	       "%-"PRIu64"\n",
1532af75078fSIntel 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1533af75078fSIntel 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1534af75078fSIntel 	if (total_rx_nombuf > 0)
1535af75078fSIntel 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1536af75078fSIntel 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1537af75078fSIntel 	       "%s\n",
1538af75078fSIntel 	       acc_stats_border, acc_stats_border);
1539af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1540af75078fSIntel 	if (total_recv > 0)
1541af75078fSIntel 		printf("\n  CPU cycles/packet=%u (total cycles="
1542af75078fSIntel 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1543af75078fSIntel 		       (unsigned int)(fwd_cycles / total_recv),
1544af75078fSIntel 		       fwd_cycles, total_recv);
1545af75078fSIntel #endif
1546af75078fSIntel 	printf("\nDone.\n");
1547af75078fSIntel 	test_done = 1;
1548af75078fSIntel }
1549af75078fSIntel 
1550cfae07fdSOuyang Changchun void
1551cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1552cfae07fdSOuyang Changchun {
1553492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1554cfae07fdSOuyang Changchun 		printf("\nSet link up failed.\n");
1555cfae07fdSOuyang Changchun }
1556cfae07fdSOuyang Changchun 
1557cfae07fdSOuyang Changchun void
1558cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1559cfae07fdSOuyang Changchun {
1560492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
1561cfae07fdSOuyang Changchun 		printf("\nSet link down failed.\n");
1562cfae07fdSOuyang Changchun }
1563cfae07fdSOuyang Changchun 
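/* Return 1 if all non-slave ports are in the started state, 0 otherwise. */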
1564ce8d5614SIntel static int
1565ce8d5614SIntel all_ports_started(void)
1566ce8d5614SIntel {
1567ce8d5614SIntel 	portid_t pi;
1568ce8d5614SIntel 	struct rte_port *port;
1569ce8d5614SIntel 
15707d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1571ce8d5614SIntel 		port = &ports[pi];
1572ce8d5614SIntel 		/* Check if there is a port which is not started */
157341b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
157441b05095SBernard Iremonger 			(port->slave_flag == 0))
1575ce8d5614SIntel 			return 0;
1576ce8d5614SIntel 	}
1577ce8d5614SIntel 
1578ce8d5614SIntel 	/* All ports are started */
1579ce8d5614SIntel 	return 1;
1580ce8d5614SIntel }
1581ce8d5614SIntel 
1582148f963fSBruce Richardson int
15836018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
15846018eb8cSShahaf Shuler {
15856018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
15866018eb8cSShahaf Shuler 
15876018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
15886018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
15896018eb8cSShahaf Shuler 		return 0;
15906018eb8cSShahaf Shuler 	return 1;
15916018eb8cSShahaf Shuler }
15926018eb8cSShahaf Shuler 
15936018eb8cSShahaf Shuler int
1594edab33b1STetsuya Mukawa all_ports_stopped(void)
1595edab33b1STetsuya Mukawa {
1596edab33b1STetsuya Mukawa 	portid_t pi;
1597edab33b1STetsuya Mukawa 
15987d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
15996018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
1600edab33b1STetsuya Mukawa 			return 0;
1601edab33b1STetsuya Mukawa 	}
1602edab33b1STetsuya Mukawa 
1603edab33b1STetsuya Mukawa 	return 1;
1604edab33b1STetsuya Mukawa }
1605edab33b1STetsuya Mukawa 
1606edab33b1STetsuya Mukawa int
1607edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
1608edab33b1STetsuya Mukawa {
1609edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1610edab33b1STetsuya Mukawa 		return 0;
1611edab33b1STetsuya Mukawa 
1612edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1613edab33b1STetsuya Mukawa 		return 0;
1614edab33b1STetsuya Mukawa 
1615edab33b1STetsuya Mukawa 	return 1;
1616edab33b1STetsuya Mukawa }
1617edab33b1STetsuya Mukawa 
1618edab33b1STetsuya Mukawa static int
1619edab33b1STetsuya Mukawa port_is_closed(portid_t port_id)
1620edab33b1STetsuya Mukawa {
1621edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1622edab33b1STetsuya Mukawa 		return 0;
1623edab33b1STetsuya Mukawa 
1624edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1625edab33b1STetsuya Mukawa 		return 0;
1626edab33b1STetsuya Mukawa 
1627edab33b1STetsuya Mukawa 	return 1;
1628edab33b1STetsuya Mukawa }
1629edab33b1STetsuya Mukawa 
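/*
 * Start the given port (or all ports if RTE_PORT_ALL): reconfigure the device
 * and its RX/TX queues if needed, start it and register the ethdev event
 * callbacks. Return 0 on success, -1 on failure.
 */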
1630edab33b1STetsuya Mukawa int
1631ce8d5614SIntel start_port(portid_t pid)
1632ce8d5614SIntel {
163392d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
1634ce8d5614SIntel 	portid_t pi;
1635ce8d5614SIntel 	queueid_t qi;
1636ce8d5614SIntel 	struct rte_port *port;
16372950a769SDeclan Doherty 	struct ether_addr mac_addr;
163876ad4a2dSGaetan Rivet 	enum rte_eth_event_type event_type;
1639ce8d5614SIntel 
16404468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
16414468635fSMichael Qiu 		return 0;
16424468635fSMichael Qiu 
1643ce8d5614SIntel 	if (dcb_config)
1644ce8d5614SIntel 		dcb_test = 1;
16457d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1646edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1647ce8d5614SIntel 			continue;
1648ce8d5614SIntel 
164992d2703eSMichael Qiu 		need_check_link_status = 0;
1650ce8d5614SIntel 		port = &ports[pi];
1651ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1652ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1653ce8d5614SIntel 			printf("Port %d is not stopped\n", pi);
1654ce8d5614SIntel 			continue;
1655ce8d5614SIntel 		}
1656ce8d5614SIntel 
1657ce8d5614SIntel 		if (port->need_reconfig > 0) {
1658ce8d5614SIntel 			port->need_reconfig = 0;
1659ce8d5614SIntel 
16607ee3e944SVasily Philipov 			if (flow_isolate_all) {
16617ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
16627ee3e944SVasily Philipov 				if (ret) {
16637ee3e944SVasily Philipov 					printf("Failed to apply isolated"
16647ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
16657ee3e944SVasily Philipov 					return -1;
16667ee3e944SVasily Philipov 				}
16677ee3e944SVasily Philipov 			}
16687ee3e944SVasily Philipov 
16695706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
167020a0286fSLiu Xiaofeng 					port->socket_id);
1671ce8d5614SIntel 			/* configure port */
1672ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1673ce8d5614SIntel 						&(port->dev_conf));
1674ce8d5614SIntel 			if (diag != 0) {
1675ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1676ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1677ce8d5614SIntel 					printf("Port %d can not be set back "
1678ce8d5614SIntel 							"to stopped\n", pi);
1679ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
1680ce8d5614SIntel 				/* try to reconfigure port next time */
1681ce8d5614SIntel 				port->need_reconfig = 1;
1682148f963fSBruce Richardson 				return -1;
1683ce8d5614SIntel 			}
1684ce8d5614SIntel 		}
1685ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
1686ce8d5614SIntel 			port->need_reconfig_queues = 0;
1687ce8d5614SIntel 			/* setup tx queues */
1688ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
1689b6ea6408SIntel 				if ((numa_support) &&
1690b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
1691b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1692d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
1693d44f8a48SQi Zhang 						txring_numa[pi],
1694d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
1695b6ea6408SIntel 				else
1696b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1697d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
1698d44f8a48SQi Zhang 						port->socket_id,
1699d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
1700b6ea6408SIntel 
1701ce8d5614SIntel 				if (diag == 0)
1702ce8d5614SIntel 					continue;
1703ce8d5614SIntel 
1704ce8d5614SIntel 				/* Fail to setup tx queue, return */
1705ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1706ce8d5614SIntel 							RTE_PORT_HANDLING,
1707ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1708ce8d5614SIntel 					printf("Port %d can not be set back "
1709ce8d5614SIntel 							"to stopped\n", pi);
1710d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
1711d44f8a48SQi Zhang 				       pi);
1712ce8d5614SIntel 				/* try to reconfigure queues next time */
1713ce8d5614SIntel 				port->need_reconfig_queues = 1;
1714148f963fSBruce Richardson 				return -1;
1715ce8d5614SIntel 			}
1716ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
1717d44f8a48SQi Zhang 				/* setup rx queues */
1718b6ea6408SIntel 				if ((numa_support) &&
1719b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1720b6ea6408SIntel 					struct rte_mempool * mp =
1721b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
1722b6ea6408SIntel 					if (mp == NULL) {
1723b6ea6408SIntel 						printf("Failed to setup RX queue: "
1724b6ea6408SIntel 							"no mempool allocation"
1725b6ea6408SIntel 							" on socket %d\n",
1726b6ea6408SIntel 							rxring_numa[pi]);
1727148f963fSBruce Richardson 						return -1;
1728b6ea6408SIntel 					}
1729b6ea6408SIntel 
1730b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1731d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
1732d44f8a48SQi Zhang 					     rxring_numa[pi],
1733d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
1734d44f8a48SQi Zhang 					     mp);
17351e1d6bddSBernard Iremonger 				} else {
17361e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
17371e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
17381e1d6bddSBernard Iremonger 					if (mp == NULL) {
17391e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue: "
17401e1d6bddSBernard Iremonger 							"no mempool allocation"
17411e1d6bddSBernard Iremonger 							" on socket %d\n",
17421e1d6bddSBernard Iremonger 							port->socket_id);
17431e1d6bddSBernard Iremonger 						return -1;
1744b6ea6408SIntel 					}
1745b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1746d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
1747d44f8a48SQi Zhang 					     port->socket_id,
1748d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
1749d44f8a48SQi Zhang 					     mp);
17501e1d6bddSBernard Iremonger 				}
1751ce8d5614SIntel 				if (diag == 0)
1752ce8d5614SIntel 					continue;
1753ce8d5614SIntel 
1754ce8d5614SIntel 				/* Fail to setup rx queue, return */
1755ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1756ce8d5614SIntel 							RTE_PORT_HANDLING,
1757ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1758ce8d5614SIntel 					printf("Port %d can not be set back "
1759ce8d5614SIntel 							"to stopped\n", pi);
1760d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
1761d44f8a48SQi Zhang 				       pi);
1762ce8d5614SIntel 				/* try to reconfigure queues next time */
1763ce8d5614SIntel 				port->need_reconfig_queues = 1;
1764148f963fSBruce Richardson 				return -1;
1765ce8d5614SIntel 			}
1766ce8d5614SIntel 		}
176776ad4a2dSGaetan Rivet 
1768ce8d5614SIntel 		/* start port */
1769ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
1770ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
1771ce8d5614SIntel 
1772ce8d5614SIntel 			/* Failed to start port, set it back to stopped */
1773ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
1774ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1775ce8d5614SIntel 				printf("Port %d can not be set back to "
1776ce8d5614SIntel 							"stopped\n", pi);
1777ce8d5614SIntel 			continue;
1778ce8d5614SIntel 		}
1779ce8d5614SIntel 
1780ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1781ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1782ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
1783ce8d5614SIntel 
17842950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
1785d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
17862950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
17872950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
17882950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1789d8c89163SZijie Pan 
1790ce8d5614SIntel 		/* at least one port started, need checking link status */
1791ce8d5614SIntel 		need_check_link_status = 1;
1792ce8d5614SIntel 	}
1793ce8d5614SIntel 
17944fb82244SMatan Azrad 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
17954fb82244SMatan Azrad 	     event_type < RTE_ETH_EVENT_MAX;
17964fb82244SMatan Azrad 	     event_type++) {
17974fb82244SMatan Azrad 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
17984fb82244SMatan Azrad 						event_type,
17994fb82244SMatan Azrad 						eth_event_callback,
18004fb82244SMatan Azrad 						NULL);
18014fb82244SMatan Azrad 		if (diag) {
18024fb82244SMatan Azrad 			printf("Failed to setup event callback for event %d\n",
18034fb82244SMatan Azrad 				event_type);
18044fb82244SMatan Azrad 			return -1;
18054fb82244SMatan Azrad 		}
18064fb82244SMatan Azrad 	}
18074fb82244SMatan Azrad 
180892d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
1809edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
181092d2703eSMichael Qiu 	else if (need_check_link_status == 0)
1811ce8d5614SIntel 		printf("Please stop the ports first\n");
1812ce8d5614SIntel 
1813ce8d5614SIntel 	printf("Done\n");
1814148f963fSBruce Richardson 	return 0;
1815ce8d5614SIntel }
1816ce8d5614SIntel 
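/*
 * Stop the given port (or all ports if RTE_PORT_ALL), skipping ports that
 * are still used for forwarding or that are bonding slaves.
 */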
1817ce8d5614SIntel void
1818ce8d5614SIntel stop_port(portid_t pid)
1819ce8d5614SIntel {
1820ce8d5614SIntel 	portid_t pi;
1821ce8d5614SIntel 	struct rte_port *port;
1822ce8d5614SIntel 	int need_check_link_status = 0;
1823ce8d5614SIntel 
1824ce8d5614SIntel 	if (dcb_test) {
1825ce8d5614SIntel 		dcb_test = 0;
1826ce8d5614SIntel 		dcb_config = 0;
1827ce8d5614SIntel 	}
18284468635fSMichael Qiu 
18294468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
18304468635fSMichael Qiu 		return;
18314468635fSMichael Qiu 
1832ce8d5614SIntel 	printf("Stopping ports...\n");
1833ce8d5614SIntel 
18347d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
18354468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1836ce8d5614SIntel 			continue;
1837ce8d5614SIntel 
1838a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1839a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
1840a8ef3e3aSBernard Iremonger 			continue;
1841a8ef3e3aSBernard Iremonger 		}
1842a8ef3e3aSBernard Iremonger 
18430e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
18440e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
18450e545d30SBernard Iremonger 			continue;
18460e545d30SBernard Iremonger 		}
18470e545d30SBernard Iremonger 
1848ce8d5614SIntel 		port = &ports[pi];
1849ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1850ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
1851ce8d5614SIntel 			continue;
1852ce8d5614SIntel 
1853ce8d5614SIntel 		rte_eth_dev_stop(pi);
1854ce8d5614SIntel 
1855ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1856ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1857ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
1858ce8d5614SIntel 		need_check_link_status = 1;
1859ce8d5614SIntel 	}
1860bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
1861edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
1862ce8d5614SIntel 
1863ce8d5614SIntel 	printf("Done\n");
1864ce8d5614SIntel }
1865ce8d5614SIntel 
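/*
 * Close the given port (or all ports if RTE_PORT_ALL): flush its flow rules
 * and close the ethdev, skipping ports that are not stopped yet.
 */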
1866ce8d5614SIntel void
1867ce8d5614SIntel close_port(portid_t pid)
1868ce8d5614SIntel {
1869ce8d5614SIntel 	portid_t pi;
1870ce8d5614SIntel 	struct rte_port *port;
1871ce8d5614SIntel 
18724468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
18734468635fSMichael Qiu 		return;
18744468635fSMichael Qiu 
1875ce8d5614SIntel 	printf("Closing ports...\n");
1876ce8d5614SIntel 
18777d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
18784468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1879ce8d5614SIntel 			continue;
1880ce8d5614SIntel 
1881a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1882a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
1883a8ef3e3aSBernard Iremonger 			continue;
1884a8ef3e3aSBernard Iremonger 		}
1885a8ef3e3aSBernard Iremonger 
18860e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
18870e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
18880e545d30SBernard Iremonger 			continue;
18890e545d30SBernard Iremonger 		}
18900e545d30SBernard Iremonger 
1891ce8d5614SIntel 		port = &ports[pi];
1892ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1893d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1894d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
1895d4e8ad64SMichael Qiu 			continue;
1896d4e8ad64SMichael Qiu 		}
1897d4e8ad64SMichael Qiu 
1898d4e8ad64SMichael Qiu 		if (rte_atomic16_cmpset(&(port->port_status),
1899ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1900ce8d5614SIntel 			printf("Port %d is not stopped\n", pi);
1901ce8d5614SIntel 			continue;
1902ce8d5614SIntel 		}
1903ce8d5614SIntel 
1904938a184aSAdrien Mazarguil 		if (port->flow_list)
1905938a184aSAdrien Mazarguil 			port_flow_flush(pi);
1906ce8d5614SIntel 		rte_eth_dev_close(pi);
1907ce8d5614SIntel 
1908ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1909ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1910b38bb262SPablo de Lara 			printf("Port %d cannot be set to closed\n", pi);
1911ce8d5614SIntel 	}
1912ce8d5614SIntel 
1913ce8d5614SIntel 	printf("Done\n");
1914ce8d5614SIntel }
1915ce8d5614SIntel 
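/*
 * Reset the given port (or all ports if RTE_PORT_ALL) and mark it for
 * reconfiguration of the device and of its queues on the next start.
 */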
1916edab33b1STetsuya Mukawa void
191797f1e196SWei Dai reset_port(portid_t pid)
191897f1e196SWei Dai {
191997f1e196SWei Dai 	int diag;
192097f1e196SWei Dai 	portid_t pi;
192197f1e196SWei Dai 	struct rte_port *port;
192297f1e196SWei Dai 
192397f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
192497f1e196SWei Dai 		return;
192597f1e196SWei Dai 
192697f1e196SWei Dai 	printf("Resetting ports...\n");
192797f1e196SWei Dai 
192897f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
192997f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
193097f1e196SWei Dai 			continue;
193197f1e196SWei Dai 
193297f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
193397f1e196SWei Dai 			printf("Please remove port %d from forwarding "
193497f1e196SWei Dai 			       "configuration.\n", pi);
193597f1e196SWei Dai 			continue;
193697f1e196SWei Dai 		}
193797f1e196SWei Dai 
193897f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
193997f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
194097f1e196SWei Dai 			       pi);
194197f1e196SWei Dai 			continue;
194297f1e196SWei Dai 		}
194397f1e196SWei Dai 
194497f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
194597f1e196SWei Dai 		if (diag == 0) {
194697f1e196SWei Dai 			port = &ports[pi];
194797f1e196SWei Dai 			port->need_reconfig = 1;
194897f1e196SWei Dai 			port->need_reconfig_queues = 1;
194997f1e196SWei Dai 		} else {
195097f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
195197f1e196SWei Dai 		}
195297f1e196SWei Dai 	}
195397f1e196SWei Dai 
195497f1e196SWei Dai 	printf("Done\n");
195597f1e196SWei Dai }
195697f1e196SWei Dai 
1957fb73e096SJeff Guo static int
1958fb73e096SJeff Guo eth_dev_event_callback_register(void)
1959fb73e096SJeff Guo {
1960fb73e096SJeff Guo 	int ret;
1961fb73e096SJeff Guo 
1962fb73e096SJeff Guo 	/* register the device event callback */
1963fb73e096SJeff Guo 	ret = rte_dev_event_callback_register(NULL,
1964fb73e096SJeff Guo 		eth_dev_event_callback, NULL);
1965fb73e096SJeff Guo 	if (ret) {
1966fb73e096SJeff Guo 		printf("Failed to register device event callback\n");
1967fb73e096SJeff Guo 		return -1;
1968fb73e096SJeff Guo 	}
1969fb73e096SJeff Guo 
1970fb73e096SJeff Guo 	return 0;
1971fb73e096SJeff Guo }
1972fb73e096SJeff Guo 
1973fb73e096SJeff Guo 
1974fb73e096SJeff Guo static int
1975fb73e096SJeff Guo eth_dev_event_callback_unregister(void)
1976fb73e096SJeff Guo {
1977fb73e096SJeff Guo 	int ret;
1978fb73e096SJeff Guo 
1979fb73e096SJeff Guo 	/* unregister the device event callback */
1980fb73e096SJeff Guo 	ret = rte_dev_event_callback_unregister(NULL,
1981fb73e096SJeff Guo 		eth_dev_event_callback, NULL);
1982fb73e096SJeff Guo 	if (ret < 0) {
1983fb73e096SJeff Guo 		printf("Failed to unregister device event callback\n");
1984fb73e096SJeff Guo 		return -1;
1985fb73e096SJeff Guo 	}
1986fb73e096SJeff Guo 
1987fb73e096SJeff Guo 	return 0;
1988fb73e096SJeff Guo }
1989fb73e096SJeff Guo 
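/*
 * Attach a new port from the given device identifier, reconfigure it with
 * the default settings and add it to the forwarding ports list.
 */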
199097f1e196SWei Dai void
1991edab33b1STetsuya Mukawa attach_port(char *identifier)
1992ce8d5614SIntel {
1993ebf5e9b7SBernard Iremonger 	portid_t pi = 0;
1994931126baSBernard Iremonger 	unsigned int socket_id;
1995ce8d5614SIntel 
1996edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
1997edab33b1STetsuya Mukawa 
1998edab33b1STetsuya Mukawa 	if (identifier == NULL) {
1999edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2000edab33b1STetsuya Mukawa 		return;
2001ce8d5614SIntel 	}
2002ce8d5614SIntel 
2003edab33b1STetsuya Mukawa 	if (rte_eth_dev_attach(identifier, &pi))
2004edab33b1STetsuya Mukawa 		return;
2005edab33b1STetsuya Mukawa 
2006931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2007931126baSBernard Iremonger 	/* if socket_id is invalid, set to 0 */
2008931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
2009931126baSBernard Iremonger 		socket_id = 0;
2010931126baSBernard Iremonger 	reconfig(pi, socket_id);
2011edab33b1STetsuya Mukawa 	rte_eth_promiscuous_enable(pi);
2012edab33b1STetsuya Mukawa 
20134918a357SXiaoyun Li 	ports_ids[nb_ports] = pi;
2014d9a42a69SThomas Monjalon 	nb_ports = rte_eth_dev_count_avail();
2015edab33b1STetsuya Mukawa 
2016edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2017edab33b1STetsuya Mukawa 
201803ce2c53SMatan Azrad 	update_fwd_ports(pi);
201903ce2c53SMatan Azrad 
2020edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2021edab33b1STetsuya Mukawa 	printf("Done\n");
2022edab33b1STetsuya Mukawa }
2023edab33b1STetsuya Mukawa 
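/*
 * Detach a closed port from the application: flush its flow rules, remove
 * the underlying device and update the forwarding ports list.
 */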
2024edab33b1STetsuya Mukawa void
202528caa76aSZhiyong Yang detach_port(portid_t port_id)
20265f4ec54fSChen Jing D(Mark) {
2027edab33b1STetsuya Mukawa 	char name[RTE_ETH_NAME_MAX_LEN];
20284918a357SXiaoyun Li 	uint16_t i;
20295f4ec54fSChen Jing D(Mark) 
2030edab33b1STetsuya Mukawa 	printf("Detaching a port...\n");
20315f4ec54fSChen Jing D(Mark) 
2032edab33b1STetsuya Mukawa 	if (!port_is_closed(port_id)) {
2033edab33b1STetsuya Mukawa 		printf("Please close port first\n");
2034edab33b1STetsuya Mukawa 		return;
2035edab33b1STetsuya Mukawa 	}
2036edab33b1STetsuya Mukawa 
2037938a184aSAdrien Mazarguil 	if (ports[port_id].flow_list)
2038938a184aSAdrien Mazarguil 		port_flow_flush(port_id);
2039938a184aSAdrien Mazarguil 
20403070419eSGaetan Rivet 	if (rte_eth_dev_detach(port_id, name)) {
2041adea04c4SZhiyong Yang 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2042edab33b1STetsuya Mukawa 		return;
20433070419eSGaetan Rivet 	}
2044edab33b1STetsuya Mukawa 
20454918a357SXiaoyun Li 	for (i = 0; i < nb_ports; i++) {
20464918a357SXiaoyun Li 		if (ports_ids[i] == port_id) {
20474918a357SXiaoyun Li 			ports_ids[i] = ports_ids[nb_ports-1];
20484918a357SXiaoyun Li 			ports_ids[nb_ports-1] = 0;
20494918a357SXiaoyun Li 			break;
20504918a357SXiaoyun Li 		}
20514918a357SXiaoyun Li 	}
2052d9a42a69SThomas Monjalon 	nb_ports = rte_eth_dev_count_avail();
2053edab33b1STetsuya Mukawa 
205403ce2c53SMatan Azrad 	update_fwd_ports(RTE_MAX_ETHPORTS);
205503ce2c53SMatan Azrad 
2056adea04c4SZhiyong Yang 	printf("Port %u is detached. Now total ports is %d\n",
2057adea04c4SZhiyong Yang 			port_id, nb_ports);
2058edab33b1STetsuya Mukawa 	printf("Done\n");
2059edab33b1STetsuya Mukawa 	return;
20605f4ec54fSChen Jing D(Mark) }
20615f4ec54fSChen Jing D(Mark) 
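/*
 * Exit handler of testpmd: stop forwarding if it is running, stop and close
 * all ports and stop the device event monitor when hot-plug is enabled.
 */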
2062af75078fSIntel void
2063af75078fSIntel pmd_test_exit(void)
2064af75078fSIntel {
2065124909d7SZhiyong Yang 	struct rte_device *device;
2066af75078fSIntel 	portid_t pt_id;
2067fb73e096SJeff Guo 	int ret;
2068af75078fSIntel 
20698210ec25SPablo de Lara 	if (test_done == 0)
20708210ec25SPablo de Lara 		stop_packet_forwarding();
20718210ec25SPablo de Lara 
2072d3a274ceSZhihong Wang 	if (ports != NULL) {
2073d3a274ceSZhihong Wang 		no_link_check = 1;
20747d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
2075d3a274ceSZhihong Wang 			printf("\nShutting down port %d...\n", pt_id);
2076af75078fSIntel 			fflush(stdout);
2077d3a274ceSZhihong Wang 			stop_port(pt_id);
2078d3a274ceSZhihong Wang 			close_port(pt_id);
2079124909d7SZhiyong Yang 
2080124909d7SZhiyong Yang 			/*
2081124909d7SZhiyong Yang 			 * This is a workaround to fix a virtio-user issue that
2082124909d7SZhiyong Yang 			 * requires calling the clean-up routine to remove the
2083124909d7SZhiyong Yang 			 * existing socket.
2084124909d7SZhiyong Yang 			 * This workaround is valid only for testpmd; a fix
2085124909d7SZhiyong Yang 			 * valid for all applications is needed.
2086124909d7SZhiyong Yang 			 * TODO: Implement proper resource cleanup
2087124909d7SZhiyong Yang 			 */
2088124909d7SZhiyong Yang 			device = rte_eth_devices[pt_id].device;
2089124909d7SZhiyong Yang 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2090124909d7SZhiyong Yang 				detach_port(pt_id);
2091af75078fSIntel 		}
2092d3a274ceSZhihong Wang 	}
2093fb73e096SJeff Guo 
2094fb73e096SJeff Guo 	if (hot_plug) {
2095fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
2096fb73e096SJeff Guo 		if (ret)
2097fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2098fb73e096SJeff Guo 				"failed to stop device event monitor.\n");
2099fb73e096SJeff Guo 
2100fb73e096SJeff Guo 		ret = eth_dev_event_callback_unregister();
2101fb73e096SJeff Guo 		if (ret)
2102fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2103fb73e096SJeff Guo 				"failed to unregister all event callbacks.\n");
2104fb73e096SJeff Guo 	}
2105fb73e096SJeff Guo 
2106d3a274ceSZhihong Wang 	printf("\nBye...\n");
2107af75078fSIntel }
2108af75078fSIntel 
2109af75078fSIntel typedef void (*cmd_func_t)(void);
2110af75078fSIntel struct pmd_test_command {
2111af75078fSIntel 	const char *cmd_name;
2112af75078fSIntel 	cmd_func_t cmd_func;
2113af75078fSIntel };
2114af75078fSIntel 
2115af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2116af75078fSIntel 
2117ce8d5614SIntel /* Check the link status of all ports in up to 9s, and finally print the status of each port */
2118af75078fSIntel static void
2119edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2120af75078fSIntel {
2121ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2122ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2123f8244c63SZhiyong Yang 	portid_t portid;
2124f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2125ce8d5614SIntel 	struct rte_eth_link link;
2126ce8d5614SIntel 
2127ce8d5614SIntel 	printf("Checking link statuses...\n");
2128ce8d5614SIntel 	fflush(stdout);
2129ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2130ce8d5614SIntel 		all_ports_up = 1;
21317d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2132ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2133ce8d5614SIntel 				continue;
2134ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2135ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2136ce8d5614SIntel 			/* print link status if flag set */
2137ce8d5614SIntel 			if (print_flag == 1) {
2138ce8d5614SIntel 				if (link.link_status)
2139f8244c63SZhiyong Yang 					printf(
2140f8244c63SZhiyong Yang 					"Port %d Link Up. speed %u Mbps - %s\n",
2141f8244c63SZhiyong Yang 					portid, link.link_speed,
2142ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2143ce8d5614SIntel 					("full-duplex") : ("half-duplex"));
2144ce8d5614SIntel 				else
2145f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2146ce8d5614SIntel 				continue;
2147ce8d5614SIntel 			}
2148ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
214909419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2150ce8d5614SIntel 				all_ports_up = 0;
2151ce8d5614SIntel 				break;
2152ce8d5614SIntel 			}
2153ce8d5614SIntel 		}
2154ce8d5614SIntel 		/* after finally printing all link status, get out */
2155ce8d5614SIntel 		if (print_flag == 1)
2156ce8d5614SIntel 			break;
2157ce8d5614SIntel 
2158ce8d5614SIntel 		if (all_ports_up == 0) {
2159ce8d5614SIntel 			fflush(stdout);
2160ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2161ce8d5614SIntel 		}
2162ce8d5614SIntel 
2163ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2164ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2165ce8d5614SIntel 			print_flag = 1;
2166ce8d5614SIntel 		}
21678ea656f8SGaetan Rivet 
21688ea656f8SGaetan Rivet 		if (lsc_interrupt)
21698ea656f8SGaetan Rivet 			break;
2170ce8d5614SIntel 	}
2171af75078fSIntel }
2172af75078fSIntel 
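/*
 * Deferred handler of a device removal (RMV) event: stop forwarding if the
 * removed port was in use, then stop, close and detach the port, and restart
 * forwarding if it had been stopped here.
 */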
2173284c908cSGaetan Rivet static void
2174284c908cSGaetan Rivet rmv_event_callback(void *arg)
2175284c908cSGaetan Rivet {
21763b97888aSMatan Azrad 	int need_to_start = 0;
21770da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
217828caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
2179284c908cSGaetan Rivet 
2180284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2181284c908cSGaetan Rivet 
21823b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
21833b97888aSMatan Azrad 		need_to_start = 1;
21843b97888aSMatan Azrad 		stop_packet_forwarding();
21853b97888aSMatan Azrad 	}
21860da2a62bSMatan Azrad 	no_link_check = 1;
2187284c908cSGaetan Rivet 	stop_port(port_id);
21880da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
2189284c908cSGaetan Rivet 	close_port(port_id);
21903b97888aSMatan Azrad 	detach_port(port_id);
21913b97888aSMatan Azrad 	if (need_to_start)
21923b97888aSMatan Azrad 		start_packet_forwarding(0);
2193284c908cSGaetan Rivet }
2194284c908cSGaetan Rivet 
219576ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
2196d6af1a13SBernard Iremonger static int
2197f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2198d6af1a13SBernard Iremonger 		  void *ret_param)
219976ad4a2dSGaetan Rivet {
220076ad4a2dSGaetan Rivet 	static const char * const event_desc[] = {
220176ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
220276ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
220376ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
220476ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
220576ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2206badb87c1SAnoob Joseph 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
220776ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
220876ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
22094fb82244SMatan Azrad 		[RTE_ETH_EVENT_NEW] = "device probed",
22104fb82244SMatan Azrad 		[RTE_ETH_EVENT_DESTROY] = "device released",
221176ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MAX] = NULL,
221276ad4a2dSGaetan Rivet 	};
221376ad4a2dSGaetan Rivet 
221476ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
2215d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
221676ad4a2dSGaetan Rivet 
221776ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
221876ad4a2dSGaetan Rivet 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
221976ad4a2dSGaetan Rivet 			port_id, __func__, type);
222076ad4a2dSGaetan Rivet 		fflush(stderr);
22213af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
222276ad4a2dSGaetan Rivet 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
222376ad4a2dSGaetan Rivet 			event_desc[type]);
222476ad4a2dSGaetan Rivet 		fflush(stdout);
222576ad4a2dSGaetan Rivet 	}
2226284c908cSGaetan Rivet 
22270e45c64dSMatan Azrad 	if (port_id_is_invalid(port_id, DISABLED_WARN))
22280e45c64dSMatan Azrad 		return 0;
22290e45c64dSMatan Azrad 
2230284c908cSGaetan Rivet 	switch (type) {
2231284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
2232284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
2233284c908cSGaetan Rivet 				rmv_event_callback, (void *)(intptr_t)port_id))
2234284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
2235284c908cSGaetan Rivet 		break;
2236284c908cSGaetan Rivet 	default:
2237284c908cSGaetan Rivet 		break;
2238284c908cSGaetan Rivet 	}
2239d6af1a13SBernard Iremonger 	return 0;
224076ad4a2dSGaetan Rivet }
224176ad4a2dSGaetan Rivet 
2242fb73e096SJeff Guo /* This function is used by the interrupt thread */
2243fb73e096SJeff Guo static void
2244fb73e096SJeff Guo eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2245fb73e096SJeff Guo 			     __rte_unused void *arg)
2246fb73e096SJeff Guo {
2247fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
2248fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
2249fb73e096SJeff Guo 			__func__, type);
2250fb73e096SJeff Guo 		fflush(stderr);
2251fb73e096SJeff Guo 	}
2252fb73e096SJeff Guo 
2253fb73e096SJeff Guo 	switch (type) {
2254fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
2255fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2256fb73e096SJeff Guo 			device_name);
2257fb73e096SJeff Guo 		/* TODO: After finish failure handle, begin to stop
2258fb73e096SJeff Guo 		 * packet forward, stop port, close port, detach port.
2259fb73e096SJeff Guo 		 */
2260fb73e096SJeff Guo 		break;
2261fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
2262fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2263fb73e096SJeff Guo 			device_name);
2264fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
2265fb73e096SJeff Guo 		 * begin to attach port.
2266fb73e096SJeff Guo 		 */
2267fb73e096SJeff Guo 		break;
2268fb73e096SJeff Guo 	default:
2269fb73e096SJeff Guo 		break;
2270fb73e096SJeff Guo 	}
2271fb73e096SJeff Guo }
2272fb73e096SJeff Guo 
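/*
 * Apply the configured TX queue to statistics counter mappings to the
 * given port.
 */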
2273013af9b6SIntel static int
227428caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2275af75078fSIntel {
2276013af9b6SIntel 	uint16_t i;
2277af75078fSIntel 	int diag;
2278013af9b6SIntel 	uint8_t mapping_found = 0;
2279af75078fSIntel 
2280013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2281013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2282013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2283013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2284013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2285013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2286013af9b6SIntel 			if (diag != 0)
2287013af9b6SIntel 				return diag;
2288013af9b6SIntel 			mapping_found = 1;
2289af75078fSIntel 		}
2290013af9b6SIntel 	}
2291013af9b6SIntel 	if (mapping_found)
2292013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2293013af9b6SIntel 	return 0;
2294013af9b6SIntel }
2295013af9b6SIntel 
2296013af9b6SIntel static int
229728caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2298013af9b6SIntel {
2299013af9b6SIntel 	uint16_t i;
2300013af9b6SIntel 	int diag;
2301013af9b6SIntel 	uint8_t mapping_found = 0;
2302013af9b6SIntel 
2303013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2304013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2305013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2306013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2307013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2308013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2309013af9b6SIntel 			if (diag != 0)
2310013af9b6SIntel 				return diag;
2311013af9b6SIntel 			mapping_found = 1;
2312013af9b6SIntel 		}
2313013af9b6SIntel 	}
2314013af9b6SIntel 	if (mapping_found)
2315013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2316013af9b6SIntel 	return 0;
2317013af9b6SIntel }
2318013af9b6SIntel 
2319013af9b6SIntel static void
232028caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2321013af9b6SIntel {
2322013af9b6SIntel 	int diag = 0;
2323013af9b6SIntel 
2324013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2325af75078fSIntel 	if (diag != 0) {
2326013af9b6SIntel 		if (diag == -ENOTSUP) {
2327013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2328013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2329013af9b6SIntel 		}
2330013af9b6SIntel 		else
2331013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2332013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2333013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2334af75078fSIntel 					pi, diag);
2335af75078fSIntel 	}
2336013af9b6SIntel 
2337013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2338af75078fSIntel 	if (diag != 0) {
2339013af9b6SIntel 		if (diag == -ENOTSUP) {
2340013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2341013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2342013af9b6SIntel 		}
2343013af9b6SIntel 		else
2344013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2345013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2346013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2347af75078fSIntel 					pi, diag);
2348af75078fSIntel 	}
2349af75078fSIntel }
2350af75078fSIntel 
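/*
 * Initialize the RX and TX queue configuration of a port from the device
 * defaults, overriding them with any thresholds passed on the command line.
 */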
2351f2c5125aSPablo de Lara static void
2352f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2353f2c5125aSPablo de Lara {
2354d44f8a48SQi Zhang 	uint16_t qid;
2355f2c5125aSPablo de Lara 
2356d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
2357d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2358d44f8a48SQi Zhang 
2359d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
2360f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2361d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2362f2c5125aSPablo de Lara 
2363f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2364d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2365f2c5125aSPablo de Lara 
2366f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2367d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2368f2c5125aSPablo de Lara 
2369f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2370d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2371f2c5125aSPablo de Lara 
2372f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2373d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2374f2c5125aSPablo de Lara 
2375d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
2376d44f8a48SQi Zhang 	}
2377d44f8a48SQi Zhang 
2378d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
2379d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
2380d44f8a48SQi Zhang 
2381d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
2382f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2383d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2384f2c5125aSPablo de Lara 
2385f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2386d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2387f2c5125aSPablo de Lara 
2388f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2389d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2390f2c5125aSPablo de Lara 
2391f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2392d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2393f2c5125aSPablo de Lara 
2394f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2395d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2396d44f8a48SQi Zhang 
2397d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
2398d44f8a48SQi Zhang 	}
2399f2c5125aSPablo de Lara }
2400f2c5125aSPablo de Lara 
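/*
 * Apply the default testpmd configuration to every probed port: flow
 * director settings, the RSS hash functions (only when more than one Rx
 * queue is used), the multi-queue Rx mode (unless DCB was already enabled
 * on the port), the per-queue Rx/Tx settings, the stats mapping registers
 * and, when requested and supported by the device, LSC/RMV interrupts.
 */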
2401013af9b6SIntel void
2402013af9b6SIntel init_port_config(void)
2403013af9b6SIntel {
2404013af9b6SIntel 	portid_t pid;
2405013af9b6SIntel 	struct rte_port *port;
2406013af9b6SIntel 
24077d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
2408013af9b6SIntel 		port = &ports[pid];
2409013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
2410422515b9SAdrien Mazarguil 		rte_eth_dev_info_get(pid, &port->dev_info);
24113ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2412013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
241390892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2414422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
2415af75078fSIntel 		} else {
2416013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2417013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2418af75078fSIntel 		}
24193ce690d3SBruce Richardson 
24205f592039SJingjing Wu 		if (port->dcb_flag == 0) {
24213ce690d3SBruce Richardson 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
24223ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
24233ce690d3SBruce Richardson 			else
24243ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
24253ce690d3SBruce Richardson 		}
24263ce690d3SBruce Richardson 
2427f2c5125aSPablo de Lara 		rxtx_port_config(port);
2428013af9b6SIntel 
2429013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
2430013af9b6SIntel 
2431013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
243250c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2433e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
24347b7e5ba7SIntel #endif
24358ea656f8SGaetan Rivet 
24368ea656f8SGaetan Rivet 		if (lsc_interrupt &&
24378ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
24388ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
24398ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
2440284c908cSGaetan Rivet 		if (rmv_interrupt &&
2441284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
2442284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
2443284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
2444013af9b6SIntel 	}
2445013af9b6SIntel }
2446013af9b6SIntel 
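/*
 * Bonding helpers: set or clear the slave flag of a port, and report
 * whether a port currently acts as a bonding slave, either because
 * testpmd flagged it or because the ethdev layer reports
 * RTE_ETH_DEV_BONDED_SLAVE for it.
 */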
244741b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
244841b05095SBernard Iremonger {
244941b05095SBernard Iremonger 	struct rte_port *port;
245041b05095SBernard Iremonger 
245141b05095SBernard Iremonger 	port = &ports[slave_pid];
245241b05095SBernard Iremonger 	port->slave_flag = 1;
245341b05095SBernard Iremonger }
245441b05095SBernard Iremonger 
245541b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
245641b05095SBernard Iremonger {
245741b05095SBernard Iremonger 	struct rte_port *port;
245841b05095SBernard Iremonger 
245941b05095SBernard Iremonger 	port = &ports[slave_pid];
246041b05095SBernard Iremonger 	port->slave_flag = 0;
246141b05095SBernard Iremonger }
246241b05095SBernard Iremonger 
24630e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
24640e545d30SBernard Iremonger {
24650e545d30SBernard Iremonger 	struct rte_port *port;
24660e545d30SBernard Iremonger 
24670e545d30SBernard Iremonger 	port = &ports[slave_pid];
2468b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2469b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2470b8b8b344SMatan Azrad 		return 1;
2471b8b8b344SMatan Azrad 	return 0;
24720e545d30SBernard Iremonger }
24730e545d30SBernard Iremonger 
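/*
 * VLAN tags used by the DCB setup below to populate the VMDQ+DCB pool
 * map and the per-port VLAN filter.
 */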
2474013af9b6SIntel const uint16_t vlan_tags[] = {
2475013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
2476013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
2477013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
2478013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
2479013af9b6SIntel };
2480013af9b6SIntel 
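/*
 * Build the rte_eth_conf for a DCB test configuration.  In VT mode the
 * VMDQ+DCB pool count is derived from the number of traffic classes and
 * the pools are mapped to the vlan_tags[] array above; otherwise plain
 * DCB (keeping the current RSS hash configuration) is selected for Rx
 * and DCB for Tx.  PFC support is advertised in dcb_capability_en only
 * when pfc_en is set.
 */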
2481013af9b6SIntel static  int
2482ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
24831a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
24841a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
24851a572499SJingjing Wu 		 uint8_t pfc_en)
2486013af9b6SIntel {
2487013af9b6SIntel 	uint8_t i;
2488ac7c491cSKonstantin Ananyev 	int32_t rc;
2489ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
2490af75078fSIntel 
2491af75078fSIntel 	/*
2492013af9b6SIntel 	 * Builds up the correct configuration for DCB+VT based on the vlan_tags[] array
2493013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
2494af75078fSIntel 	 */
24951a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
24961a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
24971a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
24981a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
24991a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2500013af9b6SIntel 
2501547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
25021a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
25031a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
25041a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
25051a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
25061a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
25071a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2508013af9b6SIntel 
25091a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
25101a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
25111a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
25121a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
25131a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2514af75078fSIntel 		}
2515013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2516f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2517f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2518013af9b6SIntel 		}
2519013af9b6SIntel 
2520013af9b6SIntel 		/* Set the multi-queue DCB mode for both RX and TX */
252132e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
252232e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
25231a572499SJingjing Wu 	} else {
25241a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
25251a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
25261a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
25271a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2528013af9b6SIntel 
2529ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2530ac7c491cSKonstantin Ananyev 		if (rc != 0)
2531ac7c491cSKonstantin Ananyev 			return rc;
2532ac7c491cSKonstantin Ananyev 
25331a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
25341a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
25351a572499SJingjing Wu 
2536bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2537bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
2538bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
2539013af9b6SIntel 		}
2540ac7c491cSKonstantin Ananyev 
25411a572499SJingjing Wu 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2542ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
254332e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
25441a572499SJingjing Wu 	}
25451a572499SJingjing Wu 
25461a572499SJingjing Wu 	if (pfc_en)
25471a572499SJingjing Wu 		eth_conf->dcb_capability_en =
25481a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2549013af9b6SIntel 	else
2550013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2551013af9b6SIntel 
2552013af9b6SIntel 	return 0;
2553013af9b6SIntel }
2554013af9b6SIntel 
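/*
 * Reconfigure a port for DCB testing (typically reached from testpmd's
 * DCB port-config command): build the DCB configuration, re-run
 * rte_eth_dev_configure(), adjust the global nb_rxq/nb_txq to the queue
 * counts usable in the selected mode, enable VLAN filtering for the test
 * VLAN tags, and refresh the MAC address and stats register mappings.
 */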
2555013af9b6SIntel int
25561a572499SJingjing Wu init_port_dcb_config(portid_t pid,
25571a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
25581a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
25591a572499SJingjing Wu 		     uint8_t pfc_en)
2560013af9b6SIntel {
2561013af9b6SIntel 	struct rte_eth_conf port_conf;
2562013af9b6SIntel 	struct rte_port *rte_port;
2563013af9b6SIntel 	int retval;
2564013af9b6SIntel 	uint16_t i;
2565013af9b6SIntel 
25662a977b89SWenzhuo Lu 	rte_port = &ports[pid];
2567013af9b6SIntel 
2568013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2569013af9b6SIntel 	/* Enter DCB configuration status */
2570013af9b6SIntel 	dcb_config = 1;
2571013af9b6SIntel 
2572d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2573d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
2574d5354e89SYanglong Wu 
2575013af9b6SIntel 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2576ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2577013af9b6SIntel 	if (retval < 0)
2578013af9b6SIntel 		return retval;
25790074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2580013af9b6SIntel 
25812f203d44SQi Zhang 	/* Re-configure the device. */
25822f203d44SQi Zhang 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
	if (retval < 0)
		return retval;
25832a977b89SWenzhuo Lu 
25842a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
25852a977b89SWenzhuo Lu 
25862a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
25872a977b89SWenzhuo Lu 	 * the queue IDs of the VMDq pools start after the PF queues.
25882a977b89SWenzhuo Lu 	 */
25892a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
25902a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
25912a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
25922a977b89SWenzhuo Lu 			" for port %d.\n", pid);
25932a977b89SWenzhuo Lu 		return -1;
25942a977b89SWenzhuo Lu 	}
25952a977b89SWenzhuo Lu 
25962a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same DCB capability
25972a977b89SWenzhuo Lu 	 * and the same number of Rx and Tx queues in DCB mode.
25982a977b89SWenzhuo Lu 	 */
25992a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
260086ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
260186ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
260286ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
260386ef65eeSBernard Iremonger 		} else {
26042a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
26052a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
260686ef65eeSBernard Iremonger 		}
26072a977b89SWenzhuo Lu 	} else {
26082a977b89SWenzhuo Lu 		/* If VT is disabled, use all PF queues */
26092a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
26102a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
26112a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
26122a977b89SWenzhuo Lu 		} else {
26132a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
26142a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
26152a977b89SWenzhuo Lu 
26162a977b89SWenzhuo Lu 		}
26172a977b89SWenzhuo Lu 	}
26182a977b89SWenzhuo Lu 	rx_free_thresh = 64;
26192a977b89SWenzhuo Lu 
2620013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2621013af9b6SIntel 
2622f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
2623013af9b6SIntel 	/* VLAN filter */
26240074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
26251a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2626013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
2627013af9b6SIntel 
2628013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2629013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
2630013af9b6SIntel 
26317741e4cfSIntel 	rte_port->dcb_flag = 1;
26327741e4cfSIntel 
2633013af9b6SIntel 	return 0;
2634af75078fSIntel }
2635af75078fSIntel 
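/* Allocate and zero the global ports[] array holding per-port state. */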
2636ffc468ffSTetsuya Mukawa static void
2637ffc468ffSTetsuya Mukawa init_port(void)
2638ffc468ffSTetsuya Mukawa {
2639ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
2640ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
2641ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2642ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
2643ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
2644ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
2645ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
2646ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
2647ffc468ffSTetsuya Mukawa 	}
2648ffc468ffSTetsuya Mukawa }
2649ffc468ffSTetsuya Mukawa 
2650d3a274ceSZhihong Wang static void
2651d3a274ceSZhihong Wang force_quit(void)
2652d3a274ceSZhihong Wang {
2653d3a274ceSZhihong Wang 	pmd_test_exit();
2654d3a274ceSZhihong Wang 	prompt_exit();
2655d3a274ceSZhihong Wang }
2656d3a274ceSZhihong Wang 
2657d3a274ceSZhihong Wang static void
2658cfea1f30SPablo de Lara print_stats(void)
2659cfea1f30SPablo de Lara {
2660cfea1f30SPablo de Lara 	uint8_t i;
2661cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2662cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2663cfea1f30SPablo de Lara 
2664cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
2665cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
2666cfea1f30SPablo de Lara 
2667cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
2668cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2669cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
2670cfea1f30SPablo de Lara }
2671cfea1f30SPablo de Lara 
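/*
 * SIGINT/SIGTERM handler: shut down the optional packet-capture and
 * latency-stats frameworks, run the normal exit path through
 * force_quit(), flag the forwarding loop to stop, then re-raise the
 * signal with the default handler so the process exits with the
 * expected status.
 */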
2672cfea1f30SPablo de Lara static void
2673d3a274ceSZhihong Wang signal_handler(int signum)
2674d3a274ceSZhihong Wang {
2675d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
2676d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
2677d3a274ceSZhihong Wang 				signum);
2678102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
2679102b7329SReshma Pattan 		/* uninitialize packet capture framework */
2680102b7329SReshma Pattan 		rte_pdump_uninit();
2681102b7329SReshma Pattan #endif
268262d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
268362d3216dSReshma Pattan 		rte_latencystats_uninit();
268462d3216dSReshma Pattan #endif
2685d3a274ceSZhihong Wang 		force_quit();
2686d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
2687d9a191a0SPhil Yang 		f_quit = 1;
2688d3a274ceSZhihong Wang 		/* exit with the expected status */
2689d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
2690d3a274ceSZhihong Wang 		kill(getpid(), signum);
2691d3a274ceSZhihong Wang 	}
2692d3a274ceSZhihong Wang }
2693d3a274ceSZhihong Wang 
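/*
 * Start-up sequence: initialise the EAL, register the testpmd log type,
 * optionally start the packet-capture framework, enumerate the probed
 * ports, parse the testpmd arguments, build the configuration, start all
 * ports and put them in promiscuous mode, set up the metrics/latency/
 * bitrate facilities, and finally hand control to either the interactive
 * prompt or the non-interactive forwarding loop.
 */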
2694af75078fSIntel int
2695af75078fSIntel main(int argc, char** argv)
2696af75078fSIntel {
2697af75078fSIntel 	int diag;
2698f8244c63SZhiyong Yang 	portid_t port_id;
26994918a357SXiaoyun Li 	uint16_t count;
2700fb73e096SJeff Guo 	int ret;
2701af75078fSIntel 
2702d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
2703d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
2704d3a274ceSZhihong Wang 
2705af75078fSIntel 	diag = rte_eal_init(argc, argv);
2706af75078fSIntel 	if (diag < 0)
2707af75078fSIntel 		rte_panic("Cannot init EAL\n");
2708af75078fSIntel 
2709285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
2710285fd101SOlivier Matz 	if (testpmd_logtype < 0)
2711285fd101SOlivier Matz 		rte_panic("Cannot register log type");
2712285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2713285fd101SOlivier Matz 
27144aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP
27154aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
27164aa0d012SAnatoly Burakov 	rte_pdump_init(NULL);
27174aa0d012SAnatoly Burakov #endif
27184aa0d012SAnatoly Burakov 
27194918a357SXiaoyun Li 	count = 0;
27204918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
27214918a357SXiaoyun Li 		ports_ids[count] = port_id;
27224918a357SXiaoyun Li 		count++;
27234918a357SXiaoyun Li 	}
27244918a357SXiaoyun Li 	nb_ports = (portid_t) count;
27254aa0d012SAnatoly Burakov 	if (nb_ports == 0)
27264aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
27274aa0d012SAnatoly Burakov 
27284aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
27294aa0d012SAnatoly Burakov 	init_port();
27304aa0d012SAnatoly Burakov 
27314aa0d012SAnatoly Burakov 	set_def_fwd_config();
27324aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
27334aa0d012SAnatoly Burakov 		rte_panic("Empty set of forwarding logical cores - check the "
27344aa0d012SAnatoly Burakov 			  "core mask supplied in the command parameters\n");
27354aa0d012SAnatoly Burakov 
2736e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
2737e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE
2738e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
2739e505d84cSAnatoly Burakov #endif
2740e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS
2741e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
2742e505d84cSAnatoly Burakov #endif
2743e505d84cSAnatoly Burakov 
2744fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
2745fb7b8b32SAnatoly Burakov #ifdef RTE_EXEC_ENV_BSDAPP
2746fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
2747fb7b8b32SAnatoly Burakov #else
2748fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
2749fb7b8b32SAnatoly Burakov #endif
2750fb7b8b32SAnatoly Burakov 
2751e505d84cSAnatoly Burakov 	argc -= diag;
2752e505d84cSAnatoly Burakov 	argv += diag;
2753e505d84cSAnatoly Burakov 	if (argc > 1)
2754e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
2755e505d84cSAnatoly Burakov 
2756e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2757285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
27581c036b16SEelco Chaudron 			strerror(errno));
27591c036b16SEelco Chaudron 	}
27601c036b16SEelco Chaudron 
276199cabef0SPablo de Lara 	if (tx_first && interactive)
276299cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
276399cabef0SPablo de Lara 				"interactive mode.\n");
27648820cba4SDavid Hunt 
27658820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
27668820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
27678820cba4SDavid Hunt 				"using tx_first. Disabling.\n");
27688820cba4SDavid Hunt 		lsc_interrupt = 0;
27698820cba4SDavid Hunt 	}
27708820cba4SDavid Hunt 
27715a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
27725a8fb55cSReshma Pattan 		printf("Warning: Either Rx or Tx queue count should be non-zero\n");
27735a8fb55cSReshma Pattan 
27745a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2775af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2776af75078fSIntel 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2777af75078fSIntel 		       nb_rxq, nb_txq);
2778af75078fSIntel 
2779af75078fSIntel 	init_config();
2780fb73e096SJeff Guo 
2781fb73e096SJeff Guo 	if (hot_plug) {
2782fb73e096SJeff Guo 		/* enable hot plug monitoring */
2783fb73e096SJeff Guo 		ret = rte_dev_event_monitor_start();
2784fb73e096SJeff Guo 		if (ret) {
2785fb73e096SJeff Guo 			rte_errno = EINVAL;
2786fb73e096SJeff Guo 			return -1;
2787fb73e096SJeff Guo 		}
2788fb73e096SJeff Guo 		eth_dev_event_callback_register();
2789fb73e096SJeff Guo 
2790fb73e096SJeff Guo 	}
2791fb73e096SJeff Guo 
2792148f963fSBruce Richardson 	if (start_port(RTE_PORT_ALL) != 0)
2793148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2794af75078fSIntel 
2795ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
27967d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(port_id)
2797ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
2798af75078fSIntel 
27997e4441c8SRemy Horton 	/* Init metrics library */
28007e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
28017e4441c8SRemy Horton 
280262d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
280362d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
280462d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
280562d3216dSReshma Pattan 		if (ret)
280662d3216dSReshma Pattan 			printf("Warning: latencystats init()"
280762d3216dSReshma Pattan 				" returned error %d\n",	ret);
280862d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
280962d3216dSReshma Pattan 			latencystats_lcore_id);
281062d3216dSReshma Pattan 	}
281162d3216dSReshma Pattan #endif
281262d3216dSReshma Pattan 
28137e4441c8SRemy Horton 	/* Setup bitrate stats */
28147e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
2815e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
28167e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
28177e4441c8SRemy Horton 		if (bitrate_data == NULL)
2818e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
2819e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
28207e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
2821e25e6c70SRemy Horton 	}
28227e4441c8SRemy Horton #endif
28237e4441c8SRemy Horton 
28240d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
282581ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
282681ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
282781ef862bSAllain Legacy 
2828ca7feb22SCyril Chemparathy 	if (interactive == 1) {
2829ca7feb22SCyril Chemparathy 		if (auto_start) {
2830ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
2831ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
2832ca7feb22SCyril Chemparathy 		}
2833af75078fSIntel 		prompt();
28340de738cfSJiayu Hu 		pmd_test_exit();
2835ca7feb22SCyril Chemparathy 	} else
28360d56cb81SThomas Monjalon #endif
28370d56cb81SThomas Monjalon 	{
2838af75078fSIntel 		char c;
2839af75078fSIntel 		int rc;
2840af75078fSIntel 
2841d9a191a0SPhil Yang 		f_quit = 0;
2842d9a191a0SPhil Yang 
2843af75078fSIntel 		printf("No interactive command line requested; starting packet forwarding\n");
284499cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
2845cfea1f30SPablo de Lara 		if (stats_period != 0) {
2846cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2847cfea1f30SPablo de Lara 			uint64_t timer_period;
2848cfea1f30SPablo de Lara 
2849cfea1f30SPablo de Lara 			/* Convert to number of cycles */
2850cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
2851cfea1f30SPablo de Lara 
2852d9a191a0SPhil Yang 			while (f_quit == 0) {
2853cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
2854cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
2855cfea1f30SPablo de Lara 
2856cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
2857cfea1f30SPablo de Lara 					print_stats();
2858cfea1f30SPablo de Lara 					/* Reset the timer */
2859cfea1f30SPablo de Lara 					diff_time = 0;
2860cfea1f30SPablo de Lara 				}
2861cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
2862cfea1f30SPablo de Lara 				prev_time = cur_time;
2863cfea1f30SPablo de Lara 				sleep(1);
2864cfea1f30SPablo de Lara 			}
2865cfea1f30SPablo de Lara 		}
2866cfea1f30SPablo de Lara 
2867af75078fSIntel 		printf("Press enter to exit\n");
2868af75078fSIntel 		rc = read(0, &c, 1);
2869d3a274ceSZhihong Wang 		pmd_test_exit();
2870af75078fSIntel 		if (rc < 0)
2871af75078fSIntel 			return 1;
2872af75078fSIntel 	}
2873af75078fSIntel 
2874af75078fSIntel 	return 0;
2875af75078fSIntel }
2876