xref: /dpdk/app/test-pmd/testpmd.c (revision 58d475b78c603cda5df527880ac46d10e98ad5f4)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h>
31af75078fSIntel #include <rte_memory.h>
32af75078fSIntel #include <rte_memcpy.h>
33af75078fSIntel #include <rte_launch.h>
34af75078fSIntel #include <rte_eal.h>
35284c908cSGaetan Rivet #include <rte_alarm.h>
36af75078fSIntel #include <rte_per_lcore.h>
37af75078fSIntel #include <rte_lcore.h>
38af75078fSIntel #include <rte_atomic.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
577e4441c8SRemy Horton #include <rte_metrics.h>
587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
597e4441c8SRemy Horton #include <rte_bitrate.h>
607e4441c8SRemy Horton #endif
6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6262d3216dSReshma Pattan #include <rte_latencystats.h>
6362d3216dSReshma Pattan #endif
64af75078fSIntel 
65af75078fSIntel #include "testpmd.h"
66af75078fSIntel 
67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
70c7f5dba7SAnatoly Burakov #else
71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
72c7f5dba7SAnatoly Burakov #endif
73c7f5dba7SAnatoly Burakov 
74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
77c7f5dba7SAnatoly Burakov #else
78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
79c7f5dba7SAnatoly Burakov #endif
80c7f5dba7SAnatoly Burakov 
81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
140af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
169af75078fSIntel /*
170af75078fSIntel  * Forwarding engines.
171af75078fSIntel  */
172af75078fSIntel struct fwd_engine * fwd_engines[] = {
173af75078fSIntel 	&io_fwd_engine,
174af75078fSIntel 	&mac_fwd_engine,
175d47388f1SCyril Chemparathy 	&mac_swap_engine,
176e9e23a61SCyril Chemparathy 	&flow_gen_engine,
177af75078fSIntel 	&rx_only_engine,
178af75078fSIntel 	&tx_only_engine,
179af75078fSIntel 	&csum_fwd_engine,
180168dfa61SIvan Boule 	&icmp_echo_engine,
1810ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
1820ad778b3SJasvinder Singh 	&softnic_fwd_engine,
1835b590fbeSJasvinder Singh #endif
184af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
185af75078fSIntel 	&ieee1588_fwd_engine,
186af75078fSIntel #endif
187af75078fSIntel 	NULL,
188af75078fSIntel };
189af75078fSIntel 
190af75078fSIntel struct fwd_config cur_fwd_config;
191af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
192bf56fce1SZhihong Wang uint32_t retry_enabled;
193bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
194bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
195af75078fSIntel 
196af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
197c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
198c8798818SIntel                                       * specified on command-line. */
199cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
200d9a191a0SPhil Yang 
201d9a191a0SPhil Yang /*
202d9a191a0SPhil Yang  * In a container, a process running with the 'stats-period' option cannot be
203d9a191a0SPhil Yang  * terminated. Set this flag to exit the stats-period loop after SIGINT/SIGTERM.
204d9a191a0SPhil Yang  */
205d9a191a0SPhil Yang uint8_t f_quit;
206d9a191a0SPhil Yang 
207af75078fSIntel /*
208af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
209af75078fSIntel  */
210af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
211af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
212af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
213af75078fSIntel };
214af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
215af75078fSIntel 
21679bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
21779bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
21879bec05bSKonstantin Ananyev 
219af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
220e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
221af75078fSIntel 
222900550deSIntel /* Whether the current configuration is in DCB mode; 0 means it is not. */
223900550deSIntel uint8_t dcb_config = 0;
224900550deSIntel 
225900550deSIntel /* Whether the dcb is in testing status */
226900550deSIntel uint8_t dcb_test = 0;
227900550deSIntel 
228af75078fSIntel /*
229af75078fSIntel  * Configurable number of RX/TX queues.
230af75078fSIntel  */
231af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
232af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
233af75078fSIntel 
234af75078fSIntel /*
235af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2368599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
237af75078fSIntel  */
2388599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2398599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
240af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
241af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
242af75078fSIntel 
243f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
244af75078fSIntel /*
245af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
246af75078fSIntel  */
247af75078fSIntel 
248f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
249f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
250f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
251af75078fSIntel 
252f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
253f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
254f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
255af75078fSIntel 
256af75078fSIntel /*
257af75078fSIntel  * Configurable value of RX free threshold.
258af75078fSIntel  */
259f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
260af75078fSIntel 
261af75078fSIntel /*
262ce8d5614SIntel  * Configurable value of RX drop enable.
263ce8d5614SIntel  */
264f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
265ce8d5614SIntel 
266ce8d5614SIntel /*
267af75078fSIntel  * Configurable value of TX free threshold.
268af75078fSIntel  */
269f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
270af75078fSIntel 
271af75078fSIntel /*
272af75078fSIntel  * Configurable value of TX RS bit threshold.
273af75078fSIntel  */
274f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
275af75078fSIntel 
276af75078fSIntel /*
277af75078fSIntel  * Receive Side Scaling (RSS) configuration.
278af75078fSIntel  */
2798a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
280af75078fSIntel 
281af75078fSIntel /*
282af75078fSIntel  * Port topology configuration
283af75078fSIntel  */
284af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
285af75078fSIntel 
2867741e4cfSIntel /*
2877741e4cfSIntel  * Avoid flushing all the RX streams before starting forwarding.
2887741e4cfSIntel  */
2897741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
2907741e4cfSIntel 
291af75078fSIntel /*
2927ee3e944SVasily Philipov  * Flow API isolated mode.
2937ee3e944SVasily Philipov  */
2947ee3e944SVasily Philipov uint8_t flow_isolate_all;
2957ee3e944SVasily Philipov 
2967ee3e944SVasily Philipov /*
297bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
298bc202406SDavid Marchand  * Avoid checking link status when starting/stopping a port.
299bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
300bc202406SDavid Marchand 
301bc202406SDavid Marchand /*
3028ea656f8SGaetan Rivet  * Enable link status change notification
3038ea656f8SGaetan Rivet  */
3048ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3058ea656f8SGaetan Rivet 
3068ea656f8SGaetan Rivet /*
307284c908cSGaetan Rivet  * Enable device removal notification.
308284c908cSGaetan Rivet  */
309284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
310284c908cSGaetan Rivet 
311fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
312fb73e096SJeff Guo 
313284c908cSGaetan Rivet /*
3143af72783SGaetan Rivet  * Display or mask ether events
3153af72783SGaetan Rivet  * Default to all events except VF_MBOX
3163af72783SGaetan Rivet  */
3173af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3183af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3193af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3203af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
321badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3223af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3233af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
324e505d84cSAnatoly Burakov /*
325e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
326e505d84cSAnatoly Burakov  */
327e505d84cSAnatoly Burakov int do_mlockall = 0;
3283af72783SGaetan Rivet 
3293af72783SGaetan Rivet /*
3307b7e5ba7SIntel  * NIC bypass mode configuration options.
3317b7e5ba7SIntel  */
3327b7e5ba7SIntel 
33350c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3347b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
335e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
3367b7e5ba7SIntel #endif
3377b7e5ba7SIntel 
338e261265eSRadu Nicolau 
33962d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
34062d3216dSReshma Pattan 
34162d3216dSReshma Pattan /*
34262d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
34362d3216dSReshma Pattan  */
34462d3216dSReshma Pattan uint8_t latencystats_enabled;
34562d3216dSReshma Pattan 
34662d3216dSReshma Pattan /*
34762d3216dSReshma Pattan  * Lcore ID to service latency statistics.
34862d3216dSReshma Pattan  */
34962d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
35062d3216dSReshma Pattan 
35162d3216dSReshma Pattan #endif
35262d3216dSReshma Pattan 
3537b7e5ba7SIntel /*
354af75078fSIntel  * Ethernet device configuration.
355af75078fSIntel  */
356af75078fSIntel struct rte_eth_rxmode rx_mode = {
357af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
358af75078fSIntel };
359af75078fSIntel 
36007e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
36107e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
36207e5f7bdSShahaf Shuler };
363fd8c20aaSShahaf Shuler 
364af75078fSIntel struct rte_fdir_conf fdir_conf = {
365af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
366af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
367af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
368d9d5e6f2SJingjing Wu 	.mask = {
36926f579aaSWei Zhao 		.vlan_tci_mask = 0xFFEF,
370d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
371d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
372d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
373d9d5e6f2SJingjing Wu 		},
374d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
375d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
376d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
377d9d5e6f2SJingjing Wu 		},
378d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
379d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
38047b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
38147b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
38247b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
383d9d5e6f2SJingjing Wu 	},
384af75078fSIntel 	.drop_queue = 127,
385af75078fSIntel };
386af75078fSIntel 
3872950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
388af75078fSIntel 
389ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
390ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
391ed30d9b6SIntel 
392ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
393ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
394ed30d9b6SIntel 
395ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
396ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
397ed30d9b6SIntel 
398a4fd5eeeSElza Mathew /*
399a4fd5eeeSElza Mathew  * Display zero values by default for xstats
400a4fd5eeeSElza Mathew  */
401a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
402a4fd5eeeSElza Mathew 
403c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
404c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4057acf894dSStephen Hurd 
406e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4077e4441c8SRemy Horton /* Bitrate statistics */
4087e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
409e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
410e25e6c70SRemy Horton uint8_t bitrate_enabled;
411e25e6c70SRemy Horton #endif
4127e4441c8SRemy Horton 
413b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
414b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
415b40f8d78SJiayu Hu 
4161960be7dSNelio Laranjeiro struct vxlan_encap_conf vxlan_encap_conf = {
4171960be7dSNelio Laranjeiro 	.select_ipv4 = 1,
4181960be7dSNelio Laranjeiro 	.select_vlan = 0,
4191960be7dSNelio Laranjeiro 	.vni = "\x00\x00\x00",
4201960be7dSNelio Laranjeiro 	.udp_src = 0,
4211960be7dSNelio Laranjeiro 	.udp_dst = RTE_BE16(4789),
4221960be7dSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
4231960be7dSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
4241960be7dSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
4251960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
4261960be7dSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
4271960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
4281960be7dSNelio Laranjeiro 	.vlan_tci = 0,
4291960be7dSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
4301960be7dSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
4311960be7dSNelio Laranjeiro };
4321960be7dSNelio Laranjeiro 
433dcd962fcSNelio Laranjeiro struct nvgre_encap_conf nvgre_encap_conf = {
434dcd962fcSNelio Laranjeiro 	.select_ipv4 = 1,
435dcd962fcSNelio Laranjeiro 	.select_vlan = 0,
436dcd962fcSNelio Laranjeiro 	.tni = "\x00\x00\x00",
437dcd962fcSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
438dcd962fcSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
439dcd962fcSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
440dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
441dcd962fcSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
442dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
443dcd962fcSNelio Laranjeiro 	.vlan_tci = 0,
444dcd962fcSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
445dcd962fcSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
446dcd962fcSNelio Laranjeiro };
447dcd962fcSNelio Laranjeiro 
448ed30d9b6SIntel /* Forward function declarations */
44928caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
45028caa76aSZhiyong Yang 						   struct rte_port *port);
451edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
452f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
45376ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
454d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
455fb73e096SJeff Guo static void eth_dev_event_callback(char *device_name,
456fb73e096SJeff Guo 				enum rte_dev_event_type type,
457fb73e096SJeff Guo 				void *param);
458fb73e096SJeff Guo static int eth_dev_event_callback_register(void);
459fb73e096SJeff Guo static int eth_dev_event_callback_unregister(void);
460fb73e096SJeff Guo 
461ce8d5614SIntel 
462ce8d5614SIntel /*
463ce8d5614SIntel  * Check if all the ports are started.
464ce8d5614SIntel  * If yes, return positive value. If not, return zero.
465ce8d5614SIntel  */
466ce8d5614SIntel static int all_ports_started(void);
467ed30d9b6SIntel 
46852f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
46952f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
47052f38a20SJiayu Hu 
471af75078fSIntel /*
47298a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
473c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
474c9cafcc8SShahaf Shuler  */
475c9cafcc8SShahaf Shuler int
476c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
477c9cafcc8SShahaf Shuler {
478c9cafcc8SShahaf Shuler 	unsigned int i;
479c9cafcc8SShahaf Shuler 
480c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
481c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
482c9cafcc8SShahaf Shuler 			return 0;
483c9cafcc8SShahaf Shuler 	}
484c9cafcc8SShahaf Shuler 	return 1;
485c9cafcc8SShahaf Shuler }
486c9cafcc8SShahaf Shuler 
487c9cafcc8SShahaf Shuler /*
488af75078fSIntel  * Setup default configuration.
489af75078fSIntel  */
490af75078fSIntel static void
491af75078fSIntel set_default_fwd_lcores_config(void)
492af75078fSIntel {
493af75078fSIntel 	unsigned int i;
494af75078fSIntel 	unsigned int nb_lc;
4957acf894dSStephen Hurd 	unsigned int sock_num;
496af75078fSIntel 
497af75078fSIntel 	nb_lc = 0;
498af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
499dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
500dbfb8ec7SPhil Yang 			continue;
501c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
502c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
503c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
504c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
505c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
506c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
507c9cafcc8SShahaf Shuler 			}
508c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5097acf894dSStephen Hurd 		}
510f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
511f54fe5eeSStephen Hurd 			continue;
512f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
513af75078fSIntel 	}
514af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
515af75078fSIntel 	nb_cfg_lcores = nb_lcores;
516af75078fSIntel 	nb_fwd_lcores = 1;
517af75078fSIntel }
518af75078fSIntel 
519af75078fSIntel static void
520af75078fSIntel set_def_peer_eth_addrs(void)
521af75078fSIntel {
522af75078fSIntel 	portid_t i;
523af75078fSIntel 
524af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
525af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
526af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
527af75078fSIntel 	}
528af75078fSIntel }
529af75078fSIntel 
530af75078fSIntel static void
531af75078fSIntel set_default_fwd_ports_config(void)
532af75078fSIntel {
533af75078fSIntel 	portid_t pt_id;
53465a7360cSMatan Azrad 	int i = 0;
535af75078fSIntel 
53665a7360cSMatan Azrad 	RTE_ETH_FOREACH_DEV(pt_id)
53765a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
538af75078fSIntel 
539af75078fSIntel 	nb_cfg_ports = nb_ports;
540af75078fSIntel 	nb_fwd_ports = nb_ports;
541af75078fSIntel }
542af75078fSIntel 
/*
 * Reset the forwarding configuration to its defaults, re-deriving lcores,
 * peer MAC addresses and the forwarded-port list from the probed
 * environment.
 */
void
set_def_fwd_config(void)
{
	/* every enabled lcore except the master becomes a forwarding core */
	set_default_fwd_lcores_config();
	/* locally-administered peer MACs, last byte = port index */
	set_def_peer_eth_addrs();
	/* forward on all probed ports */
	set_default_fwd_ports_config();
}
550af75078fSIntel 
551c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
552c7f5dba7SAnatoly Burakov static int
553c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
554c7f5dba7SAnatoly Burakov {
555c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
556c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
557c7f5dba7SAnatoly Burakov 
558c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
559c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
560c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
561c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
562c7f5dba7SAnatoly Burakov 	 */
563c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
564c7f5dba7SAnatoly Burakov 
565c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
566c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
567c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
568c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
569c7f5dba7SAnatoly Burakov 		return -1;
570c7f5dba7SAnatoly Burakov 	}
571c7f5dba7SAnatoly Burakov 
572c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
573c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
574c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
575c7f5dba7SAnatoly Burakov 
576c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
577c7f5dba7SAnatoly Burakov 
578c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
579c7f5dba7SAnatoly Burakov 
580c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
581c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
582c7f5dba7SAnatoly Burakov 		return -1;
583c7f5dba7SAnatoly Burakov 	}
584c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
585c7f5dba7SAnatoly Burakov 
586c7f5dba7SAnatoly Burakov 	return 0;
587c7f5dba7SAnatoly Burakov }
588c7f5dba7SAnatoly Burakov 
/* Index of the lowest set bit of v (undefined for v == 0). */
static inline uint32_t
bsf64(uint64_t v)
{
	/* count of trailing zero bits == position of the lowest set bit */
	uint32_t lsb_idx = (uint32_t)__builtin_ctzll(v);

	return lsb_idx;
}
594c7f5dba7SAnatoly Burakov 
/* Ceiling of log2(v); returns 0 for v == 0. */
static inline uint32_t
log2_u64(uint64_t v)
{
	if (v == 0)
		return 0;

	/* round v up to a power of two, then take its bit index */
	return bsf64(rte_align64pow2(v));
}
603c7f5dba7SAnatoly Burakov 
604c7f5dba7SAnatoly Burakov static int
605c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
606c7f5dba7SAnatoly Burakov {
607c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
608c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
609c7f5dba7SAnatoly Burakov 	 */
610c7f5dba7SAnatoly Burakov 	int log2 = log2_u64(page_sz);
611c7f5dba7SAnatoly Burakov 
612c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
613c7f5dba7SAnatoly Burakov }
614c7f5dba7SAnatoly Burakov 
615c7f5dba7SAnatoly Burakov static void *
616c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
617c7f5dba7SAnatoly Burakov {
618c7f5dba7SAnatoly Burakov 	void *addr;
619c7f5dba7SAnatoly Burakov 	int flags;
620c7f5dba7SAnatoly Burakov 
621c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
622c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
623c7f5dba7SAnatoly Burakov 	if (huge)
624c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
625c7f5dba7SAnatoly Burakov 
626c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
627c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
628c7f5dba7SAnatoly Burakov 		return NULL;
629c7f5dba7SAnatoly Burakov 
630c7f5dba7SAnatoly Burakov 	return addr;
631c7f5dba7SAnatoly Burakov }
632c7f5dba7SAnatoly Burakov 
/* Description of an externally allocated memory area used to back a mempool
 * (filled in by create_extmem).
 */
struct extmem_param {
	void *addr; /**< base address of the mapped area */
	size_t len; /**< length of the area, in bytes */
	size_t pgsz; /**< page size the area was allocated with */
	rte_iova_t *iova_table; /**< per-page IOVA addresses of the area */
	unsigned int iova_table_len; /**< presumably the entry count of iova_table — confirm in create_extmem */
};
640c7f5dba7SAnatoly Burakov 
641c7f5dba7SAnatoly Burakov static int
642c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
643c7f5dba7SAnatoly Burakov 		bool huge)
644c7f5dba7SAnatoly Burakov {
645c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
646c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
647c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
648c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
649c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
650c7f5dba7SAnatoly Burakov 	void *addr;
651c7f5dba7SAnatoly Burakov 	int ret;
652c7f5dba7SAnatoly Burakov 
653c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
654c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
655c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
656c7f5dba7SAnatoly Burakov 			continue;
657c7f5dba7SAnatoly Burakov 
658c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
659c7f5dba7SAnatoly Burakov 
660c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
661c7f5dba7SAnatoly Burakov 		if (!huge)
662c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
663c7f5dba7SAnatoly Burakov 
664c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
665c7f5dba7SAnatoly Burakov 		if (ret < 0) {
666c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
667c7f5dba7SAnatoly Burakov 			return -1;
668c7f5dba7SAnatoly Burakov 		}
669c7f5dba7SAnatoly Burakov 
670c7f5dba7SAnatoly Burakov 		/* allocate our memory */
671c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
672c7f5dba7SAnatoly Burakov 
673c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
674c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
675c7f5dba7SAnatoly Burakov 		 * try another one.
676c7f5dba7SAnatoly Burakov 		 */
677c7f5dba7SAnatoly Burakov 		if (addr == NULL)
678c7f5dba7SAnatoly Burakov 			continue;
679c7f5dba7SAnatoly Burakov 
680c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
681c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
682c7f5dba7SAnatoly Burakov 
683c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
684c7f5dba7SAnatoly Burakov 
685c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
686c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
687c7f5dba7SAnatoly Burakov 			goto fail;
688c7f5dba7SAnatoly Burakov 		}
689c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
690c7f5dba7SAnatoly Burakov 		if (!huge)
691c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
692c7f5dba7SAnatoly Burakov 
693c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
694c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
695c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
696c7f5dba7SAnatoly Burakov 			size_t offset;
697c7f5dba7SAnatoly Burakov 			void *cur;
698c7f5dba7SAnatoly Burakov 
699c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
700c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
701c7f5dba7SAnatoly Burakov 
702c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
703c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
704c7f5dba7SAnatoly Burakov 
705c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
706c7f5dba7SAnatoly Burakov 
707c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
708c7f5dba7SAnatoly Burakov 		}
709c7f5dba7SAnatoly Burakov 
710c7f5dba7SAnatoly Burakov 		break;
711c7f5dba7SAnatoly Burakov 	}
712c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
713c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
714c7f5dba7SAnatoly Burakov 		return -1;
715c7f5dba7SAnatoly Burakov 
716c7f5dba7SAnatoly Burakov 	param->addr = addr;
717c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
718c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
719c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
720c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
721c7f5dba7SAnatoly Burakov 
722c7f5dba7SAnatoly Burakov 	return 0;
723c7f5dba7SAnatoly Burakov fail:
724c7f5dba7SAnatoly Burakov 	if (iovas)
725c7f5dba7SAnatoly Burakov 		free(iovas);
726c7f5dba7SAnatoly Burakov 	if (addr)
727c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
728c7f5dba7SAnatoly Burakov 
729c7f5dba7SAnatoly Burakov 	return -1;
730c7f5dba7SAnatoly Burakov }
731c7f5dba7SAnatoly Burakov 
732c7f5dba7SAnatoly Burakov static int
733c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
734c7f5dba7SAnatoly Burakov {
735c7f5dba7SAnatoly Burakov 	struct extmem_param param;
736c7f5dba7SAnatoly Burakov 	int socket_id, ret;
737c7f5dba7SAnatoly Burakov 
738c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
739c7f5dba7SAnatoly Burakov 
740c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
741c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
742c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
743c7f5dba7SAnatoly Burakov 		/* create our heap */
744c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
745c7f5dba7SAnatoly Burakov 		if (ret < 0) {
746c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
747c7f5dba7SAnatoly Burakov 			return -1;
748c7f5dba7SAnatoly Burakov 		}
749c7f5dba7SAnatoly Burakov 	}
750c7f5dba7SAnatoly Burakov 
751c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
752c7f5dba7SAnatoly Burakov 	if (ret < 0) {
753c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
754c7f5dba7SAnatoly Burakov 		return -1;
755c7f5dba7SAnatoly Burakov 	}
756c7f5dba7SAnatoly Burakov 
757c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
758c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
759c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
760c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
761c7f5dba7SAnatoly Burakov 
762c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
763c7f5dba7SAnatoly Burakov 
764c7f5dba7SAnatoly Burakov 	/* not needed any more */
765c7f5dba7SAnatoly Burakov 	free(param.iova_table);
766c7f5dba7SAnatoly Burakov 
767c7f5dba7SAnatoly Burakov 	if (ret < 0) {
768c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
769c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
770c7f5dba7SAnatoly Burakov 		return -1;
771c7f5dba7SAnatoly Burakov 	}
772c7f5dba7SAnatoly Burakov 
773c7f5dba7SAnatoly Burakov 	/* success */
774c7f5dba7SAnatoly Burakov 
775c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
776c7f5dba7SAnatoly Burakov 			param.len >> 20);
777c7f5dba7SAnatoly Burakov 
778c7f5dba7SAnatoly Burakov 	return 0;
779c7f5dba7SAnatoly Burakov }
780c7f5dba7SAnatoly Burakov 
781af75078fSIntel /*
782af75078fSIntel  * Configuration initialisation done once at init time.
783af75078fSIntel  */
784af75078fSIntel static void
785af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
786af75078fSIntel 		 unsigned int socket_id)
787af75078fSIntel {
788af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
789bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
790af75078fSIntel 	uint32_t mb_size;
791af75078fSIntel 
792dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
793af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
794148f963fSBruce Richardson 
795285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
796d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
797d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
798d1eb542eSOlivier Matz 
799c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
800c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
801c7f5dba7SAnatoly Burakov 		{
802c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
803c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
804c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
805c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
806c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
807c7f5dba7SAnatoly Burakov 			break;
808c7f5dba7SAnatoly Burakov 		}
809c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
810c7f5dba7SAnatoly Burakov 		{
811b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
812c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
813148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
814148f963fSBruce Richardson 				socket_id, 0);
81524427bb9SOlivier Matz 			if (rte_mp == NULL)
81624427bb9SOlivier Matz 				goto err;
817b19a0c75SOlivier Matz 
818b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
819b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
820b19a0c75SOlivier Matz 				rte_mp = NULL;
82124427bb9SOlivier Matz 				goto err;
822b19a0c75SOlivier Matz 			}
823b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
824b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
825c7f5dba7SAnatoly Burakov 			break;
826c7f5dba7SAnatoly Burakov 		}
827c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
828c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
829c7f5dba7SAnatoly Burakov 		{
830c7f5dba7SAnatoly Burakov 			int heap_socket;
831c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
832c7f5dba7SAnatoly Burakov 
833c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
834c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
835c7f5dba7SAnatoly Burakov 
836c7f5dba7SAnatoly Burakov 			heap_socket =
837c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
838c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
839c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
840c7f5dba7SAnatoly Burakov 
8410e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
8420e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
843ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
844c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
845c7f5dba7SAnatoly Burakov 					heap_socket);
846c7f5dba7SAnatoly Burakov 			break;
847c7f5dba7SAnatoly Burakov 		}
848c7f5dba7SAnatoly Burakov 	default:
849c7f5dba7SAnatoly Burakov 		{
850c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
851c7f5dba7SAnatoly Burakov 		}
852bece7b6cSChristian Ehrhardt 	}
853148f963fSBruce Richardson 
85424427bb9SOlivier Matz err:
855af75078fSIntel 	if (rte_mp == NULL) {
856d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
857d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
858d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
859148f963fSBruce Richardson 	} else if (verbose_level > 0) {
860591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
861af75078fSIntel 	}
862af75078fSIntel }
863af75078fSIntel 
86420a0286fSLiu Xiaofeng /*
86520a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
86620a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
86720a0286fSLiu Xiaofeng  */
86820a0286fSLiu Xiaofeng static int
86920a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
87020a0286fSLiu Xiaofeng {
87120a0286fSLiu Xiaofeng 	static int warning_once = 0;
87220a0286fSLiu Xiaofeng 
873c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
87420a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
87520a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
87620a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
87720a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
87820a0286fSLiu Xiaofeng 			       " --numa.\n");
87920a0286fSLiu Xiaofeng 		warning_once = 1;
88020a0286fSLiu Xiaofeng 		return -1;
88120a0286fSLiu Xiaofeng 	}
88220a0286fSLiu Xiaofeng 	return 0;
88320a0286fSLiu Xiaofeng }
88420a0286fSLiu Xiaofeng 
8853f7311baSWei Dai /*
8863f7311baSWei Dai  * Get the allowed maximum number of RX queues.
8873f7311baSWei Dai  * *pid return the port id which has minimal value of
8883f7311baSWei Dai  * max_rx_queues in all ports.
8893f7311baSWei Dai  */
8903f7311baSWei Dai queueid_t
8913f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
8923f7311baSWei Dai {
8933f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
8943f7311baSWei Dai 	portid_t pi;
8953f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
8963f7311baSWei Dai 
8973f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
8983f7311baSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
8993f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
9003f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
9013f7311baSWei Dai 			*pid = pi;
9023f7311baSWei Dai 		}
9033f7311baSWei Dai 	}
9043f7311baSWei Dai 	return allowed_max_rxq;
9053f7311baSWei Dai }
9063f7311baSWei Dai 
9073f7311baSWei Dai /*
9083f7311baSWei Dai  * Check input rxq is valid or not.
9093f7311baSWei Dai  * If input rxq is not greater than any of maximum number
9103f7311baSWei Dai  * of RX queues of all ports, it is valid.
9113f7311baSWei Dai  * if valid, return 0, else return -1
9123f7311baSWei Dai  */
9133f7311baSWei Dai int
9143f7311baSWei Dai check_nb_rxq(queueid_t rxq)
9153f7311baSWei Dai {
9163f7311baSWei Dai 	queueid_t allowed_max_rxq;
9173f7311baSWei Dai 	portid_t pid = 0;
9183f7311baSWei Dai 
9193f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
9203f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
9213f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
9223f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
9233f7311baSWei Dai 		       rxq,
9243f7311baSWei Dai 		       allowed_max_rxq,
9253f7311baSWei Dai 		       pid);
9263f7311baSWei Dai 		return -1;
9273f7311baSWei Dai 	}
9283f7311baSWei Dai 	return 0;
9293f7311baSWei Dai }
9303f7311baSWei Dai 
93136db4f6cSWei Dai /*
93236db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
93336db4f6cSWei Dai  * *pid return the port id which has minimal value of
93436db4f6cSWei Dai  * max_tx_queues in all ports.
93536db4f6cSWei Dai  */
93636db4f6cSWei Dai queueid_t
93736db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
93836db4f6cSWei Dai {
93936db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
94036db4f6cSWei Dai 	portid_t pi;
94136db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
94236db4f6cSWei Dai 
94336db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
94436db4f6cSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
94536db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
94636db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
94736db4f6cSWei Dai 			*pid = pi;
94836db4f6cSWei Dai 		}
94936db4f6cSWei Dai 	}
95036db4f6cSWei Dai 	return allowed_max_txq;
95136db4f6cSWei Dai }
95236db4f6cSWei Dai 
95336db4f6cSWei Dai /*
95436db4f6cSWei Dai  * Check input txq is valid or not.
95536db4f6cSWei Dai  * If input txq is not greater than any of maximum number
95636db4f6cSWei Dai  * of TX queues of all ports, it is valid.
95736db4f6cSWei Dai  * if valid, return 0, else return -1
95836db4f6cSWei Dai  */
95936db4f6cSWei Dai int
96036db4f6cSWei Dai check_nb_txq(queueid_t txq)
96136db4f6cSWei Dai {
96236db4f6cSWei Dai 	queueid_t allowed_max_txq;
96336db4f6cSWei Dai 	portid_t pid = 0;
96436db4f6cSWei Dai 
96536db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
96636db4f6cSWei Dai 	if (txq > allowed_max_txq) {
96736db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
96836db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
96936db4f6cSWei Dai 		       txq,
97036db4f6cSWei Dai 		       allowed_max_txq,
97136db4f6cSWei Dai 		       pid);
97236db4f6cSWei Dai 		return -1;
97336db4f6cSWei Dai 	}
97436db4f6cSWei Dai 	return 0;
97536db4f6cSWei Dai }
97636db4f6cSWei Dai 
/*
 * One-time global initialisation: allocates the per-lcore forwarding state,
 * applies default per-port Rx/Tx configuration, creates the mbuf pools,
 * sets up GSO/GRO contexts and the forwarding streams.  Exits the
 * application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* under NUMA, start from "no per-port/per-ring placement configured" */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* drop fast-free from the default Tx offloads if the device
		 * does not advertise support for it
		 */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			/* count how many ports sit on each socket */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* worst-case sizing: descriptors + caches + one burst,
		 * scaled by the maximum number of ports
		 */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		/* prefer the pool local to the lcore's socket, else pool 0 */
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	/* hand the lcore array to softnic ports so their forwarding thread
	 * can find the per-lcore state
	 */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}
1143ce8d5614SIntel 
11442950a769SDeclan Doherty 
11452950a769SDeclan Doherty void
1146a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
11472950a769SDeclan Doherty {
11482950a769SDeclan Doherty 	struct rte_port *port;
11492950a769SDeclan Doherty 
11502950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
11512950a769SDeclan Doherty 	port = &ports[new_port_id];
11522950a769SDeclan Doherty 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
11532950a769SDeclan Doherty 
11542950a769SDeclan Doherty 	/* set flag to initialize port/queue */
11552950a769SDeclan Doherty 	port->need_reconfig = 1;
11562950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1157a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
11582950a769SDeclan Doherty 
11592950a769SDeclan Doherty 	init_port_config();
11602950a769SDeclan Doherty }
11612950a769SDeclan Doherty 
11622950a769SDeclan Doherty 
/*
 * (Re)allocate the global forwarding stream array.
 *
 * Validates nb_rxq/nb_txq against every port's limits, assigns each port a
 * socket id, then, if the required stream count changed, frees the old
 * fwd_streams array and allocates a new one of nb_ports * max(nb_rxq,
 * nb_txq) entries.  Returns 0 on success, -1 on invalid configuration;
 * exits the application if allocation fails.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* explicit user placement wins over device locality */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* one stream per queue per port, sized by the larger of rx/tx */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* nothing to do when the stream count is unchanged */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1248af75078fSIntel 
1249af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1250af75078fSIntel static void
1251af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1252af75078fSIntel {
1253af75078fSIntel 	unsigned int total_burst;
1254af75078fSIntel 	unsigned int nb_burst;
1255af75078fSIntel 	unsigned int burst_stats[3];
1256af75078fSIntel 	uint16_t pktnb_stats[3];
1257af75078fSIntel 	uint16_t nb_pkt;
1258af75078fSIntel 	int burst_percent[3];
1259af75078fSIntel 
1260af75078fSIntel 	/*
1261af75078fSIntel 	 * First compute the total number of packet bursts and the
1262af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1263af75078fSIntel 	 */
1264af75078fSIntel 	total_burst = 0;
1265af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1266af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1267af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1268af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1269af75078fSIntel 		if (nb_burst == 0)
1270af75078fSIntel 			continue;
1271af75078fSIntel 		total_burst += nb_burst;
1272af75078fSIntel 		if (nb_burst > burst_stats[0]) {
1273af75078fSIntel 			burst_stats[1] = burst_stats[0];
1274af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
1275af75078fSIntel 			burst_stats[0] = nb_burst;
1276af75078fSIntel 			pktnb_stats[0] = nb_pkt;
1277fe613657SDaniel Shelepov 		} else if (nb_burst > burst_stats[1]) {
1278fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1279fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
1280af75078fSIntel 		}
1281af75078fSIntel 	}
1282af75078fSIntel 	if (total_burst == 0)
1283af75078fSIntel 		return;
1284af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1285af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1286af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
1287af75078fSIntel 	if (burst_stats[0] == total_burst) {
1288af75078fSIntel 		printf("]\n");
1289af75078fSIntel 		return;
1290af75078fSIntel 	}
1291af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1292af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
1293af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
1294af75078fSIntel 		return;
1295af75078fSIntel 	}
1296af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1297af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1298af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1299af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1300af75078fSIntel 		return;
1301af75078fSIntel 	}
1302af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1303af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1304af75078fSIntel }
1305af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1306af75078fSIntel 
/*
 * Print the per-port forwarding statistics for @port_id.
 *
 * Two output layouts are used: a compact one when no RX/TX queue-stats
 * mapping is enabled on the port, and a wide column-aligned one when a
 * mapping is active (per-queue counters are then appended at the end).
 * Checksum error counters are only printed when the csum engine is the
 * current forwarding engine, since only it maintains them.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Compact layout: no queue-stats mapping on either direction. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		/* Only show error lines when something actually went wrong. */
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	/* Wide layout: columns aligned with the per-queue lines below. */
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"    Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst histograms exist only when a stream is attached to the port. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-queue counters, one line per stats register, RX then TX. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
1391af75078fSIntel 
1392af75078fSIntel static void
1393af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1394af75078fSIntel {
1395af75078fSIntel 	struct fwd_stream *fs;
1396af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1397af75078fSIntel 
1398af75078fSIntel 	fs = fwd_streams[stream_id];
1399af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1400af75078fSIntel 	    (fs->fwd_dropped == 0))
1401af75078fSIntel 		return;
1402af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1403af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1404af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1405af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1406af75078fSIntel 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1407af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1408af75078fSIntel 
1409af75078fSIntel 	/* if checksum mode */
1410af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1411013af9b6SIntel 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1412*58d475b7SJerin Jacob 			"%-14u Rx- bad outer L4 checksum: %-14u\n",
1413*58d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1414*58d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
1415af75078fSIntel 	}
1416af75078fSIntel 
1417af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1418af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1419af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1420af75078fSIntel #endif
1421af75078fSIntel }
1422af75078fSIntel 
1423af75078fSIntel static void
14247741e4cfSIntel flush_fwd_rx_queues(void)
1425af75078fSIntel {
1426af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1427af75078fSIntel 	portid_t  rxp;
14287741e4cfSIntel 	portid_t port_id;
1429af75078fSIntel 	queueid_t rxq;
1430af75078fSIntel 	uint16_t  nb_rx;
1431af75078fSIntel 	uint16_t  i;
1432af75078fSIntel 	uint8_t   j;
1433f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1434594302c7SJames Poole 	uint64_t timer_period;
1435f487715fSReshma Pattan 
1436f487715fSReshma Pattan 	/* convert to number of cycles */
1437594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1438af75078fSIntel 
1439af75078fSIntel 	for (j = 0; j < 2; j++) {
14407741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1441af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
14427741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1443f487715fSReshma Pattan 				/**
1444f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1445f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1446f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1447f487715fSReshma Pattan 				* after 1sec timer expiry.
1448f487715fSReshma Pattan 				*/
1449f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1450af75078fSIntel 				do {
14517741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1452013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1453af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1454af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1455f487715fSReshma Pattan 
1456f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1457f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1458f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1459f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1460f487715fSReshma Pattan 					(timer_tsc < timer_period));
1461f487715fSReshma Pattan 				timer_tsc = 0;
1462af75078fSIntel 			}
1463af75078fSIntel 		}
1464af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1465af75078fSIntel 	}
1466af75078fSIntel }
1467af75078fSIntel 
/*
 * Main forwarding loop of one logical core: repeatedly apply @pkt_fwd to
 * every forwarding stream assigned to @fc until fc->stopped is set (by
 * stop_packet_forwarding() from another lcore).
 *
 * When built with bitrate/latency statistics support, the single lcore
 * designated for each of those tasks also refreshes them from inside
 * this loop (bitrate roughly once per second).
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	/* Snapshot the port count once; used by the periodic bitrate pass. */
	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* This lcore owns the contiguous stream slice [stream_idx, +stream_nb). */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Only the designated bitrate lcore runs the calculation. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Likewise, only the designated latency-stats lcore updates. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1510af75078fSIntel 
1511af75078fSIntel static int
1512af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1513af75078fSIntel {
1514af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1515af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1516af75078fSIntel 	return 0;
1517af75078fSIntel }
1518af75078fSIntel 
1519af75078fSIntel /*
1520af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1521af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1522af75078fSIntel  */
1523af75078fSIntel static int
1524af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1525af75078fSIntel {
1526af75078fSIntel 	struct fwd_lcore *fwd_lc;
1527af75078fSIntel 	struct fwd_lcore tmp_lcore;
1528af75078fSIntel 
1529af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1530af75078fSIntel 	tmp_lcore = *fwd_lc;
1531af75078fSIntel 	tmp_lcore.stopped = 1;
1532af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1533af75078fSIntel 	return 0;
1534af75078fSIntel }
1535af75078fSIntel 
1536af75078fSIntel /*
1537af75078fSIntel  * Launch packet forwarding:
1538af75078fSIntel  *     - Setup per-port forwarding context.
1539af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1540af75078fSIntel  */
1541af75078fSIntel static void
1542af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1543af75078fSIntel {
1544af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1545af75078fSIntel 	unsigned int i;
1546af75078fSIntel 	unsigned int lc_id;
1547af75078fSIntel 	int diag;
1548af75078fSIntel 
1549af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1550af75078fSIntel 	if (port_fwd_begin != NULL) {
1551af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1552af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1553af75078fSIntel 	}
1554af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1555af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1556af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1557af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1558af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1559af75078fSIntel 						     fwd_lcores[i], lc_id);
1560af75078fSIntel 			if (diag != 0)
1561af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1562af75078fSIntel 				       lc_id, diag);
1563af75078fSIntel 		}
1564af75078fSIntel 	}
1565af75078fSIntel }
1566af75078fSIntel 
1567af75078fSIntel /*
156803ce2c53SMatan Azrad  * Update the forward ports list.
156903ce2c53SMatan Azrad  */
157003ce2c53SMatan Azrad void
157103ce2c53SMatan Azrad update_fwd_ports(portid_t new_pid)
157203ce2c53SMatan Azrad {
157303ce2c53SMatan Azrad 	unsigned int i;
157403ce2c53SMatan Azrad 	unsigned int new_nb_fwd_ports = 0;
157503ce2c53SMatan Azrad 	int move = 0;
157603ce2c53SMatan Azrad 
157703ce2c53SMatan Azrad 	for (i = 0; i < nb_fwd_ports; ++i) {
157803ce2c53SMatan Azrad 		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
157903ce2c53SMatan Azrad 			move = 1;
158003ce2c53SMatan Azrad 		else if (move)
158103ce2c53SMatan Azrad 			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
158203ce2c53SMatan Azrad 		else
158303ce2c53SMatan Azrad 			new_nb_fwd_ports++;
158403ce2c53SMatan Azrad 	}
158503ce2c53SMatan Azrad 	if (new_pid < RTE_MAX_ETHPORTS)
158603ce2c53SMatan Azrad 		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
158703ce2c53SMatan Azrad 
158803ce2c53SMatan Azrad 	nb_fwd_ports = new_nb_fwd_ports;
158903ce2c53SMatan Azrad 	nb_cfg_ports = new_nb_fwd_ports;
159003ce2c53SMatan Azrad }
159103ce2c53SMatan Azrad 
/*
 * Launch packet forwarding configuration.
 *
 * Validates that the current forwarding mode has the queues it needs and
 * that all ports are started, resets all per-port and per-stream counters,
 * then launches the forwarding lcores. When @with_tx_first is non-zero,
 * that many single TXONLY bursts are sent first to prime loopback flows.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* The chosen mode must have the RX/TX queues it relies on. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* In DCB mode every forwarding port must be DCB-configured. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain stale packets so they are not counted in this run. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot current HW stats as the baseline for this run. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset all per-stream software counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
		fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optionally send priming TXONLY bursts before the real run. */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1699af75078fSIntel 
/*
 * Stop the current forwarding run and print its statistics.
 *
 * Signals every forwarding lcore to stop, waits for them to finish,
 * folds per-stream counters into the owning ports, prints per-stream,
 * per-port and accumulated statistics (the per-port numbers are deltas
 * against the baseline snapshot taken in start_packet_forwarding()),
 * and finally marks the test as done.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	/* Ask every forwarding lcore to exit its loop, then wait. */
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Give the engine a chance to tear down each forwarding port. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/*
	 * Fold per-stream counters into their ports. With more streams
	 * than ports the per-stream stats are printed individually and
	 * the port's stream links are cleared; otherwise the single
	 * stream is linked to its ports for the burst-stats display.
	 */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

		ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
				fwd_streams[sm_id]->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/*
		 * Subtract the baseline snapshot so the displayed numbers
		 * cover only this run, and zero the snapshot afterwards.
		 */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	/* Accumulated totals across all forwarding ports. */
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1840af75078fSIntel 
1841cfae07fdSOuyang Changchun void
1842cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1843cfae07fdSOuyang Changchun {
1844492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1845cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
1846cfae07fdSOuyang Changchun }
1847cfae07fdSOuyang Changchun 
1848cfae07fdSOuyang Changchun void
1849cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1850cfae07fdSOuyang Changchun {
1851492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
1852cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
1853cfae07fdSOuyang Changchun }
1854cfae07fdSOuyang Changchun 
1855ce8d5614SIntel static int
1856ce8d5614SIntel all_ports_started(void)
1857ce8d5614SIntel {
1858ce8d5614SIntel 	portid_t pi;
1859ce8d5614SIntel 	struct rte_port *port;
1860ce8d5614SIntel 
18617d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1862ce8d5614SIntel 		port = &ports[pi];
1863ce8d5614SIntel 		/* Check if there is a port which is not started */
186441b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
186541b05095SBernard Iremonger 			(port->slave_flag == 0))
1866ce8d5614SIntel 			return 0;
1867ce8d5614SIntel 	}
1868ce8d5614SIntel 
1869ce8d5614SIntel 	/* No port is not started */
1870ce8d5614SIntel 	return 1;
1871ce8d5614SIntel }
1872ce8d5614SIntel 
1873148f963fSBruce Richardson int
18746018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
18756018eb8cSShahaf Shuler {
18766018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
18776018eb8cSShahaf Shuler 
18786018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
18796018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
18806018eb8cSShahaf Shuler 		return 0;
18816018eb8cSShahaf Shuler 	return 1;
18826018eb8cSShahaf Shuler }
18836018eb8cSShahaf Shuler 
18846018eb8cSShahaf Shuler int
1885edab33b1STetsuya Mukawa all_ports_stopped(void)
1886edab33b1STetsuya Mukawa {
1887edab33b1STetsuya Mukawa 	portid_t pi;
1888edab33b1STetsuya Mukawa 
18897d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
18906018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
1891edab33b1STetsuya Mukawa 			return 0;
1892edab33b1STetsuya Mukawa 	}
1893edab33b1STetsuya Mukawa 
1894edab33b1STetsuya Mukawa 	return 1;
1895edab33b1STetsuya Mukawa }
1896edab33b1STetsuya Mukawa 
1897edab33b1STetsuya Mukawa int
1898edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
1899edab33b1STetsuya Mukawa {
1900edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1901edab33b1STetsuya Mukawa 		return 0;
1902edab33b1STetsuya Mukawa 
1903edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1904edab33b1STetsuya Mukawa 		return 0;
1905edab33b1STetsuya Mukawa 
1906edab33b1STetsuya Mukawa 	return 1;
1907edab33b1STetsuya Mukawa }
1908edab33b1STetsuya Mukawa 
1909edab33b1STetsuya Mukawa static int
1910edab33b1STetsuya Mukawa port_is_closed(portid_t port_id)
1911edab33b1STetsuya Mukawa {
1912edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1913edab33b1STetsuya Mukawa 		return 0;
1914edab33b1STetsuya Mukawa 
1915edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1916edab33b1STetsuya Mukawa 		return 0;
1917edab33b1STetsuya Mukawa 
1918edab33b1STetsuya Mukawa 	return 1;
1919edab33b1STetsuya Mukawa }
1920edab33b1STetsuya Mukawa 
1921edab33b1STetsuya Mukawa int
1922ce8d5614SIntel start_port(portid_t pid)
1923ce8d5614SIntel {
192492d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
1925ce8d5614SIntel 	portid_t pi;
1926ce8d5614SIntel 	queueid_t qi;
1927ce8d5614SIntel 	struct rte_port *port;
19282950a769SDeclan Doherty 	struct ether_addr mac_addr;
192976ad4a2dSGaetan Rivet 	enum rte_eth_event_type event_type;
1930ce8d5614SIntel 
19314468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
19324468635fSMichael Qiu 		return 0;
19334468635fSMichael Qiu 
1934ce8d5614SIntel 	if(dcb_config)
1935ce8d5614SIntel 		dcb_test = 1;
19367d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1937edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1938ce8d5614SIntel 			continue;
1939ce8d5614SIntel 
194092d2703eSMichael Qiu 		need_check_link_status = 0;
1941ce8d5614SIntel 		port = &ports[pi];
1942ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1943ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1944ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
1945ce8d5614SIntel 			continue;
1946ce8d5614SIntel 		}
1947ce8d5614SIntel 
1948ce8d5614SIntel 		if (port->need_reconfig > 0) {
1949ce8d5614SIntel 			port->need_reconfig = 0;
1950ce8d5614SIntel 
19517ee3e944SVasily Philipov 			if (flow_isolate_all) {
19527ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
19537ee3e944SVasily Philipov 				if (ret) {
19547ee3e944SVasily Philipov 					printf("Failed to apply isolated"
19557ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
19567ee3e944SVasily Philipov 					return -1;
19577ee3e944SVasily Philipov 				}
19587ee3e944SVasily Philipov 			}
19597ee3e944SVasily Philipov 
19605706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
196120a0286fSLiu Xiaofeng 					port->socket_id);
1962ce8d5614SIntel 			/* configure port */
1963ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1964ce8d5614SIntel 						&(port->dev_conf));
1965ce8d5614SIntel 			if (diag != 0) {
1966ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1967ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1968ce8d5614SIntel 					printf("Port %d can not be set back "
1969ce8d5614SIntel 							"to stopped\n", pi);
1970ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
1971ce8d5614SIntel 				/* try to reconfigure port next time */
1972ce8d5614SIntel 				port->need_reconfig = 1;
1973148f963fSBruce Richardson 				return -1;
1974ce8d5614SIntel 			}
1975ce8d5614SIntel 		}
1976ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
1977ce8d5614SIntel 			port->need_reconfig_queues = 0;
1978ce8d5614SIntel 			/* setup tx queues */
1979ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
1980b6ea6408SIntel 				if ((numa_support) &&
1981b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
1982b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1983d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
1984d44f8a48SQi Zhang 						txring_numa[pi],
1985d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
1986b6ea6408SIntel 				else
1987b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1988d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
1989d44f8a48SQi Zhang 						port->socket_id,
1990d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
1991b6ea6408SIntel 
1992ce8d5614SIntel 				if (diag == 0)
1993ce8d5614SIntel 					continue;
1994ce8d5614SIntel 
1995ce8d5614SIntel 				/* Fail to setup tx queue, return */
1996ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1997ce8d5614SIntel 							RTE_PORT_HANDLING,
1998ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1999ce8d5614SIntel 					printf("Port %d can not be set back "
2000ce8d5614SIntel 							"to stopped\n", pi);
2001d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
2002d44f8a48SQi Zhang 				       pi);
2003ce8d5614SIntel 				/* try to reconfigure queues next time */
2004ce8d5614SIntel 				port->need_reconfig_queues = 1;
2005148f963fSBruce Richardson 				return -1;
2006ce8d5614SIntel 			}
2007ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2008d44f8a48SQi Zhang 				/* setup rx queues */
2009b6ea6408SIntel 				if ((numa_support) &&
2010b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2011b6ea6408SIntel 					struct rte_mempool * mp =
2012b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
2013b6ea6408SIntel 					if (mp == NULL) {
2014b6ea6408SIntel 						printf("Failed to setup RX queue:"
2015b6ea6408SIntel 							"No mempool allocation"
2016b6ea6408SIntel 							" on the socket %d\n",
2017b6ea6408SIntel 							rxring_numa[pi]);
2018148f963fSBruce Richardson 						return -1;
2019b6ea6408SIntel 					}
2020b6ea6408SIntel 
2021b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2022d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2023d44f8a48SQi Zhang 					     rxring_numa[pi],
2024d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2025d44f8a48SQi Zhang 					     mp);
20261e1d6bddSBernard Iremonger 				} else {
20271e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
20281e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
20291e1d6bddSBernard Iremonger 					if (mp == NULL) {
20301e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
20311e1d6bddSBernard Iremonger 							"No mempool allocation"
20321e1d6bddSBernard Iremonger 							" on the socket %d\n",
20331e1d6bddSBernard Iremonger 							port->socket_id);
20341e1d6bddSBernard Iremonger 						return -1;
2035b6ea6408SIntel 					}
2036b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2037d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2038d44f8a48SQi Zhang 					     port->socket_id,
2039d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2040d44f8a48SQi Zhang 					     mp);
20411e1d6bddSBernard Iremonger 				}
2042ce8d5614SIntel 				if (diag == 0)
2043ce8d5614SIntel 					continue;
2044ce8d5614SIntel 
2045ce8d5614SIntel 				/* Fail to setup rx queue, return */
2046ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2047ce8d5614SIntel 							RTE_PORT_HANDLING,
2048ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2049ce8d5614SIntel 					printf("Port %d can not be set back "
2050ce8d5614SIntel 							"to stopped\n", pi);
2051d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2052d44f8a48SQi Zhang 				       pi);
2053ce8d5614SIntel 				/* try to reconfigure queues next time */
2054ce8d5614SIntel 				port->need_reconfig_queues = 1;
2055148f963fSBruce Richardson 				return -1;
2056ce8d5614SIntel 			}
2057ce8d5614SIntel 		}
205876ad4a2dSGaetan Rivet 
2059ce8d5614SIntel 		/* start port */
2060ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2061ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2062ce8d5614SIntel 
2063ce8d5614SIntel 			/* Fail to setup rx queue, return */
2064ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2065ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2066ce8d5614SIntel 				printf("Port %d can not be set back to "
2067ce8d5614SIntel 							"stopped\n", pi);
2068ce8d5614SIntel 			continue;
2069ce8d5614SIntel 		}
2070ce8d5614SIntel 
2071ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2072ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2073ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2074ce8d5614SIntel 
20752950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
2076d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
20772950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
20782950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
20792950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2080d8c89163SZijie Pan 
2081ce8d5614SIntel 		/* at least one port started, need checking link status */
2082ce8d5614SIntel 		need_check_link_status = 1;
2083ce8d5614SIntel 	}
2084ce8d5614SIntel 
20854fb82244SMatan Azrad 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
20864fb82244SMatan Azrad 	     event_type < RTE_ETH_EVENT_MAX;
20874fb82244SMatan Azrad 	     event_type++) {
20884fb82244SMatan Azrad 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
20894fb82244SMatan Azrad 						event_type,
20904fb82244SMatan Azrad 						eth_event_callback,
20914fb82244SMatan Azrad 						NULL);
20924fb82244SMatan Azrad 		if (diag) {
20934fb82244SMatan Azrad 			printf("Failed to setup even callback for event %d\n",
20944fb82244SMatan Azrad 				event_type);
20954fb82244SMatan Azrad 			return -1;
20964fb82244SMatan Azrad 		}
20974fb82244SMatan Azrad 	}
20984fb82244SMatan Azrad 
209992d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2100edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
210192d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2102ce8d5614SIntel 		printf("Please stop the ports first\n");
2103ce8d5614SIntel 
2104ce8d5614SIntel 	printf("Done\n");
2105148f963fSBruce Richardson 	return 0;
2106ce8d5614SIntel }
2107ce8d5614SIntel 
2108ce8d5614SIntel void
2109ce8d5614SIntel stop_port(portid_t pid)
2110ce8d5614SIntel {
2111ce8d5614SIntel 	portid_t pi;
2112ce8d5614SIntel 	struct rte_port *port;
2113ce8d5614SIntel 	int need_check_link_status = 0;
2114ce8d5614SIntel 
2115ce8d5614SIntel 	if (dcb_test) {
2116ce8d5614SIntel 		dcb_test = 0;
2117ce8d5614SIntel 		dcb_config = 0;
2118ce8d5614SIntel 	}
21194468635fSMichael Qiu 
21204468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
21214468635fSMichael Qiu 		return;
21224468635fSMichael Qiu 
2123ce8d5614SIntel 	printf("Stopping ports...\n");
2124ce8d5614SIntel 
21257d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
21264468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2127ce8d5614SIntel 			continue;
2128ce8d5614SIntel 
2129a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2130a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2131a8ef3e3aSBernard Iremonger 			continue;
2132a8ef3e3aSBernard Iremonger 		}
2133a8ef3e3aSBernard Iremonger 
21340e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
21350e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
21360e545d30SBernard Iremonger 			continue;
21370e545d30SBernard Iremonger 		}
21380e545d30SBernard Iremonger 
2139ce8d5614SIntel 		port = &ports[pi];
2140ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2141ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
2142ce8d5614SIntel 			continue;
2143ce8d5614SIntel 
2144ce8d5614SIntel 		rte_eth_dev_stop(pi);
2145ce8d5614SIntel 
2146ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2147ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2148ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
2149ce8d5614SIntel 		need_check_link_status = 1;
2150ce8d5614SIntel 	}
2151bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
2152edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
2153ce8d5614SIntel 
2154ce8d5614SIntel 	printf("Done\n");
2155ce8d5614SIntel }
2156ce8d5614SIntel 
2157ce8d5614SIntel void
2158ce8d5614SIntel close_port(portid_t pid)
2159ce8d5614SIntel {
2160ce8d5614SIntel 	portid_t pi;
2161ce8d5614SIntel 	struct rte_port *port;
2162ce8d5614SIntel 
21634468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
21644468635fSMichael Qiu 		return;
21654468635fSMichael Qiu 
2166ce8d5614SIntel 	printf("Closing ports...\n");
2167ce8d5614SIntel 
21687d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
21694468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2170ce8d5614SIntel 			continue;
2171ce8d5614SIntel 
2172a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2173a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2174a8ef3e3aSBernard Iremonger 			continue;
2175a8ef3e3aSBernard Iremonger 		}
2176a8ef3e3aSBernard Iremonger 
21770e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
21780e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
21790e545d30SBernard Iremonger 			continue;
21800e545d30SBernard Iremonger 		}
21810e545d30SBernard Iremonger 
2182ce8d5614SIntel 		port = &ports[pi];
2183ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2184d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2185d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
2186d4e8ad64SMichael Qiu 			continue;
2187d4e8ad64SMichael Qiu 		}
2188d4e8ad64SMichael Qiu 
2189d4e8ad64SMichael Qiu 		if (rte_atomic16_cmpset(&(port->port_status),
2190ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2191ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2192ce8d5614SIntel 			continue;
2193ce8d5614SIntel 		}
2194ce8d5614SIntel 
2195938a184aSAdrien Mazarguil 		if (port->flow_list)
2196938a184aSAdrien Mazarguil 			port_flow_flush(pi);
2197ce8d5614SIntel 		rte_eth_dev_close(pi);
2198ce8d5614SIntel 
2199ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2200ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2201b38bb262SPablo de Lara 			printf("Port %d cannot be set to closed\n", pi);
2202ce8d5614SIntel 	}
2203ce8d5614SIntel 
2204ce8d5614SIntel 	printf("Done\n");
2205ce8d5614SIntel }
2206ce8d5614SIntel 
2207edab33b1STetsuya Mukawa void
220897f1e196SWei Dai reset_port(portid_t pid)
220997f1e196SWei Dai {
221097f1e196SWei Dai 	int diag;
221197f1e196SWei Dai 	portid_t pi;
221297f1e196SWei Dai 	struct rte_port *port;
221397f1e196SWei Dai 
221497f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
221597f1e196SWei Dai 		return;
221697f1e196SWei Dai 
221797f1e196SWei Dai 	printf("Resetting ports...\n");
221897f1e196SWei Dai 
221997f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
222097f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
222197f1e196SWei Dai 			continue;
222297f1e196SWei Dai 
222397f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
222497f1e196SWei Dai 			printf("Please remove port %d from forwarding "
222597f1e196SWei Dai 			       "configuration.\n", pi);
222697f1e196SWei Dai 			continue;
222797f1e196SWei Dai 		}
222897f1e196SWei Dai 
222997f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
223097f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
223197f1e196SWei Dai 			       pi);
223297f1e196SWei Dai 			continue;
223397f1e196SWei Dai 		}
223497f1e196SWei Dai 
223597f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
223697f1e196SWei Dai 		if (diag == 0) {
223797f1e196SWei Dai 			port = &ports[pi];
223897f1e196SWei Dai 			port->need_reconfig = 1;
223997f1e196SWei Dai 			port->need_reconfig_queues = 1;
224097f1e196SWei Dai 		} else {
224197f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
224297f1e196SWei Dai 		}
224397f1e196SWei Dai 	}
224497f1e196SWei Dai 
224597f1e196SWei Dai 	printf("Done\n");
224697f1e196SWei Dai }
224797f1e196SWei Dai 
2248fb73e096SJeff Guo static int
2249fb73e096SJeff Guo eth_dev_event_callback_register(void)
2250fb73e096SJeff Guo {
2251fb73e096SJeff Guo 	int ret;
2252fb73e096SJeff Guo 
2253fb73e096SJeff Guo 	/* register the device event callback */
2254fb73e096SJeff Guo 	ret = rte_dev_event_callback_register(NULL,
2255fb73e096SJeff Guo 		eth_dev_event_callback, NULL);
2256fb73e096SJeff Guo 	if (ret) {
2257fb73e096SJeff Guo 		printf("Failed to register device event callback\n");
2258fb73e096SJeff Guo 		return -1;
2259fb73e096SJeff Guo 	}
2260fb73e096SJeff Guo 
2261fb73e096SJeff Guo 	return 0;
2262fb73e096SJeff Guo }
2263fb73e096SJeff Guo 
2264fb73e096SJeff Guo 
2265fb73e096SJeff Guo static int
2266fb73e096SJeff Guo eth_dev_event_callback_unregister(void)
2267fb73e096SJeff Guo {
2268fb73e096SJeff Guo 	int ret;
2269fb73e096SJeff Guo 
2270fb73e096SJeff Guo 	/* unregister the device event callback */
2271fb73e096SJeff Guo 	ret = rte_dev_event_callback_unregister(NULL,
2272fb73e096SJeff Guo 		eth_dev_event_callback, NULL);
2273fb73e096SJeff Guo 	if (ret < 0) {
2274fb73e096SJeff Guo 		printf("Failed to unregister device event callback\n");
2275fb73e096SJeff Guo 		return -1;
2276fb73e096SJeff Guo 	}
2277fb73e096SJeff Guo 
2278fb73e096SJeff Guo 	return 0;
2279fb73e096SJeff Guo }
2280fb73e096SJeff Guo 
228197f1e196SWei Dai void
2282edab33b1STetsuya Mukawa attach_port(char *identifier)
2283ce8d5614SIntel {
2284ebf5e9b7SBernard Iremonger 	portid_t pi = 0;
2285931126baSBernard Iremonger 	unsigned int socket_id;
2286ce8d5614SIntel 
2287edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2288edab33b1STetsuya Mukawa 
2289edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2290edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2291edab33b1STetsuya Mukawa 		return;
2292ce8d5614SIntel 	}
2293ce8d5614SIntel 
2294edab33b1STetsuya Mukawa 	if (rte_eth_dev_attach(identifier, &pi))
2295edab33b1STetsuya Mukawa 		return;
2296edab33b1STetsuya Mukawa 
2297931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2298931126baSBernard Iremonger 	/* if socket_id is invalid, set to 0 */
2299931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
2300931126baSBernard Iremonger 		socket_id = 0;
2301931126baSBernard Iremonger 	reconfig(pi, socket_id);
2302edab33b1STetsuya Mukawa 	rte_eth_promiscuous_enable(pi);
2303edab33b1STetsuya Mukawa 
23044918a357SXiaoyun Li 	ports_ids[nb_ports] = pi;
2305d9a42a69SThomas Monjalon 	nb_ports = rte_eth_dev_count_avail();
2306edab33b1STetsuya Mukawa 
2307edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2308edab33b1STetsuya Mukawa 
230903ce2c53SMatan Azrad 	update_fwd_ports(pi);
231003ce2c53SMatan Azrad 
2311edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2312edab33b1STetsuya Mukawa 	printf("Done\n");
2313edab33b1STetsuya Mukawa }
2314edab33b1STetsuya Mukawa 
2315edab33b1STetsuya Mukawa void
231628caa76aSZhiyong Yang detach_port(portid_t port_id)
23175f4ec54fSChen Jing D(Mark) {
2318edab33b1STetsuya Mukawa 	char name[RTE_ETH_NAME_MAX_LEN];
23194918a357SXiaoyun Li 	uint16_t i;
23205f4ec54fSChen Jing D(Mark) 
2321edab33b1STetsuya Mukawa 	printf("Detaching a port...\n");
23225f4ec54fSChen Jing D(Mark) 
2323edab33b1STetsuya Mukawa 	if (!port_is_closed(port_id)) {
2324edab33b1STetsuya Mukawa 		printf("Please close port first\n");
2325edab33b1STetsuya Mukawa 		return;
2326edab33b1STetsuya Mukawa 	}
2327edab33b1STetsuya Mukawa 
2328938a184aSAdrien Mazarguil 	if (ports[port_id].flow_list)
2329938a184aSAdrien Mazarguil 		port_flow_flush(port_id);
2330938a184aSAdrien Mazarguil 
23313070419eSGaetan Rivet 	if (rte_eth_dev_detach(port_id, name)) {
2332adea04c4SZhiyong Yang 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2333edab33b1STetsuya Mukawa 		return;
23343070419eSGaetan Rivet 	}
2335edab33b1STetsuya Mukawa 
23364918a357SXiaoyun Li 	for (i = 0; i < nb_ports; i++) {
23374918a357SXiaoyun Li 		if (ports_ids[i] == port_id) {
23384918a357SXiaoyun Li 			ports_ids[i] = ports_ids[nb_ports-1];
23394918a357SXiaoyun Li 			ports_ids[nb_ports-1] = 0;
23404918a357SXiaoyun Li 			break;
23414918a357SXiaoyun Li 		}
23424918a357SXiaoyun Li 	}
2343d9a42a69SThomas Monjalon 	nb_ports = rte_eth_dev_count_avail();
2344edab33b1STetsuya Mukawa 
234503ce2c53SMatan Azrad 	update_fwd_ports(RTE_MAX_ETHPORTS);
234603ce2c53SMatan Azrad 
2347adea04c4SZhiyong Yang 	printf("Port %u is detached. Now total ports is %d\n",
2348adea04c4SZhiyong Yang 			port_id, nb_ports);
2349edab33b1STetsuya Mukawa 	printf("Done\n");
2350edab33b1STetsuya Mukawa 	return;
23515f4ec54fSChen Jing D(Mark) }
23525f4ec54fSChen Jing D(Mark) 
2353af75078fSIntel void
2354af75078fSIntel pmd_test_exit(void)
2355af75078fSIntel {
2356124909d7SZhiyong Yang 	struct rte_device *device;
2357af75078fSIntel 	portid_t pt_id;
2358fb73e096SJeff Guo 	int ret;
2359af75078fSIntel 
23608210ec25SPablo de Lara 	if (test_done == 0)
23618210ec25SPablo de Lara 		stop_packet_forwarding();
23628210ec25SPablo de Lara 
2363d3a274ceSZhihong Wang 	if (ports != NULL) {
2364d3a274ceSZhihong Wang 		no_link_check = 1;
23657d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
2366d3a274ceSZhihong Wang 			printf("\nShutting down port %d...\n", pt_id);
2367af75078fSIntel 			fflush(stdout);
2368d3a274ceSZhihong Wang 			stop_port(pt_id);
2369d3a274ceSZhihong Wang 			close_port(pt_id);
2370124909d7SZhiyong Yang 
2371124909d7SZhiyong Yang 			/*
2372124909d7SZhiyong Yang 			 * This is a workaround to fix a virtio-user issue that
2373124909d7SZhiyong Yang 			 * requires to call clean-up routine to remove existing
2374124909d7SZhiyong Yang 			 * socket.
2375124909d7SZhiyong Yang 			 * This workaround valid only for testpmd, needs a fix
2376124909d7SZhiyong Yang 			 * valid for all applications.
2377124909d7SZhiyong Yang 			 * TODO: Implement proper resource cleanup
2378124909d7SZhiyong Yang 			 */
2379124909d7SZhiyong Yang 			device = rte_eth_devices[pt_id].device;
2380124909d7SZhiyong Yang 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2381124909d7SZhiyong Yang 				detach_port(pt_id);
2382af75078fSIntel 		}
2383d3a274ceSZhihong Wang 	}
2384fb73e096SJeff Guo 
2385fb73e096SJeff Guo 	if (hot_plug) {
2386fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
2387fb73e096SJeff Guo 		if (ret)
2388fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2389fb73e096SJeff Guo 				"fail to stop device event monitor.");
2390fb73e096SJeff Guo 
2391fb73e096SJeff Guo 		ret = eth_dev_event_callback_unregister();
2392fb73e096SJeff Guo 		if (ret)
2393fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2394fb73e096SJeff Guo 				"fail to unregister all event callbacks.");
2395fb73e096SJeff Guo 	}
2396fb73e096SJeff Guo 
2397d3a274ceSZhihong Wang 	printf("\nBye...\n");
2398af75078fSIntel }
2399af75078fSIntel 
/* Handler type invoked when an interactive test command is executed. */
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;	/* command string entered by the user */
	cmd_func_t cmd_func;	/* handler to run for this command */
};

/* Number of entries in the pmd_test_menu command table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2407af75078fSIntel 
2408ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2409af75078fSIntel static void
2410edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2411af75078fSIntel {
2412ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2413ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2414f8244c63SZhiyong Yang 	portid_t portid;
2415f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2416ce8d5614SIntel 	struct rte_eth_link link;
2417ce8d5614SIntel 
2418ce8d5614SIntel 	printf("Checking link statuses...\n");
2419ce8d5614SIntel 	fflush(stdout);
2420ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2421ce8d5614SIntel 		all_ports_up = 1;
24227d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2423ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2424ce8d5614SIntel 				continue;
2425ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2426ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2427ce8d5614SIntel 			/* print link status if flag set */
2428ce8d5614SIntel 			if (print_flag == 1) {
2429ce8d5614SIntel 				if (link.link_status)
2430f8244c63SZhiyong Yang 					printf(
2431f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2432f8244c63SZhiyong Yang 					portid, link.link_speed,
2433ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2434ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2435ce8d5614SIntel 				else
2436f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2437ce8d5614SIntel 				continue;
2438ce8d5614SIntel 			}
2439ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
244009419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2441ce8d5614SIntel 				all_ports_up = 0;
2442ce8d5614SIntel 				break;
2443ce8d5614SIntel 			}
2444ce8d5614SIntel 		}
2445ce8d5614SIntel 		/* after finally printing all link status, get out */
2446ce8d5614SIntel 		if (print_flag == 1)
2447ce8d5614SIntel 			break;
2448ce8d5614SIntel 
2449ce8d5614SIntel 		if (all_ports_up == 0) {
2450ce8d5614SIntel 			fflush(stdout);
2451ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2452ce8d5614SIntel 		}
2453ce8d5614SIntel 
2454ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2455ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2456ce8d5614SIntel 			print_flag = 1;
2457ce8d5614SIntel 		}
24588ea656f8SGaetan Rivet 
24598ea656f8SGaetan Rivet 		if (lsc_interrupt)
24608ea656f8SGaetan Rivet 			break;
2461ce8d5614SIntel 	}
2462af75078fSIntel }
2463af75078fSIntel 
2464284c908cSGaetan Rivet static void
2465284c908cSGaetan Rivet rmv_event_callback(void *arg)
2466284c908cSGaetan Rivet {
24673b97888aSMatan Azrad 	int need_to_start = 0;
24680da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
246928caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
2470284c908cSGaetan Rivet 
2471284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2472284c908cSGaetan Rivet 
24733b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
24743b97888aSMatan Azrad 		need_to_start = 1;
24753b97888aSMatan Azrad 		stop_packet_forwarding();
24763b97888aSMatan Azrad 	}
24770da2a62bSMatan Azrad 	no_link_check = 1;
2478284c908cSGaetan Rivet 	stop_port(port_id);
24790da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
2480284c908cSGaetan Rivet 	close_port(port_id);
24813b97888aSMatan Azrad 	detach_port(port_id);
24823b97888aSMatan Azrad 	if (need_to_start)
24833b97888aSMatan Azrad 		start_packet_forwarding(0);
2484284c908cSGaetan Rivet }
2485284c908cSGaetan Rivet 
248676ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
2487d6af1a13SBernard Iremonger static int
2488f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2489d6af1a13SBernard Iremonger 		  void *ret_param)
249076ad4a2dSGaetan Rivet {
249176ad4a2dSGaetan Rivet 	static const char * const event_desc[] = {
249276ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
249376ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
249476ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
249576ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
249676ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2497badb87c1SAnoob Joseph 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
249876ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
249976ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
25004fb82244SMatan Azrad 		[RTE_ETH_EVENT_NEW] = "device probed",
25014fb82244SMatan Azrad 		[RTE_ETH_EVENT_DESTROY] = "device released",
250276ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MAX] = NULL,
250376ad4a2dSGaetan Rivet 	};
250476ad4a2dSGaetan Rivet 
250576ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
2506d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
250776ad4a2dSGaetan Rivet 
250876ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
250976ad4a2dSGaetan Rivet 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
251076ad4a2dSGaetan Rivet 			port_id, __func__, type);
251176ad4a2dSGaetan Rivet 		fflush(stderr);
25123af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
251376ad4a2dSGaetan Rivet 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
251476ad4a2dSGaetan Rivet 			event_desc[type]);
251576ad4a2dSGaetan Rivet 		fflush(stdout);
251676ad4a2dSGaetan Rivet 	}
2517284c908cSGaetan Rivet 
25180e45c64dSMatan Azrad 	if (port_id_is_invalid(port_id, DISABLED_WARN))
25190e45c64dSMatan Azrad 		return 0;
25200e45c64dSMatan Azrad 
2521284c908cSGaetan Rivet 	switch (type) {
2522284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
2523284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
2524284c908cSGaetan Rivet 				rmv_event_callback, (void *)(intptr_t)port_id))
2525284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
2526284c908cSGaetan Rivet 		break;
2527284c908cSGaetan Rivet 	default:
2528284c908cSGaetan Rivet 		break;
2529284c908cSGaetan Rivet 	}
2530d6af1a13SBernard Iremonger 	return 0;
253176ad4a2dSGaetan Rivet }
253276ad4a2dSGaetan Rivet 
2533fb73e096SJeff Guo /* This function is used by the interrupt thread */
2534fb73e096SJeff Guo static void
2535fb73e096SJeff Guo eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2536fb73e096SJeff Guo 			     __rte_unused void *arg)
2537fb73e096SJeff Guo {
2538fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
2539fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
2540fb73e096SJeff Guo 			__func__, type);
2541fb73e096SJeff Guo 		fflush(stderr);
2542fb73e096SJeff Guo 	}
2543fb73e096SJeff Guo 
2544fb73e096SJeff Guo 	switch (type) {
2545fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
2546fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2547fb73e096SJeff Guo 			device_name);
2548fb73e096SJeff Guo 		/* TODO: After finish failure handle, begin to stop
2549fb73e096SJeff Guo 		 * packet forward, stop port, close port, detach port.
2550fb73e096SJeff Guo 		 */
2551fb73e096SJeff Guo 		break;
2552fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
2553fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2554fb73e096SJeff Guo 			device_name);
2555fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
2556fb73e096SJeff Guo 		 * begin to attach port.
2557fb73e096SJeff Guo 		 */
2558fb73e096SJeff Guo 		break;
2559fb73e096SJeff Guo 	default:
2560fb73e096SJeff Guo 		break;
2561fb73e096SJeff Guo 	}
2562fb73e096SJeff Guo }
2563fb73e096SJeff Guo 
2564013af9b6SIntel static int
256528caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2566af75078fSIntel {
2567013af9b6SIntel 	uint16_t i;
2568af75078fSIntel 	int diag;
2569013af9b6SIntel 	uint8_t mapping_found = 0;
2570af75078fSIntel 
2571013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2572013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2573013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2574013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2575013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2576013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2577013af9b6SIntel 			if (diag != 0)
2578013af9b6SIntel 				return diag;
2579013af9b6SIntel 			mapping_found = 1;
2580af75078fSIntel 		}
2581013af9b6SIntel 	}
2582013af9b6SIntel 	if (mapping_found)
2583013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2584013af9b6SIntel 	return 0;
2585013af9b6SIntel }
2586013af9b6SIntel 
2587013af9b6SIntel static int
258828caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2589013af9b6SIntel {
2590013af9b6SIntel 	uint16_t i;
2591013af9b6SIntel 	int diag;
2592013af9b6SIntel 	uint8_t mapping_found = 0;
2593013af9b6SIntel 
2594013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2595013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2596013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2597013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2598013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2599013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2600013af9b6SIntel 			if (diag != 0)
2601013af9b6SIntel 				return diag;
2602013af9b6SIntel 			mapping_found = 1;
2603013af9b6SIntel 		}
2604013af9b6SIntel 	}
2605013af9b6SIntel 	if (mapping_found)
2606013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2607013af9b6SIntel 	return 0;
2608013af9b6SIntel }
2609013af9b6SIntel 
2610013af9b6SIntel static void
261128caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2612013af9b6SIntel {
2613013af9b6SIntel 	int diag = 0;
2614013af9b6SIntel 
2615013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2616af75078fSIntel 	if (diag != 0) {
2617013af9b6SIntel 		if (diag == -ENOTSUP) {
2618013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2619013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2620013af9b6SIntel 		}
2621013af9b6SIntel 		else
2622013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2623013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2624013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2625af75078fSIntel 					pi, diag);
2626af75078fSIntel 	}
2627013af9b6SIntel 
2628013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2629af75078fSIntel 	if (diag != 0) {
2630013af9b6SIntel 		if (diag == -ENOTSUP) {
2631013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2632013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2633013af9b6SIntel 		}
2634013af9b6SIntel 		else
2635013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2636013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2637013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2638af75078fSIntel 					pi, diag);
2639af75078fSIntel 	}
2640af75078fSIntel }
2641af75078fSIntel 
2642f2c5125aSPablo de Lara static void
2643f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2644f2c5125aSPablo de Lara {
2645d44f8a48SQi Zhang 	uint16_t qid;
2646f2c5125aSPablo de Lara 
2647d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
2648d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2649d44f8a48SQi Zhang 
2650d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
2651f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2652d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2653f2c5125aSPablo de Lara 
2654f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2655d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2656f2c5125aSPablo de Lara 
2657f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2658d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2659f2c5125aSPablo de Lara 
2660f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2661d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2662f2c5125aSPablo de Lara 
2663f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2664d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2665f2c5125aSPablo de Lara 
2666d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
2667d44f8a48SQi Zhang 	}
2668d44f8a48SQi Zhang 
2669d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
2670d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
2671d44f8a48SQi Zhang 
2672d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
2673f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2674d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2675f2c5125aSPablo de Lara 
2676f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2677d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2678f2c5125aSPablo de Lara 
2679f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2680d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2681f2c5125aSPablo de Lara 
2682f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2683d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2684f2c5125aSPablo de Lara 
2685f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2686d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2687d44f8a48SQi Zhang 
2688d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
2689d44f8a48SQi Zhang 	}
2690f2c5125aSPablo de Lara }
2691f2c5125aSPablo de Lara 
2692013af9b6SIntel void
2693013af9b6SIntel init_port_config(void)
2694013af9b6SIntel {
2695013af9b6SIntel 	portid_t pid;
2696013af9b6SIntel 	struct rte_port *port;
2697013af9b6SIntel 
26987d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
2699013af9b6SIntel 		port = &ports[pid];
2700013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
2701422515b9SAdrien Mazarguil 		rte_eth_dev_info_get(pid, &port->dev_info);
27023ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2703013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
270490892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2705422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
2706af75078fSIntel 		} else {
2707013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2708013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2709af75078fSIntel 		}
27103ce690d3SBruce Richardson 
27115f592039SJingjing Wu 		if (port->dcb_flag == 0) {
27123ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
27133ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
27143ce690d3SBruce Richardson 			else
27153ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
27163ce690d3SBruce Richardson 		}
27173ce690d3SBruce Richardson 
2718f2c5125aSPablo de Lara 		rxtx_port_config(port);
2719013af9b6SIntel 
2720013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
2721013af9b6SIntel 
2722013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
272350c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2724e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
27257b7e5ba7SIntel #endif
27268ea656f8SGaetan Rivet 
27278ea656f8SGaetan Rivet 		if (lsc_interrupt &&
27288ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
27298ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
27308ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
2731284c908cSGaetan Rivet 		if (rmv_interrupt &&
2732284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
2733284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
2734284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
2735013af9b6SIntel 	}
2736013af9b6SIntel }
2737013af9b6SIntel 
273841b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
273941b05095SBernard Iremonger {
274041b05095SBernard Iremonger 	struct rte_port *port;
274141b05095SBernard Iremonger 
274241b05095SBernard Iremonger 	port = &ports[slave_pid];
274341b05095SBernard Iremonger 	port->slave_flag = 1;
274441b05095SBernard Iremonger }
274541b05095SBernard Iremonger 
274641b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
274741b05095SBernard Iremonger {
274841b05095SBernard Iremonger 	struct rte_port *port;
274941b05095SBernard Iremonger 
275041b05095SBernard Iremonger 	port = &ports[slave_pid];
275141b05095SBernard Iremonger 	port->slave_flag = 0;
275241b05095SBernard Iremonger }
275341b05095SBernard Iremonger 
27540e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
27550e545d30SBernard Iremonger {
27560e545d30SBernard Iremonger 	struct rte_port *port;
27570e545d30SBernard Iremonger 
27580e545d30SBernard Iremonger 	port = &ports[slave_pid];
2759b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2760b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2761b8b8b344SMatan Azrad 		return 1;
2762b8b8b344SMatan Azrad 	return 0;
27630e545d30SBernard Iremonger }
27640e545d30SBernard Iremonger 
2765013af9b6SIntel const uint16_t vlan_tags[] = {
2766013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
2767013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
2768013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
2769013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
2770013af9b6SIntel };
2771013af9b6SIntel 
2772013af9b6SIntel static  int
2773ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
27741a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
27751a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
27761a572499SJingjing Wu 		 uint8_t pfc_en)
2777013af9b6SIntel {
2778013af9b6SIntel 	uint8_t i;
2779ac7c491cSKonstantin Ananyev 	int32_t rc;
2780ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
2781af75078fSIntel 
2782af75078fSIntel 	/*
2783013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2784013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
2785af75078fSIntel 	 */
27861a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
27871a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
27881a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
27891a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
27901a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2791013af9b6SIntel 
2792547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
27931a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
27941a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
27951a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
27961a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
27971a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
27981a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2799013af9b6SIntel 
28001a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
28011a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
28021a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
28031a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
28041a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2805af75078fSIntel 		}
2806013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2807f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2808f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2809013af9b6SIntel 		}
2810013af9b6SIntel 
2811013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
281232e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
281332e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
28141a572499SJingjing Wu 	} else {
28151a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
28161a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
28171a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
28181a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2819013af9b6SIntel 
2820ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2821ac7c491cSKonstantin Ananyev 		if (rc != 0)
2822ac7c491cSKonstantin Ananyev 			return rc;
2823ac7c491cSKonstantin Ananyev 
28241a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
28251a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
28261a572499SJingjing Wu 
2827bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2828bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
2829bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
2830013af9b6SIntel 		}
2831ac7c491cSKonstantin Ananyev 
28321a572499SJingjing Wu 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2833ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
283432e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
28351a572499SJingjing Wu 	}
28361a572499SJingjing Wu 
28371a572499SJingjing Wu 	if (pfc_en)
28381a572499SJingjing Wu 		eth_conf->dcb_capability_en =
28391a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2840013af9b6SIntel 	else
2841013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2842013af9b6SIntel 
2843013af9b6SIntel 	return 0;
2844013af9b6SIntel }
2845013af9b6SIntel 
2846013af9b6SIntel int
28471a572499SJingjing Wu init_port_dcb_config(portid_t pid,
28481a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
28491a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
28501a572499SJingjing Wu 		     uint8_t pfc_en)
2851013af9b6SIntel {
2852013af9b6SIntel 	struct rte_eth_conf port_conf;
2853013af9b6SIntel 	struct rte_port *rte_port;
2854013af9b6SIntel 	int retval;
2855013af9b6SIntel 	uint16_t i;
2856013af9b6SIntel 
28572a977b89SWenzhuo Lu 	rte_port = &ports[pid];
2858013af9b6SIntel 
2859013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2860013af9b6SIntel 	/* Enter DCB configuration status */
2861013af9b6SIntel 	dcb_config = 1;
2862013af9b6SIntel 
2863d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2864d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
2865d5354e89SYanglong Wu 
2866013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
2867ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2868013af9b6SIntel 	if (retval < 0)
2869013af9b6SIntel 		return retval;
28700074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2871013af9b6SIntel 
28722f203d44SQi Zhang 	/* re-configure the device . */
28732f203d44SQi Zhang 	rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
28742a977b89SWenzhuo Lu 
28752a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
28762a977b89SWenzhuo Lu 
28772a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
28782a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
28792a977b89SWenzhuo Lu 	 */
28802a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
28812a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
28822a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
28832a977b89SWenzhuo Lu 			" for port %d.", pid);
28842a977b89SWenzhuo Lu 		return -1;
28852a977b89SWenzhuo Lu 	}
28862a977b89SWenzhuo Lu 
28872a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
28882a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
28892a977b89SWenzhuo Lu 	 */
28902a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
289186ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
289286ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
289386ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
289486ef65eeSBernard Iremonger 		} else {
28952a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
28962a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
289786ef65eeSBernard Iremonger 		}
28982a977b89SWenzhuo Lu 	} else {
28992a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
29002a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
29012a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
29022a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
29032a977b89SWenzhuo Lu 		} else {
29042a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
29052a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
29062a977b89SWenzhuo Lu 
29072a977b89SWenzhuo Lu 		}
29082a977b89SWenzhuo Lu 	}
29092a977b89SWenzhuo Lu 	rx_free_thresh = 64;
29102a977b89SWenzhuo Lu 
2911013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2912013af9b6SIntel 
2913f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
2914013af9b6SIntel 	/* VLAN filter */
29150074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
29161a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2917013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
2918013af9b6SIntel 
2919013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2920013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
2921013af9b6SIntel 
29227741e4cfSIntel 	rte_port->dcb_flag = 1;
29237741e4cfSIntel 
2924013af9b6SIntel 	return 0;
2925af75078fSIntel }
2926af75078fSIntel 
2927ffc468ffSTetsuya Mukawa static void
2928ffc468ffSTetsuya Mukawa init_port(void)
2929ffc468ffSTetsuya Mukawa {
2930ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
2931ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
2932ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2933ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
2934ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
2935ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
2936ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
2937ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
2938ffc468ffSTetsuya Mukawa 	}
2939ffc468ffSTetsuya Mukawa }
2940ffc468ffSTetsuya Mukawa 
2941d3a274ceSZhihong Wang static void
2942d3a274ceSZhihong Wang force_quit(void)
2943d3a274ceSZhihong Wang {
2944d3a274ceSZhihong Wang 	pmd_test_exit();
2945d3a274ceSZhihong Wang 	prompt_exit();
2946d3a274ceSZhihong Wang }
2947d3a274ceSZhihong Wang 
2948d3a274ceSZhihong Wang static void
2949cfea1f30SPablo de Lara print_stats(void)
2950cfea1f30SPablo de Lara {
2951cfea1f30SPablo de Lara 	uint8_t i;
2952cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2953cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2954cfea1f30SPablo de Lara 
2955cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
2956cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
2957cfea1f30SPablo de Lara 
2958cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
2959cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2960cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
2961cfea1f30SPablo de Lara }
2962cfea1f30SPablo de Lara 
2963cfea1f30SPablo de Lara static void
2964d3a274ceSZhihong Wang signal_handler(int signum)
2965d3a274ceSZhihong Wang {
2966d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
2967d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
2968d3a274ceSZhihong Wang 				signum);
2969102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
2970102b7329SReshma Pattan 		/* uninitialize packet capture framework */
2971102b7329SReshma Pattan 		rte_pdump_uninit();
2972102b7329SReshma Pattan #endif
297362d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
297462d3216dSReshma Pattan 		rte_latencystats_uninit();
297562d3216dSReshma Pattan #endif
2976d3a274ceSZhihong Wang 		force_quit();
2977d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
2978d9a191a0SPhil Yang 		f_quit = 1;
2979d3a274ceSZhihong Wang 		/* exit with the expected status */
2980d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
2981d3a274ceSZhihong Wang 		kill(getpid(), signum);
2982d3a274ceSZhihong Wang 	}
2983d3a274ceSZhihong Wang }
2984d3a274ceSZhihong Wang 
2985af75078fSIntel int
2986af75078fSIntel main(int argc, char** argv)
2987af75078fSIntel {
2988af75078fSIntel 	int diag;
2989f8244c63SZhiyong Yang 	portid_t port_id;
29904918a357SXiaoyun Li 	uint16_t count;
2991fb73e096SJeff Guo 	int ret;
2992af75078fSIntel 
2993d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
2994d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
2995d3a274ceSZhihong Wang 
2996af75078fSIntel 	diag = rte_eal_init(argc, argv);
2997af75078fSIntel 	if (diag < 0)
2998af75078fSIntel 		rte_panic("Cannot init EAL\n");
2999af75078fSIntel 
3000285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
3001285fd101SOlivier Matz 	if (testpmd_logtype < 0)
3002285fd101SOlivier Matz 		rte_panic("Cannot register log type");
3003285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3004285fd101SOlivier Matz 
30054aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP
30064aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
30074aa0d012SAnatoly Burakov 	rte_pdump_init(NULL);
30084aa0d012SAnatoly Burakov #endif
30094aa0d012SAnatoly Burakov 
30104918a357SXiaoyun Li 	count = 0;
30114918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
30124918a357SXiaoyun Li 		ports_ids[count] = port_id;
30134918a357SXiaoyun Li 		count++;
30144918a357SXiaoyun Li 	}
30154918a357SXiaoyun Li 	nb_ports = (portid_t) count;
30164aa0d012SAnatoly Burakov 	if (nb_ports == 0)
30174aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
30184aa0d012SAnatoly Burakov 
30194aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
30204aa0d012SAnatoly Burakov 	init_port();
30214aa0d012SAnatoly Burakov 
30224aa0d012SAnatoly Burakov 	set_def_fwd_config();
30234aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
30244aa0d012SAnatoly Burakov 		rte_panic("Empty set of forwarding logical cores - check the "
30254aa0d012SAnatoly Burakov 			  "core mask supplied in the command parameters\n");
30264aa0d012SAnatoly Burakov 
3027e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
3028e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE
3029e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
3030e505d84cSAnatoly Burakov #endif
3031e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS
3032e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
3033e505d84cSAnatoly Burakov #endif
3034e505d84cSAnatoly Burakov 
3035fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
3036fb7b8b32SAnatoly Burakov #ifdef RTE_EXEC_ENV_BSDAPP
3037fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
3038fb7b8b32SAnatoly Burakov #else
3039fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
3040fb7b8b32SAnatoly Burakov #endif
3041fb7b8b32SAnatoly Burakov 
3042e505d84cSAnatoly Burakov 	argc -= diag;
3043e505d84cSAnatoly Burakov 	argv += diag;
3044e505d84cSAnatoly Burakov 	if (argc > 1)
3045e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
3046e505d84cSAnatoly Burakov 
3047e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3048285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
30491c036b16SEelco Chaudron 			strerror(errno));
30501c036b16SEelco Chaudron 	}
30511c036b16SEelco Chaudron 
305299cabef0SPablo de Lara 	if (tx_first && interactive)
305399cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
305499cabef0SPablo de Lara 				"interactive mode.\n");
30558820cba4SDavid Hunt 
30568820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
30578820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
30588820cba4SDavid Hunt 				" using tx_first. Disabling.\n");
30598820cba4SDavid Hunt 		lsc_interrupt = 0;
30608820cba4SDavid Hunt 	}
30618820cba4SDavid Hunt 
30625a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
30635a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
30645a8fb55cSReshma Pattan 
30655a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3066af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3067af75078fSIntel 		       "but nb_txq=%d will prevent to fully test it.\n",
3068af75078fSIntel 		       nb_rxq, nb_txq);
3069af75078fSIntel 
3070af75078fSIntel 	init_config();
3071fb73e096SJeff Guo 
3072fb73e096SJeff Guo 	if (hot_plug) {
3073fb73e096SJeff Guo 		/* enable hot plug monitoring */
3074fb73e096SJeff Guo 		ret = rte_dev_event_monitor_start();
3075fb73e096SJeff Guo 		if (ret) {
3076fb73e096SJeff Guo 			rte_errno = EINVAL;
3077fb73e096SJeff Guo 			return -1;
3078fb73e096SJeff Guo 		}
3079fb73e096SJeff Guo 		eth_dev_event_callback_register();
3080fb73e096SJeff Guo 
3081fb73e096SJeff Guo 	}
3082fb73e096SJeff Guo 
3083148f963fSBruce Richardson 	if (start_port(RTE_PORT_ALL) != 0)
3084148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3085af75078fSIntel 
3086ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
30877d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(port_id)
3088ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
3089af75078fSIntel 
30907e4441c8SRemy Horton 	/* Init metrics library */
30917e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
30927e4441c8SRemy Horton 
309362d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
309462d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
309562d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
309662d3216dSReshma Pattan 		if (ret)
309762d3216dSReshma Pattan 			printf("Warning: latencystats init()"
309862d3216dSReshma Pattan 				" returned error %d\n",	ret);
309962d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
310062d3216dSReshma Pattan 			latencystats_lcore_id);
310162d3216dSReshma Pattan 	}
310262d3216dSReshma Pattan #endif
310362d3216dSReshma Pattan 
31047e4441c8SRemy Horton 	/* Setup bitrate stats */
31057e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
3106e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
31077e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
31087e4441c8SRemy Horton 		if (bitrate_data == NULL)
3109e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
3110e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
31117e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
3112e25e6c70SRemy Horton 	}
31137e4441c8SRemy Horton #endif
31147e4441c8SRemy Horton 
31150d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
311681ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
311781ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
311881ef862bSAllain Legacy 
3119ca7feb22SCyril Chemparathy 	if (interactive == 1) {
3120ca7feb22SCyril Chemparathy 		if (auto_start) {
3121ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
3122ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
3123ca7feb22SCyril Chemparathy 		}
3124af75078fSIntel 		prompt();
31250de738cfSJiayu Hu 		pmd_test_exit();
3126ca7feb22SCyril Chemparathy 	} else
31270d56cb81SThomas Monjalon #endif
31280d56cb81SThomas Monjalon 	{
3129af75078fSIntel 		char c;
3130af75078fSIntel 		int rc;
3131af75078fSIntel 
3132d9a191a0SPhil Yang 		f_quit = 0;
3133d9a191a0SPhil Yang 
3134af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
313599cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
3136cfea1f30SPablo de Lara 		if (stats_period != 0) {
3137cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3138cfea1f30SPablo de Lara 			uint64_t timer_period;
3139cfea1f30SPablo de Lara 
3140cfea1f30SPablo de Lara 			/* Convert to number of cycles */
3141cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
3142cfea1f30SPablo de Lara 
3143d9a191a0SPhil Yang 			while (f_quit == 0) {
3144cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
3145cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
3146cfea1f30SPablo de Lara 
3147cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
3148cfea1f30SPablo de Lara 					print_stats();
3149cfea1f30SPablo de Lara 					/* Reset the timer */
3150cfea1f30SPablo de Lara 					diff_time = 0;
3151cfea1f30SPablo de Lara 				}
3152cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
3153cfea1f30SPablo de Lara 				prev_time = cur_time;
3154cfea1f30SPablo de Lara 				sleep(1);
3155cfea1f30SPablo de Lara 			}
3156cfea1f30SPablo de Lara 		}
3157cfea1f30SPablo de Lara 
3158af75078fSIntel 		printf("Press enter to exit\n");
3159af75078fSIntel 		rc = read(0, &c, 1);
3160d3a274ceSZhihong Wang 		pmd_test_exit();
3161af75078fSIntel 		if (rc < 0)
3162af75078fSIntel 			return 1;
3163af75078fSIntel 	}
3164af75078fSIntel 
3165af75078fSIntel 	return 0;
3166af75078fSIntel }
3167