xref: /dpdk/app/test-pmd/testpmd.c (revision 687a5b12b14f02f0d3624f4b36b34dba8119a680)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#include <sys/select.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif
#ifdef RTE_NET_BOND
#include <rte_eth_bond.h>
#endif
#ifdef RTE_NET_MLX5
#include "mlx5_testpmd.h"
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
/*
 * Zone size with the malloc overhead (max of debug and release variants)
 * must fit into the smallest supported hugepage size (2M),
 * so that an IOVA-contiguous zone of this size can always be allocated
 * if there are free 2M hugepages.
 */
#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
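/*
 * With the common 64-byte cache line, for example, this evaluates to
 * 2M - 256 bytes, leaving 256 bytes of headroom for the malloc overhead.
 */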

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the main core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< NUMA enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by each port
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by each port
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
	&recycle_mbufs_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container, a process running with the 'stats-period' option cannot be
 * terminated. Set this flag to exit the stats period loop after receiving
 * SIGINT/SIGTERM.
 */
volatile uint8_t f_quit;
uint8_t cl_quit; /* Quit testpmd from cmdline. */

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if any of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT];

uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RX_DESC_DEFAULT 0
#define TX_DESC_DEFAULT 0
uint16_t nb_rxd = RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

334af75078fSIntel /*
3351d343c19SMike Pattrick  * Configurable sub-forwarding mode for the noisy_vnf forwarding mode.
3361d343c19SMike Pattrick  */
3371d343c19SMike Pattrick enum noisy_fwd_mode noisy_fwd_mode;
3381d343c19SMike Pattrick 
3391d343c19SMike Pattrick /* String version of enum noisy_fwd_mode */
3401d343c19SMike Pattrick const char * const noisy_fwd_mode_desc[] = {
3411d343c19SMike Pattrick 	[NOISY_FWD_MODE_IO] = "io",
3421d343c19SMike Pattrick 	[NOISY_FWD_MODE_MAC] = "mac",
3431d343c19SMike Pattrick 	[NOISY_FWD_MODE_MACSWAP] = "macswap",
3441d343c19SMike Pattrick 	[NOISY_FWD_MODE_5TSWAP] = "5tswap",
3451d343c19SMike Pattrick 	[NOISY_FWD_MODE_MAX] = NULL,
3461d343c19SMike Pattrick };
3471d343c19SMike Pattrick 
3481d343c19SMike Pattrick /*
3493c156061SJens Freimann  * Configurable value of buffered packets before sending.
3503c156061SJens Freimann  */
3513c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
3523c156061SJens Freimann 
3533c156061SJens Freimann /*
3543c156061SJens Freimann  * Configurable value of packet buffer timeout.
3553c156061SJens Freimann  */
3563c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
3573c156061SJens Freimann 
3583c156061SJens Freimann /*
3593c156061SJens Freimann  * Configurable value for size of VNF internal memory area
3603c156061SJens Freimann  * used for simulating noisy neighbour behaviour
3613c156061SJens Freimann  */
3623c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
3633c156061SJens Freimann 
3643c156061SJens Freimann /*
3653c156061SJens Freimann  * Configurable value of number of random writes done in
3663c156061SJens Freimann  * VNF simulation memory area.
3673c156061SJens Freimann  */
3683c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3693c156061SJens Freimann 
3703c156061SJens Freimann /*
3713c156061SJens Freimann  * Configurable value of number of random reads done in
3723c156061SJens Freimann  * VNF simulation memory area.
3733c156061SJens Freimann  */
3743c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3753c156061SJens Freimann 
3763c156061SJens Freimann /*
3773c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3783c156061SJens Freimann  * VNF simulation memory area.
3793c156061SJens Freimann  */
3803c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3813c156061SJens Freimann 
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

397af75078fSIntel /*
3987ee3e944SVasily Philipov  * Flow API isolated mode.
3997ee3e944SVasily Philipov  */
4007ee3e944SVasily Philipov uint8_t flow_isolate_all;
4017ee3e944SVasily Philipov 
4027ee3e944SVasily Philipov /*
403543df472SChengwen Feng  * Disable port flow flush when stop port.
404543df472SChengwen Feng  */
405543df472SChengwen Feng uint8_t no_flow_flush = 0; /* do flow flush by default */
406543df472SChengwen Feng 
407543df472SChengwen Feng /*
408bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
409bc202406SDavid Marchand  */
410bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
411bc202406SDavid Marchand 
412bc202406SDavid Marchand /*
4136937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
4146937d210SStephen Hemminger  */
4156937d210SStephen Hemminger uint8_t no_device_start = 0;
4166937d210SStephen Hemminger 
4176937d210SStephen Hemminger /*
4188ea656f8SGaetan Rivet  * Enable link status change notification
4198ea656f8SGaetan Rivet  */
4208ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
4218ea656f8SGaetan Rivet 
4228ea656f8SGaetan Rivet /*
423284c908cSGaetan Rivet  * Enable device removal notification.
424284c908cSGaetan Rivet  */
425284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
426284c908cSGaetan Rivet 
427fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
428fb73e096SJeff Guo 
4294f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
4304f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
4314f1ed78eSThomas Monjalon 
432b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
433b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
434b0a9354aSPavan Nikhilesh 
43501817b10SBing Zhao /* Hairpin ports configuration mode. */
43623095155SDariusz Sosnowski uint32_t hairpin_mode;
43701817b10SBing Zhao 
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
	[RTE_ETH_EVENT_ERR_RECOVERING] = "error recovering",
	[RTE_ETH_EVENT_RECOVERY_SUCCESS] = "error recovery successful",
	[RTE_ETH_EVENT_RECOVERY_FAILED] = "error recovery failed",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX, device probe/release and
 * Rx availability threshold.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED) |
			    (UINT32_C(1) << RTE_ETH_EVENT_ERR_RECOVERING) |
			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_SUCCESS) |
			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_FAILED);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles disabled by default.
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default.
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group, 0 to disable.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif

/*
 * Hexadecimal bitmask of RX multi-queue modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed.
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;

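/*
 * Negotiate delivery of Rx metadata (user flag, user mark and tunnel ID)
 * with the PMD. Only the primary process negotiates; each feature the
 * driver will not deliver is logged at DEBUG level, and any failure other
 * than -ENOTSUP is fatal.
 */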
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

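/*
 * Multi-process wrappers for ethdev/mempool operations: only the primary
 * process performs the actual operation; secondary processes return
 * success without acting.
 */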
static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					dev_conf);
	return 0;
}

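/*
 * Update the cached port status of every member port of a bonding device
 * when the bonding port is started or stopped.
 */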
static int
change_bonding_member_port_status(portid_t bond_pid, bool is_stop)
{
#ifdef RTE_NET_BOND

	portid_t member_pids[RTE_MAX_ETHPORTS];
	struct rte_port *port;
	int num_members;
	portid_t member_pid;
	int i;

	num_members = rte_eth_bond_members_get(bond_pid, member_pids,
						RTE_MAX_ETHPORTS);
	if (num_members < 0) {
		fprintf(stderr, "Failed to get member list for port = %u\n",
			bond_pid);
		return num_members;
	}

	for (i = 0; i < num_members; i++) {
		member_pid = member_pids[i];
		port = &ports[member_pid];
		port->port_status =
			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
	}
#else
	RTE_SET_USED(bond_pid);
	RTE_SET_USED(is_stop);
#endif
	return 0;
}

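/*
 * Start a port from the primary process and, for a bonding port, mark all
 * of its member ports as started as well.
 */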
static int
eth_dev_start_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_start(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Starting a bonding port also starts all members under the
		 * bonding device. So if this port is a bonding device, we
		 * need to modify the port status of these members.
		 */
		if (port->bond_flag == 1)
			return change_bonding_member_port_status(port_id, false);
	}

	return 0;
}

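/*
 * Stop a port from the primary process and, for a bonding port, mark all
 * of its member ports as stopped as well.
 */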
static int
eth_dev_stop_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_stop(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Stopping a bonding port also stops all members under the
		 * bonding device. So if this port is a bonding device, we
		 * need to modify the port status of these members.
		 */
		if (port->bond_flag == 1)
			return change_bonding_member_port_status(port_id, true);
	}

	return 0;
}

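/* Free a mempool; only the primary process actually releases the memory. */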
static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

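/* Set the MTU of a port; secondary processes report success without acting. */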
static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
				enum rte_dev_event_type type,
				void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flag names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if a socket is new (not yet discovered).
 * If the socket is new, return a positive value. If not, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;
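	/*
	 * For example (hypothetical numbers): with 2M pages and obj_sz = 2560,
	 * mbuf_per_pg is 819, so 10000 mbufs need 12 full pages plus one page
	 * for the 172 leftover objects: n_pages = 13 and mbuf_mem = 26M.
	 */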

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
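
/*
 * Mempool memory-area callback: DMA-unmap the area from every port's device
 * and unregister it from EAL.
 */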
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

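/*
 * Mempool memory-area callback: register the area with EAL and DMA-map it
 * for every port's device.
 */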
static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
}
#endif

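/*
 * Reserve IOVA-contiguous memzones and build the external-buffer descriptor
 * table used to create an external-buffer mbuf pool. Returns the number of
 * descriptors, or 0 on failure.
 */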
110472512e18SViacheslav Ovsiienko static unsigned int
110572512e18SViacheslav Ovsiienko setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
110672512e18SViacheslav Ovsiienko 	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
110772512e18SViacheslav Ovsiienko {
110872512e18SViacheslav Ovsiienko 	struct rte_pktmbuf_extmem *xmem;
110972512e18SViacheslav Ovsiienko 	unsigned int ext_num, zone_num, elt_num;
111072512e18SViacheslav Ovsiienko 	uint16_t elt_size;
111172512e18SViacheslav Ovsiienko 
111272512e18SViacheslav Ovsiienko 	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
111372512e18SViacheslav Ovsiienko 	elt_num = EXTBUF_ZONE_SIZE / elt_size;
111472512e18SViacheslav Ovsiienko 	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
111572512e18SViacheslav Ovsiienko 
111672512e18SViacheslav Ovsiienko 	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
111772512e18SViacheslav Ovsiienko 	if (xmem == NULL) {
111872512e18SViacheslav Ovsiienko 		TESTPMD_LOG(ERR, "Cannot allocate memory for "
111972512e18SViacheslav Ovsiienko 				 "external buffer descriptors\n");
112072512e18SViacheslav Ovsiienko 		*ext_mem = NULL;
112172512e18SViacheslav Ovsiienko 		return 0;
112272512e18SViacheslav Ovsiienko 	}
112372512e18SViacheslav Ovsiienko 	for (ext_num = 0; ext_num < zone_num; ext_num++) {
112472512e18SViacheslav Ovsiienko 		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
112572512e18SViacheslav Ovsiienko 		const struct rte_memzone *mz;
112672512e18SViacheslav Ovsiienko 		char mz_name[RTE_MEMZONE_NAMESIZE];
112772512e18SViacheslav Ovsiienko 		int ret;
112872512e18SViacheslav Ovsiienko 
112972512e18SViacheslav Ovsiienko 		ret = snprintf(mz_name, sizeof(mz_name),
113072512e18SViacheslav Ovsiienko 			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
113172512e18SViacheslav Ovsiienko 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
113272512e18SViacheslav Ovsiienko 			errno = ENAMETOOLONG;
113372512e18SViacheslav Ovsiienko 			ext_num = 0;
113472512e18SViacheslav Ovsiienko 			break;
113572512e18SViacheslav Ovsiienko 		}
113613b19642SDmitry Kozlyuk 		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
113772512e18SViacheslav Ovsiienko 					 socket_id,
113872512e18SViacheslav Ovsiienko 					 RTE_MEMZONE_IOVA_CONTIG |
113972512e18SViacheslav Ovsiienko 					 RTE_MEMZONE_1GB |
114013b19642SDmitry Kozlyuk 					 RTE_MEMZONE_SIZE_HINT_ONLY);
114172512e18SViacheslav Ovsiienko 		if (mz == NULL) {
114272512e18SViacheslav Ovsiienko 			/*
114372512e18SViacheslav Ovsiienko 			 * The caller exits on external buffer creation
114472512e18SViacheslav Ovsiienko 			 * error, so there is no need to free memzones.
114572512e18SViacheslav Ovsiienko 			 */
114672512e18SViacheslav Ovsiienko 			errno = ENOMEM;
114772512e18SViacheslav Ovsiienko 			ext_num = 0;
114872512e18SViacheslav Ovsiienko 			break;
114972512e18SViacheslav Ovsiienko 		}
115072512e18SViacheslav Ovsiienko 		xseg->buf_ptr = mz->addr;
115172512e18SViacheslav Ovsiienko 		xseg->buf_iova = mz->iova;
115272512e18SViacheslav Ovsiienko 		xseg->buf_len = EXTBUF_ZONE_SIZE;
115372512e18SViacheslav Ovsiienko 		xseg->elt_size = elt_size;
115472512e18SViacheslav Ovsiienko 	}
115572512e18SViacheslav Ovsiienko 	if (ext_num == 0 && xmem != NULL) {
115672512e18SViacheslav Ovsiienko 		free(xmem);
115772512e18SViacheslav Ovsiienko 		xmem = NULL;
115872512e18SViacheslav Ovsiienko 	}
115972512e18SViacheslav Ovsiienko 	*ext_mem = xmem;
116072512e18SViacheslav Ovsiienko 	return ext_num;
116172512e18SViacheslav Ovsiienko }
116272512e18SViacheslav Ovsiienko 
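/*
 * Editor's worked example for setup_extbuf() above (not part of the
 * original sources; EXTBUF_ZONE_SIZE is defined in testpmd.h and is
 * assumed to be 2 MB here for illustration): with mbuf_sz = 2176,
 * elt_size = RTE_ALIGN_CEIL(2176, 64) = 2176, so each zone holds
 * elt_num = 2097152 / 2176 = 963 buffers, and nb_mbufs = 250000
 * pinned buffers require zone_num = (250000 + 962) / 963 = 260
 * IOVA-contiguous memzones.
 */
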
1163af75078fSIntel /*
1164af75078fSIntel  * Mbuf pool creation; done once for each pool at initialisation time.
1165af75078fSIntel  */
1166401b744dSShahaf Shuler static struct rte_mempool *
1167af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
116826cbb419SViacheslav Ovsiienko 		 unsigned int socket_id, uint16_t size_idx)
1169af75078fSIntel {
1170af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
1171bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
1172761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1173af75078fSIntel 	uint32_t mb_size;
1174af75078fSIntel 
1175dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1176761f7ae1SJie Zhou #endif
117726cbb419SViacheslav Ovsiienko 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1178a550baf2SMin Hu (Connor) 	if (!is_proc_primary()) {
1179a550baf2SMin Hu (Connor) 		rte_mp = rte_mempool_lookup(pool_name);
1180a550baf2SMin Hu (Connor) 		if (rte_mp == NULL)
1181a550baf2SMin Hu (Connor) 			rte_exit(EXIT_FAILURE,
1182a550baf2SMin Hu (Connor) 				"Get mbuf pool for socket %u failed: %s\n",
1183a550baf2SMin Hu (Connor) 				socket_id, rte_strerror(rte_errno));
1184a550baf2SMin Hu (Connor) 		return rte_mp;
1185a550baf2SMin Hu (Connor) 	}
1186148f963fSBruce Richardson 
1187285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
1188d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1189d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1190d1eb542eSOlivier Matz 
1191c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
1192c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
1193c7f5dba7SAnatoly Burakov 		{
1194c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
1195c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1196c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
1197c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1198c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1199c7f5dba7SAnatoly Burakov 			break;
1200c7f5dba7SAnatoly Burakov 		}
1201761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1202c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
1203c7f5dba7SAnatoly Burakov 		{
1204b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1205c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
1206148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
120759fcf854SShahaf Shuler 				socket_id, mempool_flags);
120824427bb9SOlivier Matz 			if (rte_mp == NULL)
120924427bb9SOlivier Matz 				goto err;
1210b19a0c75SOlivier Matz 
1211b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
1212b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
1213b19a0c75SOlivier Matz 				rte_mp = NULL;
121424427bb9SOlivier Matz 				goto err;
1215b19a0c75SOlivier Matz 			}
1216b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
1217b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
12183a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1219c7f5dba7SAnatoly Burakov 			break;
1220c7f5dba7SAnatoly Burakov 		}
1221c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
1222c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
1223c7f5dba7SAnatoly Burakov 		{
1224c7f5dba7SAnatoly Burakov 			int heap_socket;
1225c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1226c7f5dba7SAnatoly Burakov 
1227c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1228c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1229c7f5dba7SAnatoly Burakov 
1230c7f5dba7SAnatoly Burakov 			heap_socket =
1231c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1232c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
1233c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1234c7f5dba7SAnatoly Burakov 
12350e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
12360e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
1237ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1238c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
1239c7f5dba7SAnatoly Burakov 					heap_socket);
1240c7f5dba7SAnatoly Burakov 			break;
1241c7f5dba7SAnatoly Burakov 		}
1242761f7ae1SJie Zhou #endif
124372512e18SViacheslav Ovsiienko 	case MP_ALLOC_XBUF:
124472512e18SViacheslav Ovsiienko 		{
124572512e18SViacheslav Ovsiienko 			struct rte_pktmbuf_extmem *ext_mem;
124672512e18SViacheslav Ovsiienko 			unsigned int ext_num;
124772512e18SViacheslav Ovsiienko 
124872512e18SViacheslav Ovsiienko 			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
124972512e18SViacheslav Ovsiienko 					       socket_id, pool_name, &ext_mem);
125072512e18SViacheslav Ovsiienko 			if (ext_num == 0)
125172512e18SViacheslav Ovsiienko 				rte_exit(EXIT_FAILURE,
125272512e18SViacheslav Ovsiienko 					 "Can't create pinned data buffers\n");
125372512e18SViacheslav Ovsiienko 
125472512e18SViacheslav Ovsiienko 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
125572512e18SViacheslav Ovsiienko 					rte_mbuf_best_mempool_ops());
125672512e18SViacheslav Ovsiienko 			rte_mp = rte_pktmbuf_pool_create_extbuf
125772512e18SViacheslav Ovsiienko 					(pool_name, nb_mbuf, mb_mempool_cache,
125872512e18SViacheslav Ovsiienko 					 0, mbuf_seg_size, socket_id,
125972512e18SViacheslav Ovsiienko 					 ext_mem, ext_num);
126072512e18SViacheslav Ovsiienko 			free(ext_mem);
126172512e18SViacheslav Ovsiienko 			break;
126272512e18SViacheslav Ovsiienko 		}
1263c7f5dba7SAnatoly Burakov 	default:
1264c7f5dba7SAnatoly Burakov 		{
1265c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1266c7f5dba7SAnatoly Burakov 		}
1267bece7b6cSChristian Ehrhardt 	}
1268148f963fSBruce Richardson 
1269761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
127024427bb9SOlivier Matz err:
1271761f7ae1SJie Zhou #endif
1272af75078fSIntel 	if (rte_mp == NULL) {
1273d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
1274d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
1275d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
1276148f963fSBruce Richardson 	} else if (verbose_level > 0) {
1277591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
1278af75078fSIntel 	}
1279401b744dSShahaf Shuler 	return rte_mp;
1280af75078fSIntel }
1281af75078fSIntel 
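/*
 * Editor's sketch (not part of the original sources): the simplest
 * equivalent of the MP_ALLOC_NATIVE path above, expressed with the
 * public mempool API only. The pool name, element count, cache size
 * and data room size are illustrative values.
 */
static __rte_unused struct rte_mempool *
example_native_mbuf_pool(void)
{
	/* 2176 bytes = 2048 B of data room plus 128 B of headroom */
	return rte_pktmbuf_pool_create("example_mb_pool", 8192, 250, 0,
				       2176, SOCKET_ID_ANY);
}
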
128220a0286fSLiu Xiaofeng /*
128320a0286fSLiu Xiaofeng  * Check whether the given socket id is valid in NUMA mode.
128420a0286fSLiu Xiaofeng  * Return 0 if valid, -1 otherwise.
128520a0286fSLiu Xiaofeng  */
128620a0286fSLiu Xiaofeng static int
128720a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
128820a0286fSLiu Xiaofeng {
128920a0286fSLiu Xiaofeng 	static int warning_once = 0;
129020a0286fSLiu Xiaofeng 
1291c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
129220a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
129361a3b0e5SAndrew Rybchenko 			fprintf(stderr,
129461a3b0e5SAndrew Rybchenko 				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
129520a0286fSLiu Xiaofeng 		warning_once = 1;
129620a0286fSLiu Xiaofeng 		return -1;
129720a0286fSLiu Xiaofeng 	}
129820a0286fSLiu Xiaofeng 	return 0;
129920a0286fSLiu Xiaofeng }
130020a0286fSLiu Xiaofeng 
13013f7311baSWei Dai /*
13023f7311baSWei Dai  * Get the allowed maximum number of RX queues.
13033f7311baSWei Dai  * *pid returns the id of the port that has the minimal value of
13043f7311baSWei Dai  * max_rx_queues among all ports.
13053f7311baSWei Dai  */
13063f7311baSWei Dai queueid_t
13073f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
13083f7311baSWei Dai {
13099e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
13106f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
13113f7311baSWei Dai 	portid_t pi;
13123f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
13133f7311baSWei Dai 
13143f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13156f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13166f51deb9SIvan Ilchenko 			continue;
13176f51deb9SIvan Ilchenko 
13186f51deb9SIvan Ilchenko 		max_rxq_valid = true;
13193f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
13203f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
13213f7311baSWei Dai 			*pid = pi;
13223f7311baSWei Dai 		}
13233f7311baSWei Dai 	}
13246f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
13253f7311baSWei Dai }
13263f7311baSWei Dai 
13273f7311baSWei Dai /*
13283f7311baSWei Dai  * Check whether the input rxq is valid.
13293f7311baSWei Dai  * It is valid if it does not exceed the maximum number of
13303f7311baSWei Dai  * RX queues of any port.
13313f7311baSWei Dai  * Return 0 if valid, -1 otherwise.
13323f7311baSWei Dai  */
13333f7311baSWei Dai int
13343f7311baSWei Dai check_nb_rxq(queueid_t rxq)
13353f7311baSWei Dai {
13363f7311baSWei Dai 	queueid_t allowed_max_rxq;
13373f7311baSWei Dai 	portid_t pid = 0;
13383f7311baSWei Dai 
13393f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
13403f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
134161a3b0e5SAndrew Rybchenko 		fprintf(stderr,
134261a3b0e5SAndrew Rybchenko 			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
134361a3b0e5SAndrew Rybchenko 			rxq, allowed_max_rxq, pid);
13443f7311baSWei Dai 		return -1;
13453f7311baSWei Dai 	}
13463f7311baSWei Dai 	return 0;
13473f7311baSWei Dai }
13483f7311baSWei Dai 
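/*
 * Editor's sketch (hypothetical caller, not in the original sources):
 * how an option handler would typically use check_nb_rxq() before
 * committing a new Rx queue count to testpmd's global nb_rxq.
 */
static __rte_unused void
example_apply_rxq_option(queueid_t n)
{
	if (check_nb_rxq(n) != 0)
		rte_exit(EXIT_FAILURE, "rxq %u exceeds a port limit\n",
			 (unsigned int)n);
	nb_rxq = n;
}
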
134936db4f6cSWei Dai /*
135036db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
135136db4f6cSWei Dai  * *pid returns the id of the port that has the minimal value of
135236db4f6cSWei Dai  * max_tx_queues among all ports.
135336db4f6cSWei Dai  */
135436db4f6cSWei Dai queueid_t
135536db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
135636db4f6cSWei Dai {
13579e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
13586f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
135936db4f6cSWei Dai 	portid_t pi;
136036db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
136136db4f6cSWei Dai 
136236db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13636f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13646f51deb9SIvan Ilchenko 			continue;
13656f51deb9SIvan Ilchenko 
13666f51deb9SIvan Ilchenko 		max_txq_valid = true;
136736db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
136836db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
136936db4f6cSWei Dai 			*pid = pi;
137036db4f6cSWei Dai 		}
137136db4f6cSWei Dai 	}
13726f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
137336db4f6cSWei Dai }
137436db4f6cSWei Dai 
137536db4f6cSWei Dai /*
137636db4f6cSWei Dai  * Check whether the input txq is valid.
137736db4f6cSWei Dai  * It is valid if it does not exceed the maximum number of
137836db4f6cSWei Dai  * TX queues of any port.
137936db4f6cSWei Dai  * Return 0 if valid, -1 otherwise.
138036db4f6cSWei Dai  */
138136db4f6cSWei Dai int
138236db4f6cSWei Dai check_nb_txq(queueid_t txq)
138336db4f6cSWei Dai {
138436db4f6cSWei Dai 	queueid_t allowed_max_txq;
138536db4f6cSWei Dai 	portid_t pid = 0;
138636db4f6cSWei Dai 
138736db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
138836db4f6cSWei Dai 	if (txq > allowed_max_txq) {
138961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
139061a3b0e5SAndrew Rybchenko 			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
139161a3b0e5SAndrew Rybchenko 			txq, allowed_max_txq, pid);
139236db4f6cSWei Dai 		return -1;
139336db4f6cSWei Dai 	}
139436db4f6cSWei Dai 	return 0;
139536db4f6cSWei Dai }
139636db4f6cSWei Dai 
13971c69df45SOri Kam /*
139899e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
139999e040d3SLijun Ou  * *pid returns the id of the port whose queues have the minimal
140099e040d3SLijun Ou  * value of max_rxd among all ports.
140199e040d3SLijun Ou  */
140299e040d3SLijun Ou static uint16_t
140399e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
140499e040d3SLijun Ou {
140599e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
140699e040d3SLijun Ou 	portid_t pi;
140799e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
140899e040d3SLijun Ou 
140999e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
141099e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
141199e040d3SLijun Ou 			continue;
141299e040d3SLijun Ou 
141399e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
141499e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
141599e040d3SLijun Ou 			*pid = pi;
141699e040d3SLijun Ou 		}
141799e040d3SLijun Ou 	}
141899e040d3SLijun Ou 	return allowed_max_rxd;
141999e040d3SLijun Ou }
142099e040d3SLijun Ou 
142199e040d3SLijun Ou /*
142299e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
142399e040d3SLijun Ou  * *pid returns the id of the port whose queues have the maximal
142499e040d3SLijun Ou  * value of min_rxd among all ports.
142599e040d3SLijun Ou  */
142699e040d3SLijun Ou static uint16_t
142799e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
142899e040d3SLijun Ou {
142999e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
143099e040d3SLijun Ou 	portid_t pi;
143199e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
143299e040d3SLijun Ou 
143399e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
143499e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
143599e040d3SLijun Ou 			continue;
143699e040d3SLijun Ou 
143799e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
143899e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
143999e040d3SLijun Ou 			*pid = pi;
144099e040d3SLijun Ou 		}
144199e040d3SLijun Ou 	}
144299e040d3SLijun Ou 
144399e040d3SLijun Ou 	return allowed_min_rxd;
144499e040d3SLijun Ou }
144599e040d3SLijun Ou 
144699e040d3SLijun Ou /*
144799e040d3SLijun Ou  * Check whether the input rxd is valid.
144899e040d3SLijun Ou  * It is valid if it does not exceed the maximum number of RXDs
144999e040d3SLijun Ou  * of any Rx queue and is not less than the minimal number of
145099e040d3SLijun Ou  * RXDs of any Rx queue.
145199e040d3SLijun Ou  * Return 0 if valid, -1 otherwise.
145299e040d3SLijun Ou  */
145399e040d3SLijun Ou int
145499e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
145599e040d3SLijun Ou {
145699e040d3SLijun Ou 	uint16_t allowed_max_rxd;
145799e040d3SLijun Ou 	uint16_t allowed_min_rxd;
145899e040d3SLijun Ou 	portid_t pid = 0;
145999e040d3SLijun Ou 
146099e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
146199e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
146261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
146361a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
146461a3b0e5SAndrew Rybchenko 			rxd, allowed_max_rxd, pid);
146599e040d3SLijun Ou 		return -1;
146699e040d3SLijun Ou 	}
146799e040d3SLijun Ou 
146899e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
146999e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
147061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
147161a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
147261a3b0e5SAndrew Rybchenko 			rxd, allowed_min_rxd, pid);
147399e040d3SLijun Ou 		return -1;
147499e040d3SLijun Ou 	}
147599e040d3SLijun Ou 
147699e040d3SLijun Ou 	return 0;
147799e040d3SLijun Ou }
147899e040d3SLijun Ou 
147999e040d3SLijun Ou /*
148099e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queue.
148199e040d3SLijun Ou  * *pid returns the id of the port whose queues have the minimal
148299e040d3SLijun Ou  * value of max_txd among all ports.
148399e040d3SLijun Ou  */
148499e040d3SLijun Ou static uint16_t
148599e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
148699e040d3SLijun Ou {
148799e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
148899e040d3SLijun Ou 	portid_t pi;
148999e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
149099e040d3SLijun Ou 
149199e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
149299e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
149399e040d3SLijun Ou 			continue;
149499e040d3SLijun Ou 
149599e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
149699e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
149799e040d3SLijun Ou 			*pid = pi;
149899e040d3SLijun Ou 		}
149999e040d3SLijun Ou 	}
150099e040d3SLijun Ou 	return allowed_max_txd;
150199e040d3SLijun Ou }
150299e040d3SLijun Ou 
150399e040d3SLijun Ou /*
150499e040d3SLijun Ou  * Get the allowed minimal number of TXDs of every tx queue.
150599e040d3SLijun Ou  * *pid returns the id of the port whose queues have the maximal
150699e040d3SLijun Ou  * value of min_txd among all ports.
150799e040d3SLijun Ou  */
150899e040d3SLijun Ou static uint16_t
150999e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
151099e040d3SLijun Ou {
151199e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
151299e040d3SLijun Ou 	portid_t pi;
151399e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
151499e040d3SLijun Ou 
151599e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
151699e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
151799e040d3SLijun Ou 			continue;
151899e040d3SLijun Ou 
151999e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
152099e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
152199e040d3SLijun Ou 			*pid = pi;
152299e040d3SLijun Ou 		}
152399e040d3SLijun Ou 	}
152499e040d3SLijun Ou 
152599e040d3SLijun Ou 	return allowed_min_txd;
152699e040d3SLijun Ou }
152799e040d3SLijun Ou 
152899e040d3SLijun Ou /*
152999e040d3SLijun Ou  * Check whether the input txd is valid.
153099e040d3SLijun Ou  * It is valid if it does not exceed the maximum number of TXDs of
153199e040d3SLijun Ou  * any Tx queue and is not less than the minimal number of TXDs of
153299e040d3SLijun Ou  * any Tx queue.  Return 0 if valid, -1 otherwise.
153399e040d3SLijun Ou  */
153499e040d3SLijun Ou int
153599e040d3SLijun Ou check_nb_txd(queueid_t txd)
153699e040d3SLijun Ou {
153799e040d3SLijun Ou 	uint16_t allowed_max_txd;
153899e040d3SLijun Ou 	uint16_t allowed_min_txd;
153999e040d3SLijun Ou 	portid_t pid = 0;
154099e040d3SLijun Ou 
154199e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
154299e040d3SLijun Ou 	if (txd > allowed_max_txd) {
154361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
154461a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
154561a3b0e5SAndrew Rybchenko 			txd, allowed_max_txd, pid);
154699e040d3SLijun Ou 		return -1;
154799e040d3SLijun Ou 	}
154899e040d3SLijun Ou 
154999e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
155099e040d3SLijun Ou 	if (txd < allowed_min_txd) {
155161a3b0e5SAndrew Rybchenko 		fprintf(stderr,
155261a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
155361a3b0e5SAndrew Rybchenko 			txd, allowed_min_txd, pid);
155499e040d3SLijun Ou 		return -1;
155599e040d3SLijun Ou 	}
155699e040d3SLijun Ou 	return 0;
155799e040d3SLijun Ou }
155899e040d3SLijun Ou 
155999e040d3SLijun Ou 
156099e040d3SLijun Ou /*
15611c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
15621c69df45SOri Kam  * *pid returns the id of the port that has the minimal value of
15631c69df45SOri Kam  * max_hairpin_queues among all ports.
15641c69df45SOri Kam  */
15651c69df45SOri Kam queueid_t
15661c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15671c69df45SOri Kam {
15689e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15691c69df45SOri Kam 	portid_t pi;
15701c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
15711c69df45SOri Kam 
15721c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
15731c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15741c69df45SOri Kam 			*pid = pi;
15751c69df45SOri Kam 			return 0;
15761c69df45SOri Kam 		}
15771c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
15781c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
15791c69df45SOri Kam 			*pid = pi;
15801c69df45SOri Kam 		}
15811c69df45SOri Kam 	}
15821c69df45SOri Kam 	return allowed_max_hairpinq;
15831c69df45SOri Kam }
15841c69df45SOri Kam 
15851c69df45SOri Kam /*
15861c69df45SOri Kam  * Check whether the input hairpin queue count is valid.
15871c69df45SOri Kam  * It is valid if it does not exceed the maximum number of
15881c69df45SOri Kam  * hairpin queues of any port.
15891c69df45SOri Kam  * Return 0 if valid, -1 otherwise.
15901c69df45SOri Kam  */
15911c69df45SOri Kam int
15921c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
15931c69df45SOri Kam {
15941c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
15951c69df45SOri Kam 	portid_t pid = 0;
15961c69df45SOri Kam 
15971c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
15981c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
159961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
160061a3b0e5SAndrew Rybchenko 			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
16011c69df45SOri Kam 			hairpinq, allowed_max_hairpinq, pid);
16021c69df45SOri Kam 		return -1;
16031c69df45SOri Kam 	}
16041c69df45SOri Kam 	return 0;
16051c69df45SOri Kam }
16061c69df45SOri Kam 
16071bb4a528SFerruh Yigit static int
16081bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info)
16091bb4a528SFerruh Yigit {
16101bb4a528SFerruh Yigit 	uint32_t eth_overhead;
16111bb4a528SFerruh Yigit 
16121bb4a528SFerruh Yigit 	if (dev_info->max_mtu != UINT16_MAX &&
16131bb4a528SFerruh Yigit 	    dev_info->max_rx_pktlen > dev_info->max_mtu)
16141bb4a528SFerruh Yigit 		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
16151bb4a528SFerruh Yigit 	else
16161bb4a528SFerruh Yigit 		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
16171bb4a528SFerruh Yigit 
16181bb4a528SFerruh Yigit 	return eth_overhead;
16191bb4a528SFerruh Yigit }
16201bb4a528SFerruh Yigit 
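/*
 * Editor's worked example: a device reporting max_rx_pktlen = 1518 and
 * max_mtu = 1500 yields an overhead of 1518 - 1500 = 18 bytes; a device
 * that does not report a usable max_mtu falls back to
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 18 bytes as well.
 */
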
1621af75078fSIntel static void
1622b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id)
1623b6b8a1ebSViacheslav Ovsiienko {
1624b6b8a1ebSViacheslav Ovsiienko 	struct rte_port *port = &ports[pid];
1625b6b8a1ebSViacheslav Ovsiienko 	int ret;
1626b6b8a1ebSViacheslav Ovsiienko 	int i;
1627b6b8a1ebSViacheslav Ovsiienko 
1628f6d8a6d3SIvan Malov 	eth_rx_metadata_negotiate_mp(pid);
1629f6d8a6d3SIvan Malov 
1630b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.txmode = tx_mode;
1631b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.rxmode = rx_mode;
1632b6b8a1ebSViacheslav Ovsiienko 
1633b6b8a1ebSViacheslav Ovsiienko 	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1634b6b8a1ebSViacheslav Ovsiienko 	if (ret != 0)
1635b6b8a1ebSViacheslav Ovsiienko 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1636b6b8a1ebSViacheslav Ovsiienko 
1637295968d1SFerruh Yigit 	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1638b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.txmode.offloads &=
1639295968d1SFerruh Yigit 			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1640b6b8a1ebSViacheslav Ovsiienko 
1641b6b8a1ebSViacheslav Ovsiienko 	/* Apply Rx offloads configuration */
1642b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
16433c4426dbSDmitry Kozlyuk 		port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
1644b6b8a1ebSViacheslav Ovsiienko 	/* Apply Tx offloads configuration */
1645b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
16463c4426dbSDmitry Kozlyuk 		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
1647b6b8a1ebSViacheslav Ovsiienko 
1648b6b8a1ebSViacheslav Ovsiienko 	if (eth_link_speed)
1649b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.link_speeds = eth_link_speed;
1650b6b8a1ebSViacheslav Ovsiienko 
16511bb4a528SFerruh Yigit 	if (max_rx_pkt_len)
16521bb4a528SFerruh Yigit 		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
16531bb4a528SFerruh Yigit 			get_eth_overhead(&port->dev_info);
16541bb4a528SFerruh Yigit 
1655b6b8a1ebSViacheslav Ovsiienko 	/* set flag to initialize port/queue */
1656b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig = 1;
1657b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig_queues = 1;
1658b6b8a1ebSViacheslav Ovsiienko 	port->socket_id = socket_id;
1659b6b8a1ebSViacheslav Ovsiienko 	port->tx_metadata = 0;
1660b6b8a1ebSViacheslav Ovsiienko 
1661b6b8a1ebSViacheslav Ovsiienko 	/*
1662b6b8a1ebSViacheslav Ovsiienko 	 * Check the maximum number of segments per MTU and
1663b6b8a1ebSViacheslav Ovsiienko 	 * update the mbuf data size accordingly.
1664b6b8a1ebSViacheslav Ovsiienko 	 */
1665b6b8a1ebSViacheslav Ovsiienko 	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1666b6b8a1ebSViacheslav Ovsiienko 	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
16671bb4a528SFerruh Yigit 		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
16681bb4a528SFerruh Yigit 		uint16_t mtu;
1669b6b8a1ebSViacheslav Ovsiienko 
16701bb4a528SFerruh Yigit 		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
16711bb4a528SFerruh Yigit 			uint16_t data_size = (mtu + eth_overhead) /
16721bb4a528SFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
16731bb4a528SFerruh Yigit 			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
16741bb4a528SFerruh Yigit 
16751bb4a528SFerruh Yigit 			if (buffer_size > mbuf_data_size[0]) {
16761bb4a528SFerruh Yigit 				mbuf_data_size[0] = buffer_size;
1677b6b8a1ebSViacheslav Ovsiienko 				TESTPMD_LOG(WARNING,
1678b6b8a1ebSViacheslav Ovsiienko 					"Configured mbuf size of the first segment %hu\n",
1679b6b8a1ebSViacheslav Ovsiienko 					mbuf_data_size[0]);
1680b6b8a1ebSViacheslav Ovsiienko 			}
1681b6b8a1ebSViacheslav Ovsiienko 		}
1682b6b8a1ebSViacheslav Ovsiienko 	}
16831bb4a528SFerruh Yigit }
1684b6b8a1ebSViacheslav Ovsiienko 
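/*
 * Editor's worked example for the per-MTU segment check above
 * (illustrative numbers): with mtu = 9000, an 18-byte overhead and
 * rx_desc_lim.nb_mtu_seg_max = 5, one segment must carry
 * (9000 + 18) / 5 = 1803 bytes of data, i.e. a buffer of
 * 1803 + RTE_PKTMBUF_HEADROOM (128 by default) = 1931 bytes;
 * mbuf_data_size[0] is raised only if it is currently smaller.
 */
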
1685b6b8a1ebSViacheslav Ovsiienko static void
1686af75078fSIntel init_config(void)
1687af75078fSIntel {
1688ce8d5614SIntel 	portid_t pid;
1689af75078fSIntel 	struct rte_mempool *mbp;
1690af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1691af75078fSIntel 	lcoreid_t  lc_id;
16926970401eSDavid Marchand #ifdef RTE_LIB_GRO
1693b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
16946970401eSDavid Marchand #endif
16956970401eSDavid Marchand #ifdef RTE_LIB_GSO
169652f38a20SJiayu Hu 	uint32_t gso_types;
16976970401eSDavid Marchand #endif
1698487f9a59SYulong Pei 
1699af75078fSIntel 	/* Configuration of logical cores. */
1700af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1701af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1702fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1703af75078fSIntel 	if (fwd_lcores == NULL) {
1704ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1705ce8d5614SIntel 							"failed\n", nb_lcores);
1706af75078fSIntel 	}
1707af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1708af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1709af75078fSIntel 					       sizeof(struct fwd_lcore),
1710fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1711af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1712ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1713ce8d5614SIntel 								"failed\n");
1714af75078fSIntel 		}
1715af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1716af75078fSIntel 	}
1717af75078fSIntel 
17187d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1719b6b8a1ebSViacheslav Ovsiienko 		uint32_t socket_id;
17206f51deb9SIvan Ilchenko 
1721b6ea6408SIntel 		if (numa_support) {
1722b6b8a1ebSViacheslav Ovsiienko 			socket_id = port_numa[pid];
1723b6b8a1ebSViacheslav Ovsiienko 			if (port_numa[pid] == NUMA_NO_CONFIG) {
1724b6b8a1ebSViacheslav Ovsiienko 				socket_id = rte_eth_dev_socket_id(pid);
172520a0286fSLiu Xiaofeng 
172629841336SPhil Yang 				/*
172729841336SPhil Yang 				 * if socket_id is invalid,
172829841336SPhil Yang 				 * set to the first available socket.
172929841336SPhil Yang 				 */
173020a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
173129841336SPhil Yang 					socket_id = socket_ids[0];
1732b6ea6408SIntel 			}
1733b6b8a1ebSViacheslav Ovsiienko 		} else {
1734b6b8a1ebSViacheslav Ovsiienko 			socket_id = (socket_num == UMA_NO_CONFIG) ?
1735b6b8a1ebSViacheslav Ovsiienko 				    0 : socket_num;
1736b6ea6408SIntel 		}
1737b6b8a1ebSViacheslav Ovsiienko 		/* Apply default TxRx configuration for all ports */
1738b6b8a1ebSViacheslav Ovsiienko 		init_config_port_offloads(pid, socket_id);
1739ce8d5614SIntel 	}
17403ab64341SOlivier Matz 	/*
17413ab64341SOlivier Matz 	 * Create mbuf pools.
17423ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbufs in
17433ab64341SOlivier Matz 	 * socket 0 memory by default.
17443ab64341SOlivier Matz 	 * Otherwise, create an mbuf pool in the memory of each configured socket.
17453ab64341SOlivier Matz 	 *
17463ab64341SOlivier Matz 	 * Use the maximum of nb_rxd and nb_txd here, so that nb_rxd and
17473ab64341SOlivier Matz 	 * nb_txd can still be configured at run time.
17483ab64341SOlivier Matz 	 */
17493ab64341SOlivier Matz 	if (param_total_num_mbufs)
17503ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
17513ab64341SOlivier Matz 	else {
17524ed89049SDavid Marchand 		nb_mbuf_per_pool = RX_DESC_MAX +
17533ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
17544ed89049SDavid Marchand 			TX_DESC_MAX + MAX_PKT_BURST;
17553ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
17563ab64341SOlivier Matz 	}
17573ab64341SOlivier Matz 
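	/*
	 * Editor's worked example (assuming RX_DESC_MAX = TX_DESC_MAX =
	 * 2048, MAX_PKT_BURST = 512 and RTE_MAX_ETHPORTS = 32, as in a
	 * typical build): with 4 lcores and the default 250-entry mempool
	 * cache, nb_mbuf_per_pool = (2048 + 4 * 250 + 2048 + 512) * 32 =
	 * 179456 mbufs per pool.
	 */
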
1758b6ea6408SIntel 	if (numa_support) {
175926cbb419SViacheslav Ovsiienko 		uint8_t i, j;
1760ce8d5614SIntel 
1761c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
176226cbb419SViacheslav Ovsiienko 			for (j = 0; j < mbuf_data_size_n; j++)
176326cbb419SViacheslav Ovsiienko 				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
176426cbb419SViacheslav Ovsiienko 					mbuf_pool_create(mbuf_data_size[j],
1765401b744dSShahaf Shuler 							  nb_mbuf_per_pool,
176626cbb419SViacheslav Ovsiienko 							  socket_ids[i], j);
17673ab64341SOlivier Matz 	} else {
176826cbb419SViacheslav Ovsiienko 		uint8_t i;
176926cbb419SViacheslav Ovsiienko 
177026cbb419SViacheslav Ovsiienko 		for (i = 0; i < mbuf_data_size_n; i++)
177126cbb419SViacheslav Ovsiienko 			mempools[i] = mbuf_pool_create
177226cbb419SViacheslav Ovsiienko 					(mbuf_data_size[i],
1773401b744dSShahaf Shuler 					 nb_mbuf_per_pool,
177426cbb419SViacheslav Ovsiienko 					 socket_num == UMA_NO_CONFIG ?
177526cbb419SViacheslav Ovsiienko 					 0 : socket_num, i);
17763ab64341SOlivier Matz 	}
1777b6ea6408SIntel 
1778b6ea6408SIntel 	init_port_config();
17795886ae07SAdrien Mazarguil 
17806970401eSDavid Marchand #ifdef RTE_LIB_GSO
1781295968d1SFerruh Yigit 	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1782295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
17836970401eSDavid Marchand #endif
17845886ae07SAdrien Mazarguil 	/*
17855886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
17865886ae07SAdrien Mazarguil 	 */
17875886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
17888fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
178926cbb419SViacheslav Ovsiienko 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
17908fd8bebcSAdrien Mazarguil 
17915886ae07SAdrien Mazarguil 		if (mbp == NULL)
179226cbb419SViacheslav Ovsiienko 			mbp = mbuf_pool_find(0, 0);
17935886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
17946970401eSDavid Marchand #ifdef RTE_LIB_GSO
179552f38a20SJiayu Hu 		/* initialize GSO context */
179652f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
179752f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
179852f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
179935b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
180035b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
180152f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
18026970401eSDavid Marchand #endif
18035886ae07SAdrien Mazarguil 	}
18045886ae07SAdrien Mazarguil 
18050c0db76fSBernard Iremonger 	fwd_config_setup();
1806b7091f1dSJiayu Hu 
18076970401eSDavid Marchand #ifdef RTE_LIB_GRO
1808b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1809b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1810b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1811b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1812b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1813b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1814b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1815b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1816b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1817b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1818b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1819b7091f1dSJiayu Hu 		}
1820b7091f1dSJiayu Hu 	}
18216970401eSDavid Marchand #endif
1822ce8d5614SIntel }
1823ce8d5614SIntel 
18242950a769SDeclan Doherty 
18252950a769SDeclan Doherty void
1826a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
18272950a769SDeclan Doherty {
18282950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
1829b6b8a1ebSViacheslav Ovsiienko 	init_config_port_offloads(new_port_id, socket_id);
18302950a769SDeclan Doherty 	init_port_config();
18312950a769SDeclan Doherty }
18322950a769SDeclan Doherty 
1833ce8d5614SIntel int
1834ce8d5614SIntel init_fwd_streams(void)
1835ce8d5614SIntel {
1836ce8d5614SIntel 	portid_t pid;
1837ce8d5614SIntel 	struct rte_port *port;
1838ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
18395a8fb55cSReshma Pattan 	queueid_t q;
1840ce8d5614SIntel 
1841ce8d5614SIntel 	/* set socket id according to numa or not */
18427d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1843ce8d5614SIntel 		port = &ports[pid];
1844ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
184561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
184661a3b0e5SAndrew Rybchenko 				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
184761a3b0e5SAndrew Rybchenko 				nb_rxq, port->dev_info.max_rx_queues);
1848ce8d5614SIntel 			return -1;
1849ce8d5614SIntel 		}
1850ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
185161a3b0e5SAndrew Rybchenko 			fprintf(stderr,
185261a3b0e5SAndrew Rybchenko 				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
185361a3b0e5SAndrew Rybchenko 				nb_txq, port->dev_info.max_tx_queues);
1854ce8d5614SIntel 			return -1;
1855ce8d5614SIntel 		}
185620a0286fSLiu Xiaofeng 		if (numa_support) {
185720a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
185820a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
185920a0286fSLiu Xiaofeng 			else {
1860b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
186120a0286fSLiu Xiaofeng 
186229841336SPhil Yang 				/*
186329841336SPhil Yang 				 * if socket_id is invalid,
186429841336SPhil Yang 				 * set to the first available socket.
186529841336SPhil Yang 				 */
186620a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
186729841336SPhil Yang 					port->socket_id = socket_ids[0];
186820a0286fSLiu Xiaofeng 			}
186920a0286fSLiu Xiaofeng 		}
1870b6ea6408SIntel 		else {
1871b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1872af75078fSIntel 				port->socket_id = 0;
1873b6ea6408SIntel 			else
1874b6ea6408SIntel 				port->socket_id = socket_num;
1875b6ea6408SIntel 		}
1876af75078fSIntel 	}
1877af75078fSIntel 
18785a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
18795a8fb55cSReshma Pattan 	if (q == 0) {
188061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
188161a3b0e5SAndrew Rybchenko 			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
18825a8fb55cSReshma Pattan 		return -1;
18835a8fb55cSReshma Pattan 	}
18845a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1885ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1886ce8d5614SIntel 		return 0;
1887ce8d5614SIntel 	/* clear the old */
1888ce8d5614SIntel 	if (fwd_streams != NULL) {
1889ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1890ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1891ce8d5614SIntel 				continue;
1892ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1893ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1894af75078fSIntel 		}
1895ce8d5614SIntel 		rte_free(fwd_streams);
1896ce8d5614SIntel 		fwd_streams = NULL;
1897ce8d5614SIntel 	}
1898ce8d5614SIntel 
1899ce8d5614SIntel 	/* init new */
1900ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
19011f84c469SMatan Azrad 	if (nb_fwd_streams) {
1902ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
19031f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
19041f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1905ce8d5614SIntel 		if (fwd_streams == NULL)
19061f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
19071f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
19081f84c469SMatan Azrad 				 nb_fwd_streams);
1909ce8d5614SIntel 
1910af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
19111f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
19121f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
19131f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1914ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
19151f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
19161f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
19171f84c469SMatan Azrad 		}
1918af75078fSIntel 	}
1919ce8d5614SIntel 
1920ce8d5614SIntel 	return 0;
1921af75078fSIntel }
1922af75078fSIntel 
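/*
 * Editor's worked example: with 2 forwarding ports, nb_rxq = 4 and
 * nb_txq = 2, init_fwd_streams() above allocates
 * nb_ports * RTE_MAX(nb_rxq, nb_txq) = 2 * 4 = 8 forwarding streams.
 */
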
1923af75078fSIntel static void
1924af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1925af75078fSIntel {
19267569b8c1SHonnappa Nagarahalli 	uint64_t total_burst, sburst;
192785de481aSHonnappa Nagarahalli 	uint64_t nb_burst;
19287569b8c1SHonnappa Nagarahalli 	uint64_t burst_stats[4];
19297569b8c1SHonnappa Nagarahalli 	uint16_t pktnb_stats[4];
1930af75078fSIntel 	uint16_t nb_pkt;
19317569b8c1SHonnappa Nagarahalli 	int burst_percent[4], sburstp;
19327569b8c1SHonnappa Nagarahalli 	int i;
1933af75078fSIntel 
1934af75078fSIntel 	/*
1935af75078fSIntel 	 * First compute the total number of packet bursts and find the
1936af75078fSIntel 	 * two most frequent burst sizes (the zero-packet bucket is always shown).
1937af75078fSIntel 	 */
19387569b8c1SHonnappa Nagarahalli 	memset(&burst_stats, 0x0, sizeof(burst_stats));
19397569b8c1SHonnappa Nagarahalli 	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
19407569b8c1SHonnappa Nagarahalli 
19417569b8c1SHonnappa Nagarahalli 	/* Show stats for 0 burst size always */
19427569b8c1SHonnappa Nagarahalli 	total_burst = pbs->pkt_burst_spread[0];
19437569b8c1SHonnappa Nagarahalli 	burst_stats[0] = pbs->pkt_burst_spread[0];
19447569b8c1SHonnappa Nagarahalli 	pktnb_stats[0] = 0;
19457569b8c1SHonnappa Nagarahalli 
19467569b8c1SHonnappa Nagarahalli 	/* Find the next 2 burst sizes with highest occurrences. */
19476a8b64fdSEli Britstein 	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1948af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
19497569b8c1SHonnappa Nagarahalli 
1950af75078fSIntel 		if (nb_burst == 0)
1951af75078fSIntel 			continue;
19527569b8c1SHonnappa Nagarahalli 
1953af75078fSIntel 		total_burst += nb_burst;
19547569b8c1SHonnappa Nagarahalli 
19557569b8c1SHonnappa Nagarahalli 		if (nb_burst > burst_stats[1]) {
19567569b8c1SHonnappa Nagarahalli 			burst_stats[2] = burst_stats[1];
19577569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = pktnb_stats[1];
1958fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1959fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
19607569b8c1SHonnappa Nagarahalli 		} else if (nb_burst > burst_stats[2]) {
19617569b8c1SHonnappa Nagarahalli 			burst_stats[2] = nb_burst;
19627569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = nb_pkt;
1963af75078fSIntel 		}
1964af75078fSIntel 	}
1965af75078fSIntel 	if (total_burst == 0)
1966af75078fSIntel 		return;
19677569b8c1SHonnappa Nagarahalli 
19687569b8c1SHonnappa Nagarahalli 	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
19697569b8c1SHonnappa Nagarahalli 	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
19707569b8c1SHonnappa Nagarahalli 		if (i == 3) {
19717569b8c1SHonnappa Nagarahalli 			printf("%d%% of other]\n", 100 - sburstp);
1972af75078fSIntel 			return;
1973af75078fSIntel 		}
19747569b8c1SHonnappa Nagarahalli 
19757569b8c1SHonnappa Nagarahalli 		sburst += burst_stats[i];
19767569b8c1SHonnappa Nagarahalli 		if (sburst == total_burst) {
19777569b8c1SHonnappa Nagarahalli 			printf("%d%% of %d pkts]\n",
19787569b8c1SHonnappa Nagarahalli 				100 - sburstp, (int) pktnb_stats[i]);
1979af75078fSIntel 			return;
1980af75078fSIntel 		}
19817569b8c1SHonnappa Nagarahalli 
19827569b8c1SHonnappa Nagarahalli 		burst_percent[i] =
19837569b8c1SHonnappa Nagarahalli 			(double)burst_stats[i] / total_burst * 100;
19847569b8c1SHonnappa Nagarahalli 		printf("%d%% of %d pkts + ",
19857569b8c1SHonnappa Nagarahalli 			burst_percent[i], (int) pktnb_stats[i]);
19867569b8c1SHonnappa Nagarahalli 		sburstp += burst_percent[i];
1987af75078fSIntel 	}
1988af75078fSIntel }
1989af75078fSIntel 
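/*
 * Editor's note: illustrative output shape of pkt_burst_stats_display()
 * (counts invented for the example; the zero-packet bucket always comes
 * first):
 *   RX-bursts : 10000 [0% of 0 pkts + 75% of 32 pkts + 25% of 16 pkts]
 */
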
1990af75078fSIntel static void
1991af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1992af75078fSIntel {
1993af75078fSIntel 	struct fwd_stream *fs;
1994af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1995af75078fSIntel 
1996af75078fSIntel 	fs = fwd_streams[stream_id];
1997af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1998af75078fSIntel 	    (fs->fwd_dropped == 0))
1999af75078fSIntel 		return;
2000af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
2001af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
2002af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
2003af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
2004c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
2005c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
2006af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
2007af75078fSIntel 
2008af75078fSIntel 	/* if checksum forwarding mode */
2009af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
2010c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
2011c185d42cSDavid Marchand 		       "  RX- bad L4 checksum: %-14"PRIu64
2012c185d42cSDavid Marchand 		       " RX- bad outer L4 checksum: %-14"PRIu64"\n",
201358d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
201458d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
2015d139cf23SLance Richardson 		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
2016d139cf23SLance Richardson 			fs->rx_bad_outer_ip_csum);
201794d65546SDavid Marchand 	} else {
201894d65546SDavid Marchand 		printf("\n");
2019af75078fSIntel 	}
2020af75078fSIntel 
20210e4b1963SDharmik Thakkar 	if (record_burst_stats) {
2022af75078fSIntel 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
2023af75078fSIntel 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
20240e4b1963SDharmik Thakkar 	}
2025af75078fSIntel }
2026af75078fSIntel 
202753324971SDavid Marchand void
202853324971SDavid Marchand fwd_stats_display(void)
202953324971SDavid Marchand {
203053324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
203153324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
203253324971SDavid Marchand 	struct {
203353324971SDavid Marchand 		struct fwd_stream *rx_stream;
203453324971SDavid Marchand 		struct fwd_stream *tx_stream;
203553324971SDavid Marchand 		uint64_t tx_dropped;
203653324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
203753324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
203853324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
2039d139cf23SLance Richardson 		uint64_t rx_bad_outer_ip_csum;
204053324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
204153324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
204253324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
204353324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
204453324971SDavid Marchand 	struct rte_eth_stats stats;
204553324971SDavid Marchand 	uint64_t fwd_cycles = 0;
204653324971SDavid Marchand 	uint64_t total_recv = 0;
204753324971SDavid Marchand 	uint64_t total_xmit = 0;
204853324971SDavid Marchand 	struct rte_port *port;
204953324971SDavid Marchand 	streamid_t sm_id;
205053324971SDavid Marchand 	portid_t pt_id;
2051baef6bbfSMin Hu (Connor) 	int ret;
205253324971SDavid Marchand 	int i;
205353324971SDavid Marchand 
205453324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
205553324971SDavid Marchand 
205653324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
205753324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
205853324971SDavid Marchand 
205953324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
206053324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
206153324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
206253324971SDavid Marchand 		} else {
206353324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
206453324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
206553324971SDavid Marchand 		}
206653324971SDavid Marchand 
206753324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
206853324971SDavid Marchand 
206953324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
207053324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
207153324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
207253324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
2073d139cf23SLance Richardson 		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2074d139cf23SLance Richardson 				fs->rx_bad_outer_ip_csum;
207553324971SDavid Marchand 
2076bc700b67SDharmik Thakkar 		if (record_core_cycles)
207799a4974aSRobin Jarry 			fwd_cycles += fs->busy_cycles;
207853324971SDavid Marchand 	}
207953324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2080c3fd1e60SFerruh Yigit 		uint64_t tx_dropped = 0;
2081c3fd1e60SFerruh Yigit 
208253324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
208353324971SDavid Marchand 		port = &ports[pt_id];
208453324971SDavid Marchand 
2085baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &stats);
2086baef6bbfSMin Hu (Connor) 		if (ret != 0) {
2087baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2088baef6bbfSMin Hu (Connor) 				"%s: Error: failed to get stats (port %u): %d",
2089baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
2090baef6bbfSMin Hu (Connor) 			continue;
2091baef6bbfSMin Hu (Connor) 		}
209253324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
209353324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
209453324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
209553324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
209653324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
209753324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
209853324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
209953324971SDavid Marchand 
210053324971SDavid Marchand 		total_recv += stats.ipackets;
210153324971SDavid Marchand 		total_xmit += stats.opackets;
210253324971SDavid Marchand 		total_rx_dropped += stats.imissed;
2103c3fd1e60SFerruh Yigit 		tx_dropped += ports_stats[pt_id].tx_dropped;
2104c3fd1e60SFerruh Yigit 		tx_dropped += stats.oerrors;
2105c3fd1e60SFerruh Yigit 		total_tx_dropped += tx_dropped;
210653324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
210753324971SDavid Marchand 
210853324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
210953324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
211053324971SDavid Marchand 
211108dcd187SHuisong Li 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
211208dcd187SHuisong Li 		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
211353324971SDavid Marchand 		       stats.ipackets + stats.imissed);
211453324971SDavid Marchand 
2115d139cf23SLance Richardson 		if (cur_fwd_eng == &csum_fwd_engine) {
211653324971SDavid Marchand 			printf("  Bad-ipcsum: %-14"PRIu64
211753324971SDavid Marchand 			       " Bad-l4csum: %-14"PRIu64
211853324971SDavid Marchand 			       "Bad-outer-l4csum: %-14"PRIu64"\n",
211953324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_ip_csum,
212053324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_l4_csum,
212153324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_outer_l4_csum);
2122d139cf23SLance Richardson 			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
2123d139cf23SLance Richardson 			       ports_stats[pt_id].rx_bad_outer_ip_csum);
2124d139cf23SLance Richardson 		}
212553324971SDavid Marchand 		if (stats.ierrors + stats.rx_nombuf > 0) {
212608dcd187SHuisong Li 			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
212708dcd187SHuisong Li 			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
212853324971SDavid Marchand 		}
212953324971SDavid Marchand 
213008dcd187SHuisong Li 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
213153324971SDavid Marchand 		       "TX-total: %-"PRIu64"\n",
2132c3fd1e60SFerruh Yigit 		       stats.opackets, tx_dropped,
2133c3fd1e60SFerruh Yigit 		       stats.opackets + tx_dropped);
213453324971SDavid Marchand 
21350e4b1963SDharmik Thakkar 		if (record_burst_stats) {
213653324971SDavid Marchand 			if (ports_stats[pt_id].rx_stream)
213753324971SDavid Marchand 				pkt_burst_stats_display("RX",
213853324971SDavid Marchand 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
213953324971SDavid Marchand 			if (ports_stats[pt_id].tx_stream)
214053324971SDavid Marchand 				pkt_burst_stats_display("TX",
214153324971SDavid Marchand 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
21420e4b1963SDharmik Thakkar 		}
214353324971SDavid Marchand 
214453324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
214553324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
214653324971SDavid Marchand 	}
214753324971SDavid Marchand 
214853324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
214953324971SDavid Marchand 	       "%s\n",
215053324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
215153324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
215253324971SDavid Marchand 	       "%-"PRIu64"\n"
215353324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
215453324971SDavid Marchand 	       "%-"PRIu64"\n",
215553324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
215653324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
215753324971SDavid Marchand 	if (total_rx_nombuf > 0)
215853324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
215953324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
216053324971SDavid Marchand 	       "%s\n",
216153324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
2162bc700b67SDharmik Thakkar 	if (record_core_cycles) {
21634c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6
21643a164e00SPhil Yang 		if (total_recv > 0 || total_xmit > 0) {
21653a164e00SPhil Yang 			uint64_t total_pkts = 0;
21663a164e00SPhil Yang 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
21673a164e00SPhil Yang 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
21683a164e00SPhil Yang 				total_pkts = total_xmit;
21693a164e00SPhil Yang 			else
21703a164e00SPhil Yang 				total_pkts = total_recv;
21713a164e00SPhil Yang 
217299a4974aSRobin Jarry 			printf("\n  CPU cycles/packet=%.2F (busy cycles="
21733a164e00SPhil Yang 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
21744c0497b1SDharmik Thakkar 			       " MHz Clock\n",
21753a164e00SPhil Yang 			       (double) fwd_cycles / total_pkts,
21763a164e00SPhil Yang 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
21774c0497b1SDharmik Thakkar 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
21783a164e00SPhil Yang 		}
2179bc700b67SDharmik Thakkar 	}
218053324971SDavid Marchand }
218153324971SDavid Marchand 
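/*
 * Editor's worked example for the cycles/packet report above: if the
 * forwarding cores accumulate fwd_cycles = 2400000000 busy cycles while
 * total_recv = 120000000 packets arrive in a receive-based mode, testpmd
 * prints "CPU cycles/packet=20.00" along with the TSC rate in MHz.
 */
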
218253324971SDavid Marchand void
218353324971SDavid Marchand fwd_stats_reset(void)
218453324971SDavid Marchand {
218553324971SDavid Marchand 	streamid_t sm_id;
218653324971SDavid Marchand 	portid_t pt_id;
2187baef6bbfSMin Hu (Connor) 	int ret;
218853324971SDavid Marchand 	int i;
218953324971SDavid Marchand 
219053324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
219153324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
2192baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2193baef6bbfSMin Hu (Connor) 		if (ret != 0)
2194baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2195baef6bbfSMin Hu (Connor) 				"%s: Error: failed to clear stats (port %u):%d",
2196baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
219753324971SDavid Marchand 	}
219853324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
219953324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
220053324971SDavid Marchand 
220153324971SDavid Marchand 		fs->rx_packets = 0;
220253324971SDavid Marchand 		fs->tx_packets = 0;
220353324971SDavid Marchand 		fs->fwd_dropped = 0;
220453324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
220553324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
220653324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
2207d139cf23SLance Richardson 		fs->rx_bad_outer_ip_csum = 0;
220853324971SDavid Marchand 
220953324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
221053324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
221199a4974aSRobin Jarry 		fs->busy_cycles = 0;
221253324971SDavid Marchand 	}
221353324971SDavid Marchand }
221453324971SDavid Marchand 
2215af75078fSIntel static void
22167741e4cfSIntel flush_fwd_rx_queues(void)
2217af75078fSIntel {
2218af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2219af75078fSIntel 	portid_t  rxp;
22207741e4cfSIntel 	portid_t port_id;
2221af75078fSIntel 	queueid_t rxq;
2222af75078fSIntel 	uint16_t  nb_rx;
2223af75078fSIntel 	uint8_t   j;
2224f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2225594302c7SJames Poole 	uint64_t timer_period;
2226f487715fSReshma Pattan 
2227a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
2228a550baf2SMin Hu (Connor) 		printf("flushing fwd Rx queues is not supported in multi-process mode, skipping.\n");
2229a550baf2SMin Hu (Connor) 		return;
2230a550baf2SMin Hu (Connor) 	}
2231a550baf2SMin Hu (Connor) 
2232f487715fSReshma Pattan 	/* convert to number of cycles */
2233594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2234af75078fSIntel 
2235af75078fSIntel 	for (j = 0; j < 2; j++) {
22367741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2237af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
22387741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
22393c4426dbSDmitry Kozlyuk 
22403c4426dbSDmitry Kozlyuk 				/* Polling stopped queues is prohibited. */
22413c4426dbSDmitry Kozlyuk 				if (ports[port_id].rxq[rxq].state ==
22423c4426dbSDmitry Kozlyuk 				    RTE_ETH_QUEUE_STATE_STOPPED)
22433c4426dbSDmitry Kozlyuk 					continue;
22443c4426dbSDmitry Kozlyuk 
2245f487715fSReshma Pattan 				/*
2246f487715fSReshma Pattan 				 * testpmd can get stuck in the do-while loop
2247f487715fSReshma Pattan 				 * below if rte_eth_rx_burst() keeps returning
2248f487715fSReshma Pattan 				 * packets, so a timer is used to exit the
2249f487715fSReshma Pattan 				 * loop once a 1 second period expires.
2250f487715fSReshma Pattan 				 */
2251f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2252af75078fSIntel 				do {
22537741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2254013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2255d00fee5dSDavid Marchand 					rte_pktmbuf_free_bulk(pkts_burst, nb_rx);
2256f487715fSReshma Pattan 
2257f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2258f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2259f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2260f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2261f487715fSReshma Pattan 					(timer_tsc < timer_period));
2262f487715fSReshma Pattan 				timer_tsc = 0;
2263af75078fSIntel 			}
2264af75078fSIntel 		}
2265af75078fSIntel 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2266af75078fSIntel 	}
2267af75078fSIntel }
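
/*
 * Illustrative sketch, not part of testpmd: the timer-bounded drain
 * idiom used by flush_fwd_rx_queues() above, reduced to a single
 * queue. The name drain_one_rx_queue() is hypothetical.
 */
static __rte_unused void
drain_one_rx_queue(portid_t port_id, queueid_t rxq)
{
	struct rte_mbuf *burst[MAX_PKT_BURST];
	uint64_t deadline = rte_rdtsc() + rte_get_timer_hz(); /* ~1 sec */
	uint16_t nb_rx;

	do {
		nb_rx = rte_eth_rx_burst(port_id, rxq, burst, MAX_PKT_BURST);
		rte_pktmbuf_free_bulk(burst, nb_rx);
	} while (nb_rx > 0 && rte_rdtsc() < deadline);
}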
2268af75078fSIntel 
2269af75078fSIntel static void
2270af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2271af75078fSIntel {
2272af75078fSIntel 	struct fwd_stream **fsm;
227399a4974aSRobin Jarry 	uint64_t prev_tsc;
2274af75078fSIntel 	streamid_t nb_fs;
2275af75078fSIntel 	streamid_t sm_id;
2276a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
22777e4441c8SRemy Horton 	uint64_t tics_per_1sec;
22787e4441c8SRemy Horton 	uint64_t tics_datum;
22797e4441c8SRemy Horton 	uint64_t tics_current;
22804918a357SXiaoyun Li 	uint16_t i, cnt_ports;
2281af75078fSIntel 
22824918a357SXiaoyun Li 	cnt_ports = nb_ports;
22837e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
22847e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
22857e4441c8SRemy Horton #endif
2286af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
2287af75078fSIntel 	nb_fs = fc->stream_nb;
228899a4974aSRobin Jarry 	prev_tsc = rte_rdtsc();
2289af75078fSIntel 	do {
229006c20561SDavid Marchand 		for (sm_id = 0; sm_id < nb_fs; sm_id++) {
229106c20561SDavid Marchand 			struct fwd_stream *fs = fsm[sm_id];
229206c20561SDavid Marchand 			uint64_t start_fs_tsc = 0;
229306c20561SDavid Marchand 			bool busy;
229406c20561SDavid Marchand 
229506c20561SDavid Marchand 			if (fs->disabled)
229606c20561SDavid Marchand 				continue;
229706c20561SDavid Marchand 			if (record_core_cycles)
229806c20561SDavid Marchand 				start_fs_tsc = rte_rdtsc();
229906c20561SDavid Marchand 			busy = (*pkt_fwd)(fs);
230006c20561SDavid Marchand 			if (record_core_cycles && busy)
230106c20561SDavid Marchand 				fs->busy_cycles += rte_rdtsc() - start_fs_tsc;
230206c20561SDavid Marchand 		}
2303a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
2304e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
2305e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
23067e4441c8SRemy Horton 			tics_current = rte_rdtsc();
23077e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
23087e4441c8SRemy Horton 				/* Periodic bitrate calculation */
23094918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
2310e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
23114918a357SXiaoyun Li 						ports_ids[i]);
23127e4441c8SRemy Horton 				tics_datum = tics_current;
23137e4441c8SRemy Horton 			}
2314e25e6c70SRemy Horton 		}
23157e4441c8SRemy Horton #endif
2316a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
231765eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
231865eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
231962d3216dSReshma Pattan 			rte_latencystats_update();
232062d3216dSReshma Pattan #endif
232199a4974aSRobin Jarry 		if (record_core_cycles) {
232299a4974aSRobin Jarry 			uint64_t tsc = rte_rdtsc();
232362d3216dSReshma Pattan 
232499a4974aSRobin Jarry 			fc->total_cycles += tsc - prev_tsc;
232599a4974aSRobin Jarry 			prev_tsc = tsc;
232699a4974aSRobin Jarry 		}
2327af75078fSIntel 	} while (! fc->stopped);
2328af75078fSIntel }
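
/*
 * Illustrative sketch, not part of testpmd: the cycle-accounting
 * pattern of run_pkt_fwd_on_lcore() in isolation. Only iterations
 * where work() reports progress are charged as busy, while total
 * cycles advance every iteration; busy/total then approximates the
 * core load. The names work(), busy and total are hypothetical.
 */
static __rte_unused void
account_cycles_loop(bool (*work)(void), volatile bool *stop,
		    uint64_t *busy, uint64_t *total)
{
	uint64_t prev = rte_rdtsc();

	while (!*stop) {
		uint64_t start = rte_rdtsc();
		uint64_t now;

		if (work())
			*busy += rte_rdtsc() - start;
		now = rte_rdtsc();
		*total += now - prev;
		prev = now;
	}
}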
2329af75078fSIntel 
2330af75078fSIntel static int
233199a4974aSRobin Jarry lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
233299a4974aSRobin Jarry {
233399a4974aSRobin Jarry 	struct fwd_stream **fsm;
233499a4974aSRobin Jarry 	struct fwd_lcore *fc;
233599a4974aSRobin Jarry 	streamid_t nb_fs;
233699a4974aSRobin Jarry 	streamid_t sm_id;
233799a4974aSRobin Jarry 
233899a4974aSRobin Jarry 	fc = lcore_to_fwd_lcore(lcore_id);
233999a4974aSRobin Jarry 	if (fc == NULL)
234099a4974aSRobin Jarry 		return -1;
234199a4974aSRobin Jarry 
234299a4974aSRobin Jarry 	fsm = &fwd_streams[fc->stream_idx];
234399a4974aSRobin Jarry 	nb_fs = fc->stream_nb;
234499a4974aSRobin Jarry 	usage->busy_cycles = 0;
234599a4974aSRobin Jarry 	usage->total_cycles = fc->total_cycles;
234699a4974aSRobin Jarry 
234799a4974aSRobin Jarry 	for (sm_id = 0; sm_id < nb_fs; sm_id++) {
234899a4974aSRobin Jarry 		if (!fsm[sm_id]->disabled)
234999a4974aSRobin Jarry 			usage->busy_cycles += fsm[sm_id]->busy_cycles;
235099a4974aSRobin Jarry 	}
235199a4974aSRobin Jarry 
235299a4974aSRobin Jarry 	return 0;
235399a4974aSRobin Jarry }
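
/*
 * The callback above only takes effect once registered with EAL.
 * A minimal sketch of the registration, assuming the
 * rte_lcore_register_usage_cb() API from rte_lcore.h; testpmd
 * performs this registration during startup.
 */
static __rte_unused void
register_usage_cb(void)
{
	/*
	 * After registration, consumers such as the telemetry
	 * /eal/lcore/info endpoint can report busy and total cycles
	 * per forwarding lcore.
	 */
	rte_lcore_register_usage_cb(lcore_usage_callback);
}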
235499a4974aSRobin Jarry 
235599a4974aSRobin Jarry static int
2356af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2357af75078fSIntel {
2358af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2359af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2360af75078fSIntel 	return 0;
2361af75078fSIntel }
2362af75078fSIntel 
2363af75078fSIntel /*
2364af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2365af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2366af75078fSIntel  */
2367af75078fSIntel static int
2368af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2369af75078fSIntel {
2370af75078fSIntel 	struct fwd_lcore *fwd_lc;
2371af75078fSIntel 	struct fwd_lcore tmp_lcore;
2372af75078fSIntel 
2373af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2374af75078fSIntel 	tmp_lcore = *fwd_lc;
2375af75078fSIntel 	tmp_lcore.stopped = 1;
2376af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2377af75078fSIntel 	return 0;
2378af75078fSIntel }
2379af75078fSIntel 
2380af75078fSIntel /*
2381af75078fSIntel  * Launch packet forwarding:
2382af75078fSIntel  *     - Setup per-port forwarding context.
2383af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2384af75078fSIntel  */
2385af75078fSIntel static void
2386af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2387af75078fSIntel {
2388af75078fSIntel 	unsigned int i;
2389af75078fSIntel 	unsigned int lc_id;
2390af75078fSIntel 	int diag;
2391af75078fSIntel 
2392af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2393af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2394af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2395af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2396af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2397af75078fSIntel 						     fwd_lcores[i], lc_id);
2398af75078fSIntel 			if (diag != 0)
239961a3b0e5SAndrew Rybchenko 				fprintf(stderr,
240061a3b0e5SAndrew Rybchenko 					"launch lcore %u failed - diag=%d\n",
2401af75078fSIntel 					lc_id, diag);
2402af75078fSIntel 		}
2403af75078fSIntel 	}
2404af75078fSIntel }
2405af75078fSIntel 
2406180ba023SDavid Marchand void
2407180ba023SDavid Marchand common_fwd_stream_init(struct fwd_stream *fs)
2408180ba023SDavid Marchand {
2409180ba023SDavid Marchand 	bool rx_stopped, tx_stopped;
2410180ba023SDavid Marchand 
2411180ba023SDavid Marchand 	rx_stopped = (ports[fs->rx_port].rxq[fs->rx_queue].state == RTE_ETH_QUEUE_STATE_STOPPED);
2412180ba023SDavid Marchand 	tx_stopped = (ports[fs->tx_port].txq[fs->tx_queue].state == RTE_ETH_QUEUE_STATE_STOPPED);
2413180ba023SDavid Marchand 	fs->disabled = rx_stopped || tx_stopped;
2414180ba023SDavid Marchand }
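
/*
 * Illustrative sketch, not part of testpmd: how a forwarding engine
 * would typically hook common_fwd_stream_init() so that streams on
 * stopped queues are disabled before forwarding starts. demo_engine
 * is hypothetical and reuses the demo_fwd() sketch above; the
 * struct fwd_engine fields are the ones used elsewhere in this file.
 */
static __rte_unused struct fwd_engine demo_engine = {
	.fwd_mode_name = "demo",
	.stream_init = common_fwd_stream_init,
	.packet_fwd = demo_fwd,
};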
2415180ba023SDavid Marchand 
24165028f207SShiyang He static void
24175028f207SShiyang He update_rx_queue_state(uint16_t port_id, uint16_t queue_id)
24185028f207SShiyang He {
24195028f207SShiyang He 	struct rte_eth_rxq_info rx_qinfo;
24205028f207SShiyang He 	int32_t rc;
24215028f207SShiyang He 
24225028f207SShiyang He 	rc = rte_eth_rx_queue_info_get(port_id,
24235028f207SShiyang He 			queue_id, &rx_qinfo);
24245028f207SShiyang He 	if (rc == 0) {
24255028f207SShiyang He 		ports[port_id].rxq[queue_id].state =
24265028f207SShiyang He 			rx_qinfo.queue_state;
24275028f207SShiyang He 	} else if (rc == -ENOTSUP) {
24285028f207SShiyang He 		/*
24295028f207SShiyang He 		 * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED
24305028f207SShiyang He 		 * to ensure that PMDs which do not implement
24315028f207SShiyang He 		 * rte_eth_rx_queue_info_get can still forward.
24325028f207SShiyang He 		 */
24335028f207SShiyang He 		ports[port_id].rxq[queue_id].state =
24345028f207SShiyang He 			RTE_ETH_QUEUE_STATE_STARTED;
24355028f207SShiyang He 	} else {
24365028f207SShiyang He 		TESTPMD_LOG(WARNING,
24375028f207SShiyang He 			"Failed to get rx queue info\n");
24385028f207SShiyang He 	}
24395028f207SShiyang He }
24405028f207SShiyang He 
24415028f207SShiyang He static void
24425028f207SShiyang He update_tx_queue_state(uint16_t port_id, uint16_t queue_id)
24435028f207SShiyang He {
24445028f207SShiyang He 	struct rte_eth_txq_info tx_qinfo;
24455028f207SShiyang He 	int32_t rc;
24465028f207SShiyang He 
24475028f207SShiyang He 	rc = rte_eth_tx_queue_info_get(port_id,
24485028f207SShiyang He 			queue_id, &tx_qinfo);
24495028f207SShiyang He 	if (rc == 0) {
24505028f207SShiyang He 		ports[port_id].txq[queue_id].state =
24515028f207SShiyang He 			tx_qinfo.queue_state;
24525028f207SShiyang He 	} else if (rc == -ENOTSUP) {
24535028f207SShiyang He 		/*
24545028f207SShiyang He 		 * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED
24555028f207SShiyang He 		 * to ensure that PMDs which do not implement
24565028f207SShiyang He 		 * rte_eth_tx_queue_info_get can still forward.
24575028f207SShiyang He 		 */
24585028f207SShiyang He 		ports[port_id].txq[queue_id].state =
24595028f207SShiyang He 			RTE_ETH_QUEUE_STATE_STARTED;
24605028f207SShiyang He 	} else {
24615028f207SShiyang He 		TESTPMD_LOG(WARNING,
24625028f207SShiyang He 			"Failed to get tx queue info\n");
24635028f207SShiyang He 	}
24645028f207SShiyang He }
24655028f207SShiyang He 
24665028f207SShiyang He static void
24675028f207SShiyang He update_queue_state(void)
24685028f207SShiyang He {
24695028f207SShiyang He 	portid_t pi;
24705028f207SShiyang He 	queueid_t qi;
24715028f207SShiyang He 
24725028f207SShiyang He 	RTE_ETH_FOREACH_DEV(pi) {
24735028f207SShiyang He 		for (qi = 0; qi < nb_rxq; qi++)
24745028f207SShiyang He 			update_rx_queue_state(pi, qi);
24755028f207SShiyang He 		for (qi = 0; qi < nb_txq; qi++)
24765028f207SShiyang He 			update_tx_queue_state(pi, qi);
24775028f207SShiyang He 	}
24785028f207SShiyang He }
24795028f207SShiyang He 
2480af75078fSIntel /*
2481af75078fSIntel  * Launch packet forwarding configuration.
2482af75078fSIntel  */
2483af75078fSIntel void
2484af75078fSIntel start_packet_forwarding(int with_tx_first)
2485af75078fSIntel {
2486af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2487af75078fSIntel 	port_fwd_end_t  port_fwd_end;
24883c4426dbSDmitry Kozlyuk 	stream_init_t stream_init = cur_fwd_eng->stream_init;
2489af75078fSIntel 	unsigned int i;
2490af75078fSIntel 
24915a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
24925a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
24935a8fb55cSReshma Pattan 
24945a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
24955a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
24965a8fb55cSReshma Pattan 
24975a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
24985a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
24995a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
25005a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
25015a8fb55cSReshma Pattan 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
25025a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
25035a8fb55cSReshma Pattan 
2504ce8d5614SIntel 	if (all_ports_started() == 0) {
250561a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Not all ports were started\n");
2506ce8d5614SIntel 		return;
2507ce8d5614SIntel 	}
2508af75078fSIntel 	if (test_done == 0) {
250961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding already started\n");
2510af75078fSIntel 		return;
2511af75078fSIntel 	}
25127741e4cfSIntel 
251347a767b2SMatan Azrad 	fwd_config_setup();
251447a767b2SMatan Azrad 
251565744833SXueming Li 	pkt_fwd_config_display(&cur_fwd_config);
251665744833SXueming Li 	if (!pkt_fwd_shared_rxq_check())
251765744833SXueming Li 		return;
251865744833SXueming Li 
25195028f207SShiyang He 	if (stream_init != NULL) {
2520d7d802daSFerruh Yigit 		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
25215028f207SShiyang He 			update_queue_state();
25223c4426dbSDmitry Kozlyuk 		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
25233c4426dbSDmitry Kozlyuk 			stream_init(fwd_streams[i]);
25245028f207SShiyang He 	}
25253c4426dbSDmitry Kozlyuk 
2526a78040c9SAlvin Zhang 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2527a78040c9SAlvin Zhang 	if (port_fwd_begin != NULL) {
2528a78040c9SAlvin Zhang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2529a78040c9SAlvin Zhang 			if (port_fwd_begin(fwd_ports_ids[i])) {
2530a78040c9SAlvin Zhang 				fprintf(stderr,
2531a78040c9SAlvin Zhang 					"Packet forwarding is not ready\n");
2532a78040c9SAlvin Zhang 				return;
2533a78040c9SAlvin Zhang 			}
2534a78040c9SAlvin Zhang 		}
2535a78040c9SAlvin Zhang 	}
2536a78040c9SAlvin Zhang 
2537a78040c9SAlvin Zhang 	if (with_tx_first) {
2538a78040c9SAlvin Zhang 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2539a78040c9SAlvin Zhang 		if (port_fwd_begin != NULL) {
2540a78040c9SAlvin Zhang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2541a78040c9SAlvin Zhang 				if (port_fwd_begin(fwd_ports_ids[i])) {
2542a78040c9SAlvin Zhang 					fprintf(stderr,
2543a78040c9SAlvin Zhang 						"Packet forwarding is not ready\n");
2544a78040c9SAlvin Zhang 					return;
2545a78040c9SAlvin Zhang 				}
2546a78040c9SAlvin Zhang 			}
2547a78040c9SAlvin Zhang 		}
2548a78040c9SAlvin Zhang 	}
2549a78040c9SAlvin Zhang 
2550a78040c9SAlvin Zhang 	test_done = 0;
2551a78040c9SAlvin Zhang 
25527741e4cfSIntel 	if (!no_flush_rx)
25537741e4cfSIntel 		flush_fwd_rx_queues();
25547741e4cfSIntel 
2555af75078fSIntel 	rxtx_config_display();
2556af75078fSIntel 
255753324971SDavid Marchand 	fwd_stats_reset();
2558af75078fSIntel 	if (with_tx_first) {
2559acbf77a6SZhihong Wang 		while (with_tx_first--) {
2560acbf77a6SZhihong Wang 			launch_packet_forwarding(
2561acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2562af75078fSIntel 			rte_eal_mp_wait_lcore();
2563acbf77a6SZhihong Wang 		}
2564af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2565af75078fSIntel 		if (port_fwd_end != NULL) {
2566af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2567af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2568af75078fSIntel 		}
2569af75078fSIntel 	}
2570af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2571af75078fSIntel }
2572af75078fSIntel 
2573af75078fSIntel void
2574af75078fSIntel stop_packet_forwarding(void)
2575af75078fSIntel {
2576af75078fSIntel 	port_fwd_end_t port_fwd_end;
2577af75078fSIntel 	lcoreid_t lc_id;
257853324971SDavid Marchand 	portid_t pt_id;
257953324971SDavid Marchand 	int i;
2580af75078fSIntel 
2581af75078fSIntel 	if (test_done) {
258261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding not started\n");
2583af75078fSIntel 		return;
2584af75078fSIntel 	}
2585af75078fSIntel 	printf("Telling cores to stop...");
2586af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2587af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2588af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2589af75078fSIntel 	rte_eal_mp_wait_lcore();
2590af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2591af75078fSIntel 	if (port_fwd_end != NULL) {
2592af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2593af75078fSIntel 			pt_id = fwd_ports_ids[i];
2594af75078fSIntel 			(*port_fwd_end)(pt_id);
2595af75078fSIntel 		}
2596af75078fSIntel 	}
2597c185d42cSDavid Marchand 
259853324971SDavid Marchand 	fwd_stats_display();
259958d475b7SJerin Jacob 
2600af75078fSIntel 	printf("\nDone.\n");
2601af75078fSIntel 	test_done = 1;
2602af75078fSIntel }
2603af75078fSIntel 
2604cfae07fdSOuyang Changchun void
2605cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2606cfae07fdSOuyang Changchun {
2607492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
260861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link up failed.\n");
2609cfae07fdSOuyang Changchun }
2610cfae07fdSOuyang Changchun 
2611cfae07fdSOuyang Changchun void
2612cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2613cfae07fdSOuyang Changchun {
2614492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
261561a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link down failed.\n");
2616cfae07fdSOuyang Changchun }
2617cfae07fdSOuyang Changchun 
2618ce8d5614SIntel static int
2619ce8d5614SIntel all_ports_started(void)
2620ce8d5614SIntel {
2621ce8d5614SIntel 	portid_t pi;
2622ce8d5614SIntel 	struct rte_port *port;
2623ce8d5614SIntel 
26247d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2625ce8d5614SIntel 		port = &ports[pi];
2626ce8d5614SIntel 		/* Check if there is a port which is not started */
262741b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
262815e34522SLong Wu 			(port->member_flag == 0))
2629ce8d5614SIntel 			return 0;
2630ce8d5614SIntel 	}
2631ce8d5614SIntel 
2632ce8d5614SIntel 	/* All ports are started */
2633ce8d5614SIntel 	return 1;
2634ce8d5614SIntel }
2635ce8d5614SIntel 
2636148f963fSBruce Richardson int
26376018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
26386018eb8cSShahaf Shuler {
26396018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
26406018eb8cSShahaf Shuler 
26416018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
264215e34522SLong Wu 	    (port->member_flag == 0))
26436018eb8cSShahaf Shuler 		return 0;
26446018eb8cSShahaf Shuler 	return 1;
26456018eb8cSShahaf Shuler }
26466018eb8cSShahaf Shuler 
26476018eb8cSShahaf Shuler int
2648edab33b1STetsuya Mukawa all_ports_stopped(void)
2649edab33b1STetsuya Mukawa {
2650edab33b1STetsuya Mukawa 	portid_t pi;
2651edab33b1STetsuya Mukawa 
26527d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
26536018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2654edab33b1STetsuya Mukawa 			return 0;
2655edab33b1STetsuya Mukawa 	}
2656edab33b1STetsuya Mukawa 
2657edab33b1STetsuya Mukawa 	return 1;
2658edab33b1STetsuya Mukawa }
2659edab33b1STetsuya Mukawa 
2660edab33b1STetsuya Mukawa int
2661edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2662edab33b1STetsuya Mukawa {
2663edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2664edab33b1STetsuya Mukawa 		return 0;
2665edab33b1STetsuya Mukawa 
2666edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2667edab33b1STetsuya Mukawa 		return 0;
2668edab33b1STetsuya Mukawa 
2669edab33b1STetsuya Mukawa 	return 1;
2670edab33b1STetsuya Mukawa }
2671edab33b1STetsuya Mukawa 
267223095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_FORCE_MEMORY RTE_BIT32(8)
267323095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_FORCE_MEMORY RTE_BIT32(9)
267423095155SDariusz Sosnowski 
267523095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_LOCKED_MEMORY RTE_BIT32(12)
267623095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_RTE_MEMORY RTE_BIT32(13)
267723095155SDariusz Sosnowski 
267823095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_LOCKED_MEMORY RTE_BIT32(16)
267923095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_RTE_MEMORY RTE_BIT32(17)
268023095155SDariusz Sosnowski 
26821c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
26831c69df45SOri Kam static int
268401817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
26851c69df45SOri Kam {
26861c69df45SOri Kam 	queueid_t qi;
26871c69df45SOri Kam 	struct rte_eth_hairpin_conf hairpin_conf = {
26881c69df45SOri Kam 		.peer_count = 1,
26891c69df45SOri Kam 	};
26901c69df45SOri Kam 	int i;
26911c69df45SOri Kam 	int diag;
26921c69df45SOri Kam 	struct rte_port *port = &ports[pi];
269301817b10SBing Zhao 	uint16_t peer_rx_port = pi;
269401817b10SBing Zhao 	uint16_t peer_tx_port = pi;
269501817b10SBing Zhao 	uint32_t manual = 1;
269601817b10SBing Zhao 	uint32_t tx_exp = hairpin_mode & 0x10;
269723095155SDariusz Sosnowski 	uint32_t rx_force_memory = hairpin_mode & HAIRPIN_MODE_RX_FORCE_MEMORY;
269823095155SDariusz Sosnowski 	uint32_t rx_locked_memory = hairpin_mode & HAIRPIN_MODE_RX_LOCKED_MEMORY;
269923095155SDariusz Sosnowski 	uint32_t rx_rte_memory = hairpin_mode & HAIRPIN_MODE_RX_RTE_MEMORY;
270023095155SDariusz Sosnowski 	uint32_t tx_force_memory = hairpin_mode & HAIRPIN_MODE_TX_FORCE_MEMORY;
270123095155SDariusz Sosnowski 	uint32_t tx_locked_memory = hairpin_mode & HAIRPIN_MODE_TX_LOCKED_MEMORY;
270223095155SDariusz Sosnowski 	uint32_t tx_rte_memory = hairpin_mode & HAIRPIN_MODE_TX_RTE_MEMORY;
270301817b10SBing Zhao 
270401817b10SBing Zhao 	if (!(hairpin_mode & 0xf)) {
270501817b10SBing Zhao 		peer_rx_port = pi;
270601817b10SBing Zhao 		peer_tx_port = pi;
270701817b10SBing Zhao 		manual = 0;
270801817b10SBing Zhao 	} else if (hairpin_mode & 0x1) {
270901817b10SBing Zhao 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
271001817b10SBing Zhao 						       RTE_ETH_DEV_NO_OWNER);
271101817b10SBing Zhao 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
271201817b10SBing Zhao 			peer_tx_port = rte_eth_find_next_owned_by(0,
271301817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
271401817b10SBing Zhao 		if (p_pi != RTE_MAX_ETHPORTS) {
271501817b10SBing Zhao 			peer_rx_port = p_pi;
271601817b10SBing Zhao 		} else {
271701817b10SBing Zhao 			uint16_t next_pi;
271801817b10SBing Zhao 
271901817b10SBing Zhao 			/* Last port will be the peer RX port of the first. */
272001817b10SBing Zhao 			RTE_ETH_FOREACH_DEV(next_pi)
272101817b10SBing Zhao 				peer_rx_port = next_pi;
272201817b10SBing Zhao 		}
272301817b10SBing Zhao 		manual = 1;
272401817b10SBing Zhao 	} else if (hairpin_mode & 0x2) {
272501817b10SBing Zhao 		if (cnt_pi & 0x1) {
272601817b10SBing Zhao 			peer_rx_port = p_pi;
272701817b10SBing Zhao 		} else {
272801817b10SBing Zhao 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
272901817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
273001817b10SBing Zhao 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
273101817b10SBing Zhao 				peer_rx_port = pi;
273201817b10SBing Zhao 		}
273301817b10SBing Zhao 		peer_tx_port = peer_rx_port;
273401817b10SBing Zhao 		manual = 1;
273501817b10SBing Zhao 	}
27361c69df45SOri Kam 
27371c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
273801817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_rx_port;
27391c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
274001817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
274101817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
274223095155SDariusz Sosnowski 		hairpin_conf.force_memory = !!tx_force_memory;
274323095155SDariusz Sosnowski 		hairpin_conf.use_locked_device_memory = !!tx_locked_memory;
274423095155SDariusz Sosnowski 		hairpin_conf.use_rte_memory = !!tx_rte_memory;
27451c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
27461c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
27471c69df45SOri Kam 		i++;
27481c69df45SOri Kam 		if (diag == 0)
27491c69df45SOri Kam 			continue;
27501c69df45SOri Kam 
27511c69df45SOri Kam 		/* Failed to set up Tx hairpin queue, return */
2752eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2753eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2754eac341d3SJoyce Kong 		else
275561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
275661a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
275761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
275861a3b0e5SAndrew Rybchenko 			pi);
27591c69df45SOri Kam 		/* try to reconfigure queues next time */
27601c69df45SOri Kam 		port->need_reconfig_queues = 1;
27611c69df45SOri Kam 		return -1;
27621c69df45SOri Kam 	}
27631c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
276401817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_tx_port;
27651c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
276601817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
276701817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
276823095155SDariusz Sosnowski 		hairpin_conf.force_memory = !!rx_force_memory;
276923095155SDariusz Sosnowski 		hairpin_conf.use_locked_device_memory = !!rx_locked_memory;
277023095155SDariusz Sosnowski 		hairpin_conf.use_rte_memory = !!rx_rte_memory;
27711c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
27721c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
27731c69df45SOri Kam 		i++;
27741c69df45SOri Kam 		if (diag == 0)
27751c69df45SOri Kam 			continue;
27761c69df45SOri Kam 
27771c69df45SOri Kam 		/* Failed to set up Rx hairpin queue, return */
2778eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2779eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2780eac341d3SJoyce Kong 		else
278161a3b0e5SAndrew Rybchenko 			fprintf(stderr,
278261a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
278361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
278461a3b0e5SAndrew Rybchenko 			pi);
27851c69df45SOri Kam 		/* try to reconfigure queues next time */
27861c69df45SOri Kam 		port->need_reconfig_queues = 1;
27871c69df45SOri Kam 		return -1;
27881c69df45SOri Kam 	}
27891c69df45SOri Kam 	return 0;
27901c69df45SOri Kam }
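
/*
 * Illustrative sketch, not part of testpmd: decoding of the
 * hairpin_mode bits consumed by setup_hairpin_queues() above. Bit
 * meanings follow the code above; dump_hairpin_mode() is a
 * hypothetical name.
 */
static __rte_unused void
dump_hairpin_mode(uint32_t mode)
{
	printf("auto bind:        %s\n", (mode & 0xf) ? "no" : "yes");
	printf("explicit Tx flow: %s\n", (mode & 0x10) ? "yes" : "no");
	printf("Rx force memory:  %s\n",
	       (mode & HAIRPIN_MODE_RX_FORCE_MEMORY) ? "yes" : "no");
	printf("Rx locked memory: %s\n",
	       (mode & HAIRPIN_MODE_RX_LOCKED_MEMORY) ? "yes" : "no");
	printf("Tx RTE memory:    %s\n",
	       (mode & HAIRPIN_MODE_TX_RTE_MEMORY) ? "yes" : "no");
}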
27911c69df45SOri Kam 
27922befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */
27932befc67fSViacheslav Ovsiienko int
27942befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
27952befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
27962befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
27972befc67fSViacheslav Ovsiienko {
27982befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
27994f04edcdSHanumanth Pothula 	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
28004f04edcdSHanumanth Pothula 	struct rte_mempool *mpx;
28012befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
280254a0f4d7SYuan Wang 	uint32_t prev_hdrs = 0;
28032befc67fSViacheslav Ovsiienko 	int ret;
28042befc67fSViacheslav Ovsiienko 
2806a4bf5421SHanumanth Pothula 	if ((rx_pkt_nb_segs > 1) &&
2807a4bf5421SHanumanth Pothula 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
28084f04edcdSHanumanth Pothula 		/* multi-segment configuration */
28092befc67fSViacheslav Ovsiienko 		for (i = 0; i < rx_pkt_nb_segs; i++) {
28102befc67fSViacheslav Ovsiienko 			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
28112befc67fSViacheslav Ovsiienko 			/*
28122befc67fSViacheslav Ovsiienko 			 * Use last valid pool for the segments with number
28132befc67fSViacheslav Ovsiienko 			 * exceeding the pool index.
28142befc67fSViacheslav Ovsiienko 			 */
28151108c33eSRaja Zidane 			mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
28162befc67fSViacheslav Ovsiienko 			mpx = mbuf_pool_find(socket_id, mp_n);
28172befc67fSViacheslav Ovsiienko 			/* Handle zero as mbuf data buffer size. */
28182befc67fSViacheslav Ovsiienko 			rx_seg->offset = i < rx_pkt_nb_offs ?
28192befc67fSViacheslav Ovsiienko 					   rx_pkt_seg_offsets[i] : 0;
28202befc67fSViacheslav Ovsiienko 			rx_seg->mp = mpx ? mpx : mp;
282152e2e7edSYuan Wang 			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
282254a0f4d7SYuan Wang 				rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs;
282354a0f4d7SYuan Wang 				prev_hdrs |= rx_seg->proto_hdr;
282452e2e7edSYuan Wang 			} else {
282552e2e7edSYuan Wang 				rx_seg->length = rx_pkt_seg_lengths[i] ?
282652e2e7edSYuan Wang 						rx_pkt_seg_lengths[i] :
282752e2e7edSYuan Wang 						mbuf_data_size[mp_n];
282852e2e7edSYuan Wang 			}
28292befc67fSViacheslav Ovsiienko 		}
28302befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = rx_pkt_nb_segs;
28312befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = rx_useg;
2832a4bf5421SHanumanth Pothula 		rx_conf->rx_mempools = NULL;
2833a4bf5421SHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2834a4bf5421SHanumanth Pothula 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2835a4bf5421SHanumanth Pothula 				    socket_id, rx_conf, NULL);
2836a4bf5421SHanumanth Pothula 		rx_conf->rx_seg = NULL;
2837a4bf5421SHanumanth Pothula 		rx_conf->rx_nseg = 0;
2838a4bf5421SHanumanth Pothula 	} else if (multi_rx_mempool == 1) {
28394f04edcdSHanumanth Pothula 		/* multi-pool configuration */
2840a4bf5421SHanumanth Pothula 		struct rte_eth_dev_info dev_info;
2841a4bf5421SHanumanth Pothula 
2842a4bf5421SHanumanth Pothula 		if (mbuf_data_size_n <= 1) {
2843a4bf5421SHanumanth Pothula 			fprintf(stderr, "Invalid number of mempools %u\n",
2844a4bf5421SHanumanth Pothula 				mbuf_data_size_n);
2845a4bf5421SHanumanth Pothula 			return -EINVAL;
2846a4bf5421SHanumanth Pothula 		}
2847a4bf5421SHanumanth Pothula 		ret = rte_eth_dev_info_get(port_id, &dev_info);
2848a4bf5421SHanumanth Pothula 		if (ret != 0)
2849a4bf5421SHanumanth Pothula 			return ret;
2850a4bf5421SHanumanth Pothula 		if (dev_info.max_rx_mempools == 0) {
2851a4bf5421SHanumanth Pothula 			fprintf(stderr,
2852a4bf5421SHanumanth Pothula 				"Port %u doesn't support requested multi-rx-mempool configuration.\n",
2853a4bf5421SHanumanth Pothula 				port_id);
2854a4bf5421SHanumanth Pothula 			return -ENOTSUP;
2855a4bf5421SHanumanth Pothula 		}
28564f04edcdSHanumanth Pothula 		for (i = 0; i < mbuf_data_size_n; i++) {
28574f04edcdSHanumanth Pothula 			mpx = mbuf_pool_find(socket_id, i);
28584f04edcdSHanumanth Pothula 			rx_mempool[i] = mpx ? mpx : mp;
28594f04edcdSHanumanth Pothula 		}
28604f04edcdSHanumanth Pothula 		rx_conf->rx_mempools = rx_mempool;
28614f04edcdSHanumanth Pothula 		rx_conf->rx_nmempool = mbuf_data_size_n;
2862a4bf5421SHanumanth Pothula 		rx_conf->rx_seg = NULL;
2863a4bf5421SHanumanth Pothula 		rx_conf->rx_nseg = 0;
28642befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
28652befc67fSViacheslav Ovsiienko 				    socket_id, rx_conf, NULL);
2866a4bf5421SHanumanth Pothula 		rx_conf->rx_mempools = NULL;
2867a4bf5421SHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2868a4bf5421SHanumanth Pothula 	} else {
2869a4bf5421SHanumanth Pothula 		/* Single pool/segment configuration */
28702befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
28712befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
28724f04edcdSHanumanth Pothula 		rx_conf->rx_mempools = NULL;
28734f04edcdSHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2874a4bf5421SHanumanth Pothula 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2875a4bf5421SHanumanth Pothula 				    socket_id, rx_conf, mp);
2876a4bf5421SHanumanth Pothula 	}
2877a4bf5421SHanumanth Pothula 
28783c4426dbSDmitry Kozlyuk 	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
28793c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
28803c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
28812befc67fSViacheslav Ovsiienko 	return ret;
28822befc67fSViacheslav Ovsiienko }
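
/*
 * Illustrative sketch, not part of testpmd: rx_queue_setup() above
 * selects one of three mutually exclusive Rx configurations. The
 * helper below is a hypothetical caller for queue 0; the selection
 * itself is driven by testpmd's buffer-split and multi-mempool
 * settings.
 */
static __rte_unused int
demo_rx_setup(portid_t pid, struct rte_mempool *mp)
{
	/*
	 * 1. Buffer split: rx_pkt_nb_segs > 1 and the
	 *    RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT offload enabled.
	 * 2. Multiple mempools: multi_rx_mempool set with more than
	 *    one configured mbuf data size.
	 * 3. Otherwise: the classic single-pool setup using mp.
	 */
	return rx_queue_setup(pid, 0, nb_rxd, ports[pid].socket_id,
			      &ports[pid].rxq[0].conf, mp);
}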
28832befc67fSViacheslav Ovsiienko 
288463b72657SIvan Ilchenko static int
288563b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi)
288663b72657SIvan Ilchenko {
288763b72657SIvan Ilchenko 	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
288863b72657SIvan Ilchenko 	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
288963b72657SIvan Ilchenko 	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
289063b72657SIvan Ilchenko 
289163b72657SIvan Ilchenko 	if (xstats_display_num == 0)
289263b72657SIvan Ilchenko 		return 0;
289363b72657SIvan Ilchenko 
289463b72657SIvan Ilchenko 	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
289563b72657SIvan Ilchenko 	if (*ids_supp == NULL)
289663b72657SIvan Ilchenko 		goto fail_ids_supp;
289763b72657SIvan Ilchenko 
289863b72657SIvan Ilchenko 	*prev_values = calloc(xstats_display_num,
289963b72657SIvan Ilchenko 			      sizeof(**prev_values));
290063b72657SIvan Ilchenko 	if (*prev_values == NULL)
290163b72657SIvan Ilchenko 		goto fail_prev_values;
290263b72657SIvan Ilchenko 
290363b72657SIvan Ilchenko 	*curr_values = calloc(xstats_display_num,
290463b72657SIvan Ilchenko 			      sizeof(**curr_values));
290563b72657SIvan Ilchenko 	if (*curr_values == NULL)
290663b72657SIvan Ilchenko 		goto fail_curr_values;
290763b72657SIvan Ilchenko 
290863b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = true;
290963b72657SIvan Ilchenko 
291063b72657SIvan Ilchenko 	return 0;
291163b72657SIvan Ilchenko 
291263b72657SIvan Ilchenko fail_curr_values:
291363b72657SIvan Ilchenko 	free(*prev_values);
291463b72657SIvan Ilchenko fail_prev_values:
291563b72657SIvan Ilchenko 	free(*ids_supp);
291663b72657SIvan Ilchenko fail_ids_supp:
291763b72657SIvan Ilchenko 	return -ENOMEM;
291863b72657SIvan Ilchenko }
291963b72657SIvan Ilchenko 
292063b72657SIvan Ilchenko static void
292163b72657SIvan Ilchenko free_xstats_display_info(portid_t pi)
292263b72657SIvan Ilchenko {
292363b72657SIvan Ilchenko 	if (!ports[pi].xstats_info.allocated)
292463b72657SIvan Ilchenko 		return;
292563b72657SIvan Ilchenko 	free(ports[pi].xstats_info.ids_supp);
292663b72657SIvan Ilchenko 	free(ports[pi].xstats_info.prev_values);
292763b72657SIvan Ilchenko 	free(ports[pi].xstats_info.curr_values);
292863b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = false;
292963b72657SIvan Ilchenko }
293063b72657SIvan Ilchenko 
293163b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. */
293263b72657SIvan Ilchenko static void
293363b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
293463b72657SIvan Ilchenko {
293563b72657SIvan Ilchenko 	unsigned int stat, stat_supp;
293663b72657SIvan Ilchenko 	const char *xstat_name;
293763b72657SIvan Ilchenko 	struct rte_port *port;
293863b72657SIvan Ilchenko 	uint64_t *ids_supp;
293963b72657SIvan Ilchenko 	int rc;
294063b72657SIvan Ilchenko 
294163b72657SIvan Ilchenko 	if (xstats_display_num == 0)
294263b72657SIvan Ilchenko 		return;
294363b72657SIvan Ilchenko 
294463b72657SIvan Ilchenko 	if (pi == (portid_t)RTE_PORT_ALL) {
294563b72657SIvan Ilchenko 		fill_xstats_display_info();
294663b72657SIvan Ilchenko 		return;
294763b72657SIvan Ilchenko 	}
294863b72657SIvan Ilchenko 
294963b72657SIvan Ilchenko 	port = &ports[pi];
295063b72657SIvan Ilchenko 	if (port->port_status != RTE_PORT_STARTED)
295163b72657SIvan Ilchenko 		return;
295263b72657SIvan Ilchenko 
295363b72657SIvan Ilchenko 	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
295463b72657SIvan Ilchenko 		rte_exit(EXIT_FAILURE,
295563b72657SIvan Ilchenko 			 "Failed to allocate xstats display memory\n");
295663b72657SIvan Ilchenko 
295763b72657SIvan Ilchenko 	ids_supp = port->xstats_info.ids_supp;
295863b72657SIvan Ilchenko 	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
295963b72657SIvan Ilchenko 		xstat_name = xstats_display[stat].name;
296063b72657SIvan Ilchenko 		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
296163b72657SIvan Ilchenko 						   ids_supp + stat_supp);
296263b72657SIvan Ilchenko 		if (rc != 0) {
296363b72657SIvan Ilchenko 			fprintf(stderr, "No xstat '%s' on port %u - skipping display entry %u\n",
296463b72657SIvan Ilchenko 				xstat_name, pi, stat);
296563b72657SIvan Ilchenko 			continue;
296663b72657SIvan Ilchenko 		}
296763b72657SIvan Ilchenko 		stat_supp++;
296863b72657SIvan Ilchenko 	}
296963b72657SIvan Ilchenko 
297063b72657SIvan Ilchenko 	port->xstats_info.ids_supp_sz = stat_supp;
297163b72657SIvan Ilchenko }
297263b72657SIvan Ilchenko 
297363b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */
297463b72657SIvan Ilchenko static void
297563b72657SIvan Ilchenko fill_xstats_display_info(void)
297663b72657SIvan Ilchenko {
297763b72657SIvan Ilchenko 	portid_t pi;
297863b72657SIvan Ilchenko 
297963b72657SIvan Ilchenko 	if (xstats_display_num == 0)
298063b72657SIvan Ilchenko 		return;
298163b72657SIvan Ilchenko 
298263b72657SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(pi)
298363b72657SIvan Ilchenko 		fill_xstats_display_info_for_port(pi);
298463b72657SIvan Ilchenko }
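
/*
 * Illustrative sketch, not part of testpmd: once ids_supp has been
 * filled above, the selected counters can be read back in one call.
 * Assumes the rte_eth_xstats_get_by_id() ethdev API; show_xstats()
 * is a hypothetical name.
 */
static __rte_unused void
show_xstats(portid_t pi)
{
	struct rte_port *port = &ports[pi];
	unsigned int n = port->xstats_info.ids_supp_sz;
	int rc;

	rc = rte_eth_xstats_get_by_id(pi, port->xstats_info.ids_supp,
				      port->xstats_info.curr_values, n);
	if (rc < 0 || (unsigned int)rc != n)
		fprintf(stderr, "Failed to read xstats for port %u\n", pi);
}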
298563b72657SIvan Ilchenko 
29867c06f1abSHuisong Li /*
29877c06f1abSHuisong Li  * Some capabilities (like rx_offload_capa and tx_offload_capa) of a bonding
298815e34522SLong Wu  * device in dev_info are zero when no member is added, and they are updated
298915e34522SLong Wu  * when a new member device is added. So adding a member device requires
29907c06f1abSHuisong Li  * updating the port configuration of the bonding device.
29917c06f1abSHuisong Li  */
29927c06f1abSHuisong Li static void
29937c06f1abSHuisong Li update_bonding_port_dev_conf(portid_t bond_pid)
29947c06f1abSHuisong Li {
29957c06f1abSHuisong Li #ifdef RTE_NET_BOND
29967c06f1abSHuisong Li 	struct rte_port *port = &ports[bond_pid];
29977c06f1abSHuisong Li 	uint16_t i;
29987c06f1abSHuisong Li 	int ret;
29997c06f1abSHuisong Li 
30007c06f1abSHuisong Li 	ret = eth_dev_info_get_print_err(bond_pid, &port->dev_info);
30017c06f1abSHuisong Li 	if (ret != 0) {
30027c06f1abSHuisong Li 		fprintf(stderr, "Failed to get dev info for port = %u\n",
30037c06f1abSHuisong Li 			bond_pid);
30047c06f1abSHuisong Li 		return;
30057c06f1abSHuisong Li 	}
30067c06f1abSHuisong Li 
30077c06f1abSHuisong Li 	if (port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
30087c06f1abSHuisong Li 		port->dev_conf.txmode.offloads |=
30097c06f1abSHuisong Li 				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
30107c06f1abSHuisong Li 	/* Apply Tx offloads configuration */
30117c06f1abSHuisong Li 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
30127c06f1abSHuisong Li 		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
30137c06f1abSHuisong Li 
30147c06f1abSHuisong Li 	port->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
30157c06f1abSHuisong Li 				port->dev_info.flow_type_rss_offloads;
30167c06f1abSHuisong Li #else
30177c06f1abSHuisong Li 	RTE_SET_USED(bond_pid);
30187c06f1abSHuisong Li #endif
30197c06f1abSHuisong Li }
30207c06f1abSHuisong Li 
3021edab33b1STetsuya Mukawa int
3022ce8d5614SIntel start_port(portid_t pid)
3023ce8d5614SIntel {
3024cdede073SFerruh Yigit 	int diag;
3025ce8d5614SIntel 	portid_t pi;
302601817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
302701817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
302801817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
302901817b10SBing Zhao 	uint16_t cnt_pi = 0;
303001817b10SBing Zhao 	uint16_t cfg_pi = 0;
303101817b10SBing Zhao 	int peer_pi;
3032ce8d5614SIntel 	queueid_t qi;
3033ce8d5614SIntel 	struct rte_port *port;
30341c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
3035cdede073SFerruh Yigit 	bool at_least_one_port_exist = false;
3036cdede073SFerruh Yigit 	bool all_ports_already_started = true;
3037cdede073SFerruh Yigit 	bool at_least_one_port_successfully_started = false;
3038ce8d5614SIntel 
30394468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
30404468635fSMichael Qiu 		return 0;
30414468635fSMichael Qiu 
30427d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
3043edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3044ce8d5614SIntel 			continue;
3045ce8d5614SIntel 
304615e34522SLong Wu 		if (port_is_bonding_member(pi)) {
3047d8c079a5SMin Hu (Connor) 			fprintf(stderr,
30484f840086SLong Wu 				"Please remove port %d from bonding device.\n",
3049d8c079a5SMin Hu (Connor) 				pi);
3050d8c079a5SMin Hu (Connor) 			continue;
3051d8c079a5SMin Hu (Connor) 		}
3052d8c079a5SMin Hu (Connor) 
3053cdede073SFerruh Yigit 		at_least_one_port_exist = true;
3054cdede073SFerruh Yigit 
3055ce8d5614SIntel 		port = &ports[pi];
3056cdede073SFerruh Yigit 		if (port->port_status == RTE_PORT_STOPPED) {
3057eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
3058cdede073SFerruh Yigit 			all_ports_already_started = false;
3059cdede073SFerruh Yigit 		} else {
306061a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is not stopped\n", pi);
3061ce8d5614SIntel 			continue;
3062ce8d5614SIntel 		}
3063ce8d5614SIntel 
3064ce8d5614SIntel 		if (port->need_reconfig > 0) {
3065655eae01SJie Wang 			struct rte_eth_conf dev_conf;
3066655eae01SJie Wang 			int k;
3067655eae01SJie Wang 
3068ce8d5614SIntel 			port->need_reconfig = 0;
3069ce8d5614SIntel 
30707ee3e944SVasily Philipov 			if (flow_isolate_all) {
30717ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
30727ee3e944SVasily Philipov 				if (ret) {
307361a3b0e5SAndrew Rybchenko 					fprintf(stderr,
307461a3b0e5SAndrew Rybchenko 						"Failed to apply isolated mode on port %d\n",
307561a3b0e5SAndrew Rybchenko 						pi);
30767ee3e944SVasily Philipov 					return -1;
30777ee3e944SVasily Philipov 				}
30787ee3e944SVasily Philipov 			}
3079b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
30805706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
308120a0286fSLiu Xiaofeng 					port->socket_id);
30821c69df45SOri Kam 			if (nb_hairpinq > 0 &&
30831c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
308461a3b0e5SAndrew Rybchenko 				fprintf(stderr,
308561a3b0e5SAndrew Rybchenko 					"Port %d doesn't support hairpin queues\n",
308661a3b0e5SAndrew Rybchenko 					pi);
30871c69df45SOri Kam 				return -1;
30881c69df45SOri Kam 			}
30891bb4a528SFerruh Yigit 
30907c06f1abSHuisong Li 			if (port->bond_flag == 1 && port->update_conf == 1) {
30917c06f1abSHuisong Li 				update_bonding_port_dev_conf(pi);
30927c06f1abSHuisong Li 				port->update_conf = 0;
30937c06f1abSHuisong Li 			}
30947c06f1abSHuisong Li 
3095ce8d5614SIntel 			/* configure port */
3096a550baf2SMin Hu (Connor) 			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
30971c69df45SOri Kam 						     nb_txq + nb_hairpinq,
3098ce8d5614SIntel 						     &(port->dev_conf));
3099ce8d5614SIntel 			if (diag != 0) {
3100eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3101eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3102eac341d3SJoyce Kong 				else
310361a3b0e5SAndrew Rybchenko 					fprintf(stderr,
310461a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
310561a3b0e5SAndrew Rybchenko 						pi);
310661a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Fail to configure port %d\n",
310761a3b0e5SAndrew Rybchenko 					pi);
3108ce8d5614SIntel 				/* try to reconfigure port next time */
3109ce8d5614SIntel 				port->need_reconfig = 1;
3110148f963fSBruce Richardson 				return -1;
3111ce8d5614SIntel 			}
3112655eae01SJie Wang 			/* get device configuration */
3113655eae01SJie Wang 			if (0 !=
3114655eae01SJie Wang 				eth_dev_conf_get_print_err(pi, &dev_conf)) {
3115655eae01SJie Wang 				fprintf(stderr,
3116655eae01SJie Wang 					"port %d can not get device configuration\n",
3117655eae01SJie Wang 					pi);
3118655eae01SJie Wang 				return -1;
3119655eae01SJie Wang 			}
3120655eae01SJie Wang 			/* Apply Rx offloads configuration */
3121655eae01SJie Wang 			if (dev_conf.rxmode.offloads !=
3122655eae01SJie Wang 			    port->dev_conf.rxmode.offloads) {
3123655eae01SJie Wang 				port->dev_conf.rxmode.offloads |=
3124655eae01SJie Wang 					dev_conf.rxmode.offloads;
3125655eae01SJie Wang 				for (k = 0;
3126655eae01SJie Wang 				     k < port->dev_info.max_rx_queues;
3127655eae01SJie Wang 				     k++)
31283c4426dbSDmitry Kozlyuk 					port->rxq[k].conf.offloads |=
3129655eae01SJie Wang 						dev_conf.rxmode.offloads;
3130655eae01SJie Wang 			}
3131655eae01SJie Wang 			/* Apply Tx offloads configuration */
3132655eae01SJie Wang 			if (dev_conf.txmode.offloads !=
3133655eae01SJie Wang 			    port->dev_conf.txmode.offloads) {
3134655eae01SJie Wang 				port->dev_conf.txmode.offloads |=
3135655eae01SJie Wang 					dev_conf.txmode.offloads;
3136655eae01SJie Wang 				for (k = 0;
3137655eae01SJie Wang 				     k < port->dev_info.max_tx_queues;
3138655eae01SJie Wang 				     k++)
31393c4426dbSDmitry Kozlyuk 					port->txq[k].conf.offloads |=
3140655eae01SJie Wang 						dev_conf.txmode.offloads;
3141655eae01SJie Wang 			}
3142ce8d5614SIntel 		}
3143a550baf2SMin Hu (Connor) 		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
3144ce8d5614SIntel 			port->need_reconfig_queues = 0;
3145ce8d5614SIntel 			/* setup tx queues */
3146ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
31473c4426dbSDmitry Kozlyuk 				struct rte_eth_txconf *conf =
31483c4426dbSDmitry Kozlyuk 							&port->txq[qi].conf;
31493c4426dbSDmitry Kozlyuk 
3150b6ea6408SIntel 				if ((numa_support) &&
3151b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
3152b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
3153d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
3154d44f8a48SQi Zhang 						txring_numa[pi],
31553c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
3156b6ea6408SIntel 				else
3157b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
3158d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
3159d44f8a48SQi Zhang 						port->socket_id,
31603c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
3161b6ea6408SIntel 
31623c4426dbSDmitry Kozlyuk 				if (diag == 0) {
31633c4426dbSDmitry Kozlyuk 					port->txq[qi].state =
31643c4426dbSDmitry Kozlyuk 						conf->tx_deferred_start ?
31653c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
31663c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
3167ce8d5614SIntel 					continue;
31683c4426dbSDmitry Kozlyuk 				}
3169ce8d5614SIntel 
3170ce8d5614SIntel 				/* Fail to setup tx queue, return */
3171eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3172eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3173eac341d3SJoyce Kong 				else
317461a3b0e5SAndrew Rybchenko 					fprintf(stderr,
317561a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
317661a3b0e5SAndrew Rybchenko 						pi);
317761a3b0e5SAndrew Rybchenko 				fprintf(stderr,
317861a3b0e5SAndrew Rybchenko 					"Fail to configure port %d tx queues\n",
3179d44f8a48SQi Zhang 					pi);
3180ce8d5614SIntel 				/* try to reconfigure queues next time */
3181ce8d5614SIntel 				port->need_reconfig_queues = 1;
3182148f963fSBruce Richardson 				return -1;
3183ce8d5614SIntel 			}
3184ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
3185d44f8a48SQi Zhang 				/* setup rx queues */
3186b6ea6408SIntel 				if ((numa_support) &&
3187b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
3188b6ea6408SIntel 					struct rte_mempool *mp =
318926cbb419SViacheslav Ovsiienko 						mbuf_pool_find
319026cbb419SViacheslav Ovsiienko 							(rxring_numa[pi], 0);
3191b6ea6408SIntel 					if (mp == NULL) {
319261a3b0e5SAndrew Rybchenko 						fprintf(stderr,
319361a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: no mempool allocated on socket %d\n",
3194b6ea6408SIntel 							rxring_numa[pi]);
3195148f963fSBruce Richardson 						return -1;
3196b6ea6408SIntel 					}
3197b6ea6408SIntel 
31982befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
3199d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
3200d44f8a48SQi Zhang 					     rxring_numa[pi],
32013c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
3202d44f8a48SQi Zhang 					     mp);
32031e1d6bddSBernard Iremonger 				} else {
32041e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
320526cbb419SViacheslav Ovsiienko 						mbuf_pool_find
320626cbb419SViacheslav Ovsiienko 							(port->socket_id, 0);
32071e1d6bddSBernard Iremonger 					if (mp == NULL) {
320861a3b0e5SAndrew Rybchenko 						fprintf(stderr,
320961a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: no mempool allocated on socket %d\n",
32101e1d6bddSBernard Iremonger 							port->socket_id);
32111e1d6bddSBernard Iremonger 						return -1;
3212b6ea6408SIntel 					}
32132befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
3214d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
3215d44f8a48SQi Zhang 					     port->socket_id,
32163c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
3217d44f8a48SQi Zhang 					     mp);
32181e1d6bddSBernard Iremonger 				}
3219ce8d5614SIntel 				if (diag == 0)
3220ce8d5614SIntel 					continue;
3221ce8d5614SIntel 
3222ce8d5614SIntel 				/* Fail to setup rx queue, return */
3223eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3224eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3225eac341d3SJoyce Kong 				else
322661a3b0e5SAndrew Rybchenko 					fprintf(stderr,
322761a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
322861a3b0e5SAndrew Rybchenko 						pi);
322961a3b0e5SAndrew Rybchenko 				fprintf(stderr,
323061a3b0e5SAndrew Rybchenko 					"Fail to configure port %d rx queues\n",
3231d44f8a48SQi Zhang 					pi);
3232ce8d5614SIntel 				/* try to reconfigure queues next time */
3233ce8d5614SIntel 				port->need_reconfig_queues = 1;
3234148f963fSBruce Richardson 				return -1;
3235ce8d5614SIntel 			}
32361c69df45SOri Kam 			/* setup hairpin queues */
323701817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
32381c69df45SOri Kam 				return -1;
3239ce8d5614SIntel 		}
3240b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
3241b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
3242b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3243b0a9354aSPavan Nikhilesh 					NULL, 0);
3244b0a9354aSPavan Nikhilesh 			if (diag < 0)
324561a3b0e5SAndrew Rybchenko 				fprintf(stderr,
3246b0a9354aSPavan Nikhilesh 					"Port %d: Failed to disable Ptype parsing\n",
3247b0a9354aSPavan Nikhilesh 					pi);
3248b0a9354aSPavan Nikhilesh 		}
3249b0a9354aSPavan Nikhilesh 
325001817b10SBing Zhao 		p_pi = pi;
325101817b10SBing Zhao 		cnt_pi++;
325201817b10SBing Zhao 
3253ce8d5614SIntel 		/* start port */
3254a550baf2SMin Hu (Connor) 		diag = eth_dev_start_mp(pi);
325552f2c6f2SAndrew Rybchenko 		if (diag < 0) {
325661a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Fail to start port %d: %s\n",
325761a3b0e5SAndrew Rybchenko 				pi, rte_strerror(-diag));
3258ce8d5614SIntel 
3259ce8d5614SIntel 			/* Failed to start port, set it back to stopped */
3260eac341d3SJoyce Kong 			if (port->port_status == RTE_PORT_HANDLING)
3261eac341d3SJoyce Kong 				port->port_status = RTE_PORT_STOPPED;
3262eac341d3SJoyce Kong 			else
326361a3b0e5SAndrew Rybchenko 				fprintf(stderr,
326461a3b0e5SAndrew Rybchenko 					"Port %d can not be set back to stopped\n",
326561a3b0e5SAndrew Rybchenko 					pi);
3266ce8d5614SIntel 			continue;
3267ce8d5614SIntel 		}
3268ce8d5614SIntel 
3269eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3270eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STARTED;
3271eac341d3SJoyce Kong 		else
327261a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into started\n",
327361a3b0e5SAndrew Rybchenko 				pi);
3274ce8d5614SIntel 
32755ffc4a2aSYuying Zhang 		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3276c2c4f87bSAman Deep Singh 			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3277a7db3afcSAman Deep Singh 					RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3278d8c89163SZijie Pan 
3279cdede073SFerruh Yigit 		at_least_one_port_successfully_started = true;
328001817b10SBing Zhao 
328101817b10SBing Zhao 		pl[cfg_pi++] = pi;
3282ce8d5614SIntel 	}
3283ce8d5614SIntel 
3284d7d802daSFerruh Yigit 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
32855028f207SShiyang He 		update_queue_state();
32865028f207SShiyang He 
3287cdede073SFerruh Yigit 	if (at_least_one_port_successfully_started && !no_link_check)
3288edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3289cdede073SFerruh Yigit 	else if (at_least_one_port_exist && all_ports_already_started)
329061a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Please stop the ports first\n");
3291ce8d5614SIntel 
329201817b10SBing Zhao 	if (hairpin_mode & 0xf) {
329301817b10SBing Zhao 		uint16_t i;
329401817b10SBing Zhao 		int j;
329501817b10SBing Zhao 
329601817b10SBing Zhao 		/* bind all started hairpin ports */
329701817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
329801817b10SBing Zhao 			pi = pl[i];
329901817b10SBing Zhao 			/* bind current Tx to all peer Rx */
330001817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
330101817b10SBing Zhao 							RTE_MAX_ETHPORTS, 1);
330201817b10SBing Zhao 			if (peer_pi < 0)
330301817b10SBing Zhao 				return peer_pi;
330401817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
330501817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
330601817b10SBing Zhao 					continue;
330701817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
330801817b10SBing Zhao 				if (diag < 0) {
330961a3b0e5SAndrew Rybchenko 					fprintf(stderr,
331061a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
331101817b10SBing Zhao 						pi, peer_pl[j],
331201817b10SBing Zhao 						rte_strerror(-diag));
331301817b10SBing Zhao 					return -1;
331401817b10SBing Zhao 				}
331501817b10SBing Zhao 			}
331601817b10SBing Zhao 			/* bind all peer Tx to current Rx */
331701817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
331801817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
331901817b10SBing Zhao 			if (peer_pi < 0)
332001817b10SBing Zhao 				return peer_pi;
332101817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
332201817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
332301817b10SBing Zhao 					continue;
332401817b10SBing Zhao 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
332501817b10SBing Zhao 				if (diag < 0) {
332661a3b0e5SAndrew Rybchenko 					fprintf(stderr,
332761a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
332801817b10SBing Zhao 						peer_pl[j], pi,
332901817b10SBing Zhao 						rte_strerror(-diag));
333001817b10SBing Zhao 					return -1;
333101817b10SBing Zhao 				}
333201817b10SBing Zhao 			}
333301817b10SBing Zhao 		}
333401817b10SBing Zhao 	}
333501817b10SBing Zhao 
333663b72657SIvan Ilchenko 	fill_xstats_display_info_for_port(pid);
333763b72657SIvan Ilchenko 
3338ce8d5614SIntel 	printf("Done\n");
3339148f963fSBruce Richardson 	return 0;
3340ce8d5614SIntel }
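
/*
 * Illustrative sketch, not part of testpmd: the manual hairpin
 * binding performed in start_port() above, reduced to a single
 * Tx -> Rx port pair. bind_one_hairpin() is a hypothetical name.
 */
static __rte_unused int
bind_one_hairpin(portid_t tx_port, portid_t rx_port)
{
	int diag;

	if (!port_is_started(tx_port) || !port_is_started(rx_port))
		return -EINVAL;
	diag = rte_eth_hairpin_bind(tx_port, rx_port);
	if (diag < 0)
		fprintf(stderr, "hairpin bind %u->%u failed: %s\n",
			tx_port, rx_port, rte_strerror(-diag));
	return diag;
}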
3341ce8d5614SIntel 
3342ce8d5614SIntel void
3343ce8d5614SIntel stop_port(portid_t pid)
3344ce8d5614SIntel {
3345ce8d5614SIntel 	portid_t pi;
3346ce8d5614SIntel 	struct rte_port *port;
3347ce8d5614SIntel 	int need_check_link_status = 0;
334801817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
334901817b10SBing Zhao 	int peer_pi;
335047a4e1fbSDariusz Sosnowski 	int ret;
3351ce8d5614SIntel 
33524468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
33534468635fSMichael Qiu 		return;
33544468635fSMichael Qiu 
3355ce8d5614SIntel 	printf("Stopping ports...\n");
3356ce8d5614SIntel 
33577d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
33584468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3359ce8d5614SIntel 			continue;
3360ce8d5614SIntel 
3361a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
336261a3b0e5SAndrew Rybchenko 			fprintf(stderr,
336361a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
336461a3b0e5SAndrew Rybchenko 				pi);
3365a8ef3e3aSBernard Iremonger 			continue;
3366a8ef3e3aSBernard Iremonger 		}
3367a8ef3e3aSBernard Iremonger 
336815e34522SLong Wu 		if (port_is_bonding_member(pi)) {
336961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
33704f840086SLong Wu 				"Please remove port %d from the bonding device.\n",
337161a3b0e5SAndrew Rybchenko 				pi);
33720e545d30SBernard Iremonger 			continue;
33730e545d30SBernard Iremonger 		}
33740e545d30SBernard Iremonger 
3375ce8d5614SIntel 		port = &ports[pi];
3376eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_STARTED)
3377eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
3378eac341d3SJoyce Kong 		else
3379ce8d5614SIntel 			continue;
3380ce8d5614SIntel 
338101817b10SBing Zhao 		if (hairpin_mode & 0xf) {
338201817b10SBing Zhao 			int j;
338301817b10SBing Zhao 
338401817b10SBing Zhao 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
338501817b10SBing Zhao 			/* unbind all peer Tx from current Rx */
338601817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
338701817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
338801817b10SBing Zhao 			if (peer_pi < 0)
338901817b10SBing Zhao 				continue;
339001817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
339101817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
339201817b10SBing Zhao 					continue;
339301817b10SBing Zhao 				rte_eth_hairpin_unbind(peer_pl[j], pi);
339401817b10SBing Zhao 			}
339501817b10SBing Zhao 		}
339601817b10SBing Zhao 
3397543df472SChengwen Feng 		if (port->flow_list && !no_flow_flush)
33980f93edbfSGregory Etelson 			port_flow_flush(pi);
33990f93edbfSGregory Etelson 
340047a4e1fbSDariusz Sosnowski 		ret = eth_dev_stop_mp(pi);
340147a4e1fbSDariusz Sosnowski 		if (ret != 0) {
3402e62c5a12SIvan Ilchenko 			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3403e62c5a12SIvan Ilchenko 				pi);
340447a4e1fbSDariusz Sosnowski 			/* Allow the port stop to be retried. */
340547a4e1fbSDariusz Sosnowski 			port->port_status = RTE_PORT_STARTED;
340647a4e1fbSDariusz Sosnowski 			continue;
340747a4e1fbSDariusz Sosnowski 		}
3408ce8d5614SIntel 
3409eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3410eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
3411eac341d3SJoyce Kong 		else
341261a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d cannot be set to stopped state\n",
341361a3b0e5SAndrew Rybchenko 				pi);
3414ce8d5614SIntel 		need_check_link_status = 1;
3415ce8d5614SIntel 	}
3416bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
3417edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3418ce8d5614SIntel 
3419ce8d5614SIntel 	printf("Done\n");
3420ce8d5614SIntel }
3421ce8d5614SIntel 
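/*
 * Minimal sketch (an assumption, not from the original file) of the
 * core of stop_port(): rte_eth_dev_stop() can fail and may be retried,
 * which is why the port status above is rolled back to
 * RTE_PORT_STARTED on error.
 *
 *	int ret = rte_eth_dev_stop(port_id);
 *
 *	if (ret != 0)
 *		fprintf(stderr, "stop failed: %s\n", rte_strerror(-ret));
 */
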
3422ce6959bfSWisam Jaddo static void
34234f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
3424ce6959bfSWisam Jaddo {
34254f1de450SThomas Monjalon 	portid_t i;
34264f1de450SThomas Monjalon 	portid_t new_total = 0;
3427ce6959bfSWisam Jaddo 
34284f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
34294f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
34304f1de450SThomas Monjalon 			array[new_total] = array[i];
34314f1de450SThomas Monjalon 			new_total++;
3432ce6959bfSWisam Jaddo 		}
34334f1de450SThomas Monjalon 	*total = new_total;
34344f1de450SThomas Monjalon }
34354f1de450SThomas Monjalon 
34364f1de450SThomas Monjalon static void
34374f1de450SThomas Monjalon remove_invalid_ports(void)
34384f1de450SThomas Monjalon {
34394f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
34404f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
34414f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
3442ce6959bfSWisam Jaddo }
3443ce6959bfSWisam Jaddo 
34443889a322SHuisong Li static void
34454b27989dSDmitry Kozlyuk flush_port_owned_resources(portid_t pi)
34464b27989dSDmitry Kozlyuk {
34474b27989dSDmitry Kozlyuk 	mcast_addr_pool_destroy(pi);
34484b27989dSDmitry Kozlyuk 	port_flow_flush(pi);
34496d736e05SSuanming Mou 	port_flow_template_table_flush(pi);
34506d736e05SSuanming Mou 	port_flow_pattern_template_flush(pi);
34516d736e05SSuanming Mou 	port_flow_actions_template_flush(pi);
3452653c0812SRongwei Liu 	port_flex_item_flush(pi);
34534b27989dSDmitry Kozlyuk 	port_action_handle_flush(pi);
34544b27989dSDmitry Kozlyuk }
34554b27989dSDmitry Kozlyuk 
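/*
 * Sketch of the underlying ethdev call (hypothetical standalone use;
 * testpmd wraps it in the port_flow_* helpers above): flow rules must
 * be flushed before rte_eth_dev_close().
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		fprintf(stderr, "flow flush failed: %s\n",
 *			err.message ? err.message : "(no message)");
 */
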
34564b27989dSDmitry Kozlyuk static void
345715e34522SLong Wu clear_bonding_member_device(portid_t *member_pids, uint16_t num_members)
34583889a322SHuisong Li {
34593889a322SHuisong Li 	struct rte_port *port;
346015e34522SLong Wu 	portid_t member_pid;
34613889a322SHuisong Li 	uint16_t i;
34623889a322SHuisong Li 
346315e34522SLong Wu 	for (i = 0; i < num_members; i++) {
346415e34522SLong Wu 		member_pid = member_pids[i];
346515e34522SLong Wu 		if (port_is_started(member_pid) == 1) {
346615e34522SLong Wu 			if (rte_eth_dev_stop(member_pid) != 0)
34673889a322SHuisong Li 				fprintf(stderr, "rte_eth_dev_stop failed for port %u\n",
346815e34522SLong Wu 					member_pid);
34693889a322SHuisong Li 
347015e34522SLong Wu 			port = &ports[member_pid];
34713889a322SHuisong Li 			port->port_status = RTE_PORT_STOPPED;
34723889a322SHuisong Li 		}
34733889a322SHuisong Li 
347415e34522SLong Wu 		clear_port_member_flag(member_pid);
34753889a322SHuisong Li 
347615e34522SLong Wu 		/* Close the member device when testpmd quits or is killed. */
34773889a322SHuisong Li 		if (cl_quit == 1 || f_quit == 1)
347815e34522SLong Wu 			rte_eth_dev_close(member_pid);
34793889a322SHuisong Li 	}
34803889a322SHuisong Li }
34813889a322SHuisong Li 
3482ce8d5614SIntel void
3483ce8d5614SIntel close_port(portid_t pid)
3484ce8d5614SIntel {
3485ce8d5614SIntel 	portid_t pi;
3486ce8d5614SIntel 	struct rte_port *port;
348715e34522SLong Wu 	portid_t member_pids[RTE_MAX_ETHPORTS];
348815e34522SLong Wu 	int num_members = 0;
3489ce8d5614SIntel 
34904468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
34914468635fSMichael Qiu 		return;
34924468635fSMichael Qiu 
3493ce8d5614SIntel 	printf("Closing ports...\n");
3494ce8d5614SIntel 
34957d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
34964468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3497ce8d5614SIntel 			continue;
3498ce8d5614SIntel 
3499a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
350061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
350161a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
350261a3b0e5SAndrew Rybchenko 				pi);
3503a8ef3e3aSBernard Iremonger 			continue;
3504a8ef3e3aSBernard Iremonger 		}
3505a8ef3e3aSBernard Iremonger 
350615e34522SLong Wu 		if (port_is_bonding_member(pi)) {
350761a3b0e5SAndrew Rybchenko 			fprintf(stderr,
35084f840086SLong Wu 				"Please remove port %d from the bonding device.\n",
350961a3b0e5SAndrew Rybchenko 				pi);
35100e545d30SBernard Iremonger 			continue;
35110e545d30SBernard Iremonger 		}
35120e545d30SBernard Iremonger 
3513ce8d5614SIntel 		port = &ports[pi];
3514eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_CLOSED) {
351561a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is already closed\n", pi);
3516d4e8ad64SMichael Qiu 			continue;
3517d4e8ad64SMichael Qiu 		}
3518d4e8ad64SMichael Qiu 
3519a550baf2SMin Hu (Connor) 		if (is_proc_primary()) {
35204b27989dSDmitry Kozlyuk 			flush_port_owned_resources(pi);
35213889a322SHuisong Li #ifdef RTE_NET_BOND
35223889a322SHuisong Li 			if (port->bond_flag == 1)
352315e34522SLong Wu 				num_members = rte_eth_bond_members_get(pi,
352415e34522SLong Wu 						member_pids, RTE_MAX_ETHPORTS);
35253889a322SHuisong Li #endif
3526ce8d5614SIntel 			rte_eth_dev_close(pi);
35273889a322SHuisong Li 			/*
35284f840086SLong Wu 			 * If this port is a bonding device, all members under the
35293889a322SHuisong Li 			 * device need to be removed or closed.
35303889a322SHuisong Li 			 */
353115e34522SLong Wu 			if (port->bond_flag == 1 && num_members > 0)
353215e34522SLong Wu 				clear_bonding_member_device(member_pids,
353315e34522SLong Wu 							num_members);
3534ce8d5614SIntel 		}
353563b72657SIvan Ilchenko 
353663b72657SIvan Ilchenko 		free_xstats_display_info(pi);
3537a550baf2SMin Hu (Connor) 	}
3538ce8d5614SIntel 
353985c6571cSThomas Monjalon 	remove_invalid_ports();
3540ce8d5614SIntel 	printf("Done\n");
3541ce8d5614SIntel }
3542ce8d5614SIntel 
3543edab33b1STetsuya Mukawa void
354497f1e196SWei Dai reset_port(portid_t pid)
354597f1e196SWei Dai {
354697f1e196SWei Dai 	int diag;
354797f1e196SWei Dai 	portid_t pi;
354897f1e196SWei Dai 	struct rte_port *port;
354997f1e196SWei Dai 
355097f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
355197f1e196SWei Dai 		return;
355297f1e196SWei Dai 
35531cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
35541cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
355561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
355761a3b0e5SAndrew Rybchenko 			"Cannot reset port(s), please stop port(s) first.\n");
35571cde1b9aSShougang Wang 		return;
35581cde1b9aSShougang Wang 	}
35591cde1b9aSShougang Wang 
356097f1e196SWei Dai 	printf("Resetting ports...\n");
356197f1e196SWei Dai 
356297f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
356397f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
356497f1e196SWei Dai 			continue;
356597f1e196SWei Dai 
356697f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
356761a3b0e5SAndrew Rybchenko 			fprintf(stderr,
356861a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
356961a3b0e5SAndrew Rybchenko 				pi);
357097f1e196SWei Dai 			continue;
357197f1e196SWei Dai 		}
357297f1e196SWei Dai 
357315e34522SLong Wu 		if (port_is_bonding_member(pi)) {
357461a3b0e5SAndrew Rybchenko 			fprintf(stderr,
35754f840086SLong Wu 				"Please remove port %d from the bonding device.\n",
357697f1e196SWei Dai 				pi);
357797f1e196SWei Dai 			continue;
357897f1e196SWei Dai 		}
357997f1e196SWei Dai 
3580e9351eaaSQiming Yang 		if (is_proc_primary()) {
358197f1e196SWei Dai 			diag = rte_eth_dev_reset(pi);
358297f1e196SWei Dai 			if (diag == 0) {
358397f1e196SWei Dai 				port = &ports[pi];
358497f1e196SWei Dai 				port->need_reconfig = 1;
358597f1e196SWei Dai 				port->need_reconfig_queues = 1;
358697f1e196SWei Dai 			} else {
358761a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Failed to reset port %d. diag=%d\n",
358861a3b0e5SAndrew Rybchenko 					pi, diag);
358997f1e196SWei Dai 			}
359097f1e196SWei Dai 		}
3591e9351eaaSQiming Yang 	}
359297f1e196SWei Dai 
359397f1e196SWei Dai 	printf("Done\n");
359497f1e196SWei Dai }
359597f1e196SWei Dai 
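/*
 * Illustrative sketch (port_conf, nb_rx and nb_tx are assumed
 * application values): a successful rte_eth_dev_reset() leaves the
 * port unconfigured again, which is why reset_port() sets
 * need_reconfig and need_reconfig_queues above; a standalone caller
 * would reconfigure immediately:
 *
 *	if (rte_eth_dev_reset(port_id) == 0)
 *		rte_eth_dev_configure(port_id, nb_rx, nb_tx, &port_conf);
 */
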
359697f1e196SWei Dai void
3597edab33b1STetsuya Mukawa attach_port(char *identifier)
3598ce8d5614SIntel {
35994f1ed78eSThomas Monjalon 	portid_t pi;
3600c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
3601ce8d5614SIntel 
3602edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
3603edab33b1STetsuya Mukawa 
3604edab33b1STetsuya Mukawa 	if (identifier == NULL) {
360561a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Invalid parameter specified\n");
3606edab33b1STetsuya Mukawa 		return;
3607ce8d5614SIntel 	}
3608ce8d5614SIntel 
360975b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
3610c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3611edab33b1STetsuya Mukawa 		return;
3612c9cce428SThomas Monjalon 	}
3613c9cce428SThomas Monjalon 
36144f1ed78eSThomas Monjalon 	/* first attach mode: event */
36154f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
36164f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
36174f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
36184f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
36194f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
36204f1ed78eSThomas Monjalon 				setup_attached_port(pi);
36214f1ed78eSThomas Monjalon 		return;
36224f1ed78eSThomas Monjalon 	}
36234f1ed78eSThomas Monjalon 
36244f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
362586fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
36264f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
362786fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
362886fa5de1SThomas Monjalon 			continue; /* port was already attached before */
3629c9cce428SThomas Monjalon 		setup_attached_port(pi);
3630c9cce428SThomas Monjalon 	}
363186fa5de1SThomas Monjalon }
3632c9cce428SThomas Monjalon 
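/*
 * Illustrative sketch: rte_dev_probe() accepts any EAL device argument
 * string; the identifiers below are hypothetical examples.
 *
 *	rte_dev_probe("0000:03:00.0");          (a PCI address)
 *	rte_dev_probe("net_tap0,iface=test");   (virtual device devargs)
 */
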
3633c9cce428SThomas Monjalon static void
3634c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
3635c9cce428SThomas Monjalon {
3636c9cce428SThomas Monjalon 	unsigned int socket_id;
363734fc1051SIvan Ilchenko 	int ret;
3638edab33b1STetsuya Mukawa 
3639931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
364029841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
3641931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
364229841336SPhil Yang 		socket_id = socket_ids[0];
3643931126baSBernard Iremonger 	reconfig(pi, socket_id);
364434fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
364534fc1051SIvan Ilchenko 	if (ret != 0)
364661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
364761a3b0e5SAndrew Rybchenko 			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
364834fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
3649edab33b1STetsuya Mukawa 
36504f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
36514f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
36524f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
36534f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
3654edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
3655edab33b1STetsuya Mukawa 
3656edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
3657edab33b1STetsuya Mukawa 	printf("Done\n");
3658edab33b1STetsuya Mukawa }
3659edab33b1STetsuya Mukawa 
36600654d4a8SThomas Monjalon static void
36610654d4a8SThomas Monjalon detach_device(struct rte_device *dev)
36625f4ec54fSChen Jing D(Mark) {
3663f8e5baa2SThomas Monjalon 	portid_t sibling;
3664f8e5baa2SThomas Monjalon 
3665f8e5baa2SThomas Monjalon 	if (dev == NULL) {
366661a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Device already removed\n");
3667f8e5baa2SThomas Monjalon 		return;
3668f8e5baa2SThomas Monjalon 	}
3669f8e5baa2SThomas Monjalon 
36700654d4a8SThomas Monjalon 	printf("Removing a device...\n");
3671938a184aSAdrien Mazarguil 
36722a449871SThomas Monjalon 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
36732a449871SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
36742a449871SThomas Monjalon 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
367561a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
367661a3b0e5SAndrew Rybchenko 					sibling);
36772a449871SThomas Monjalon 				return;
36782a449871SThomas Monjalon 			}
36794b27989dSDmitry Kozlyuk 			flush_port_owned_resources(sibling);
36802a449871SThomas Monjalon 		}
36812a449871SThomas Monjalon 	}
36822a449871SThomas Monjalon 
368375b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
3684ec5ecd7eSDavid Marchand 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", rte_dev_name(dev));
3685edab33b1STetsuya Mukawa 		return;
36863070419eSGaetan Rivet 	}
36874f1de450SThomas Monjalon 	remove_invalid_ports();
368803ce2c53SMatan Azrad 
36890654d4a8SThomas Monjalon 	printf("Device is detached\n");
3690f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
3691edab33b1STetsuya Mukawa 	printf("Done\n");
3692edab33b1STetsuya Mukawa 	return;
36935f4ec54fSChen Jing D(Mark) }
36945f4ec54fSChen Jing D(Mark) 
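/*
 * Minimal standalone sketch (not from the original file) of the rule
 * detach_device() enforces: every ethdev port of the device must be
 * stopped and closed before rte_dev_remove().
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		rte_eth_dev_stop(port_id);
 *		rte_eth_dev_close(port_id);
 *		rte_dev_remove(info.device);
 *	}
 */
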
3695af75078fSIntel void
36960654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
36970654d4a8SThomas Monjalon {
36980a0821bcSPaulis Gributs 	int ret;
36990a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
37000a0821bcSPaulis Gributs 
37010654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
37020654d4a8SThomas Monjalon 		return;
37030654d4a8SThomas Monjalon 
37040654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
37050654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
370661a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port not stopped\n");
37070654d4a8SThomas Monjalon 			return;
37080654d4a8SThomas Monjalon 		}
370961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Port was not closed\n");
37100654d4a8SThomas Monjalon 	}
37110654d4a8SThomas Monjalon 
37120a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
37130a0821bcSPaulis Gributs 	if (ret != 0) {
37140a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
37150a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
37160a0821bcSPaulis Gributs 			port_id);
37170a0821bcSPaulis Gributs 		return;
37180a0821bcSPaulis Gributs 	}
37190a0821bcSPaulis Gributs 	detach_device(dev_info.device);
37200654d4a8SThomas Monjalon }
37210654d4a8SThomas Monjalon 
37220654d4a8SThomas Monjalon void
37235edee5f6SThomas Monjalon detach_devargs(char *identifier)
372455e51c96SNithin Dabilpuram {
372555e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
372655e51c96SNithin Dabilpuram 	struct rte_devargs da;
372755e51c96SNithin Dabilpuram 	portid_t port_id;
372855e51c96SNithin Dabilpuram 
372955e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
373055e51c96SNithin Dabilpuram 
373155e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
373255e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
373361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "cannot parse identifier\n");
373455e51c96SNithin Dabilpuram 		return;
373555e51c96SNithin Dabilpuram 	}
373655e51c96SNithin Dabilpuram 
373755e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
373855e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
373955e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
374061a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
374161a3b0e5SAndrew Rybchenko 					port_id);
3742149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
374364051bb1SXueming Li 				rte_devargs_reset(&da);
374455e51c96SNithin Dabilpuram 				return;
374555e51c96SNithin Dabilpuram 			}
37464b27989dSDmitry Kozlyuk 			flush_port_owned_resources(port_id);
374755e51c96SNithin Dabilpuram 		}
374855e51c96SNithin Dabilpuram 	}
374955e51c96SNithin Dabilpuram 
3750148c51a3SDavid Marchand 	if (rte_eal_hotplug_remove(rte_bus_name(da.bus), da.name) != 0) {
375155e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3752148c51a3SDavid Marchand 			    da.name, rte_bus_name(da.bus));
375364051bb1SXueming Li 		rte_devargs_reset(&da);
375455e51c96SNithin Dabilpuram 		return;
375555e51c96SNithin Dabilpuram 	}
375655e51c96SNithin Dabilpuram 
375755e51c96SNithin Dabilpuram 	remove_invalid_ports();
375855e51c96SNithin Dabilpuram 
375955e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
376055e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
376155e51c96SNithin Dabilpuram 	printf("Done\n");
376264051bb1SXueming Li 	rte_devargs_reset(&da);
376355e51c96SNithin Dabilpuram }
376455e51c96SNithin Dabilpuram 
376555e51c96SNithin Dabilpuram void
3766af75078fSIntel pmd_test_exit(void)
3767af75078fSIntel {
3768af75078fSIntel 	portid_t pt_id;
376926cbb419SViacheslav Ovsiienko 	unsigned int i;
3770fb73e096SJeff Guo 	int ret;
3771af75078fSIntel 
37728210ec25SPablo de Lara 	if (test_done == 0)
37738210ec25SPablo de Lara 		stop_packet_forwarding();
37748210ec25SPablo de Lara 
3775761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
377626cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
37773a0968c8SShahaf Shuler 		if (mempools[i]) {
37783a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
37793a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
37803a0968c8SShahaf Shuler 						     NULL);
37813a0968c8SShahaf Shuler 		}
37823a0968c8SShahaf Shuler 	}
3783761f7ae1SJie Zhou #endif
3784d3a274ceSZhihong Wang 	if (ports != NULL) {
3785d3a274ceSZhihong Wang 		no_link_check = 1;
37867d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
378708fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
3788af75078fSIntel 			fflush(stdout);
3789d3a274ceSZhihong Wang 			stop_port(pt_id);
379008fd782bSCristian Dumitrescu 		}
379108fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
379208fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
379308fd782bSCristian Dumitrescu 			fflush(stdout);
3794d3a274ceSZhihong Wang 			close_port(pt_id);
3795af75078fSIntel 		}
3796d3a274ceSZhihong Wang 	}
3797fb73e096SJeff Guo 
3798fb73e096SJeff Guo 	if (hot_plug) {
3799fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
38002049c511SJeff Guo 		if (ret) {
3801fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
3802fb73e096SJeff Guo 				"Failed to stop the device event monitor.\n");
38032049c511SJeff Guo 			return;
38042049c511SJeff Guo 		}
3805fb73e096SJeff Guo 
38062049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
3807cc1bf307SJeff Guo 			dev_event_callback, NULL);
38082049c511SJeff Guo 		if (ret < 0) {
3809fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
38102049c511SJeff Guo 				"Failed to unregister the device event callback.\n");
38112049c511SJeff Guo 			return;
38122049c511SJeff Guo 		}
38132049c511SJeff Guo 
38142049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
38152049c511SJeff Guo 		if (ret) {
38162049c511SJeff Guo 			RTE_LOG(ERR, EAL,
38172049c511SJeff Guo 				"Failed to disable hotplug handling.\n");
38182049c511SJeff Guo 			return;
38192049c511SJeff Guo 		}
3820fb73e096SJeff Guo 	}
382126cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3822401b744dSShahaf Shuler 		if (mempools[i])
3823a550baf2SMin Hu (Connor) 			mempool_free_mp(mempools[i]);
3824401b744dSShahaf Shuler 	}
382563b72657SIvan Ilchenko 	free(xstats_display);
3826fb73e096SJeff Guo 
3827d3a274ceSZhihong Wang 	printf("\nBye...\n");
3828af75078fSIntel }
3829af75078fSIntel 
3830af75078fSIntel typedef void (*cmd_func_t)(void);
3831af75078fSIntel struct pmd_test_command {
3832af75078fSIntel 	const char *cmd_name;
3833af75078fSIntel 	cmd_func_t cmd_func;
3834af75078fSIntel };
3835af75078fSIntel 
3836ce8d5614SIntel /* Check the link status of all ports for up to 9 seconds, then print the final status */
3837af75078fSIntel static void
3838edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
3839af75078fSIntel {
3840ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
3841ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3842f8244c63SZhiyong Yang 	portid_t portid;
3843f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
3844ce8d5614SIntel 	struct rte_eth_link link;
3845e661a08bSIgor Romanov 	int ret;
3846ba5509a6SIvan Dyukov 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3847ce8d5614SIntel 
3848ce8d5614SIntel 	printf("Checking link statuses...\n");
3849ce8d5614SIntel 	fflush(stdout);
3850ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
3851ce8d5614SIntel 		all_ports_up = 1;
38527d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
3853ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
3854ce8d5614SIntel 				continue;
3855ce8d5614SIntel 			memset(&link, 0, sizeof(link));
3856e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
3857e661a08bSIgor Romanov 			if (ret < 0) {
3858e661a08bSIgor Romanov 				all_ports_up = 0;
3859e661a08bSIgor Romanov 				if (print_flag == 1)
386061a3b0e5SAndrew Rybchenko 					fprintf(stderr,
386161a3b0e5SAndrew Rybchenko 						"Port %u link get failed: %s\n",
3862e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
3863e661a08bSIgor Romanov 				continue;
3864e661a08bSIgor Romanov 			}
3865ce8d5614SIntel 			/* print link status if flag set */
3866ce8d5614SIntel 			if (print_flag == 1) {
3867ba5509a6SIvan Dyukov 				rte_eth_link_to_str(link_status,
3868ba5509a6SIvan Dyukov 					sizeof(link_status), &link);
3869ba5509a6SIvan Dyukov 				printf("Port %d %s\n", portid, link_status);
3870ce8d5614SIntel 				continue;
3871ce8d5614SIntel 			}
3872ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
3873295968d1SFerruh Yigit 			if (link.link_status == RTE_ETH_LINK_DOWN) {
3874ce8d5614SIntel 				all_ports_up = 0;
3875ce8d5614SIntel 				break;
3876ce8d5614SIntel 			}
3877ce8d5614SIntel 		}
3878ce8d5614SIntel 		/* after finally printing all link status, get out */
3879ce8d5614SIntel 		if (print_flag == 1)
3880ce8d5614SIntel 			break;
3881ce8d5614SIntel 
3882ce8d5614SIntel 		if (all_ports_up == 0) {
3883ce8d5614SIntel 			fflush(stdout);
3884ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
3885ce8d5614SIntel 		}
3886ce8d5614SIntel 
3887ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
3888ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3889ce8d5614SIntel 			print_flag = 1;
3890ce8d5614SIntel 		}
38918ea656f8SGaetan Rivet 
38928ea656f8SGaetan Rivet 		if (lsc_interrupt)
38938ea656f8SGaetan Rivet 			break;
3894ce8d5614SIntel 	}
3895af75078fSIntel }
3896af75078fSIntel 
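/*
 * Illustrative sketch: polling the link of a single port, without the
 * retry loop used above.
 *
 *	struct rte_eth_link link;
 *	char buf[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(buf, sizeof(buf), &link);
 *		printf("Port %u: %s\n", port_id, buf);
 *	}
 */
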
3897284c908cSGaetan Rivet static void
3898cc1bf307SJeff Guo rmv_port_callback(void *arg)
3899284c908cSGaetan Rivet {
39003b97888aSMatan Azrad 	int need_to_start = 0;
39010da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
390228caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
39030a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
39040a0821bcSPaulis Gributs 	int ret;
3905284c908cSGaetan Rivet 
3906284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
3907284c908cSGaetan Rivet 
39083b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
39093b97888aSMatan Azrad 		need_to_start = 1;
39103b97888aSMatan Azrad 		stop_packet_forwarding();
39113b97888aSMatan Azrad 	}
39120da2a62bSMatan Azrad 	no_link_check = 1;
3913284c908cSGaetan Rivet 	stop_port(port_id);
39140da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
39150654d4a8SThomas Monjalon 
39160a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
39170a0821bcSPaulis Gributs 	if (ret != 0)
39180a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
39190a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
39200a0821bcSPaulis Gributs 			port_id);
3921e1d38504SPaulis Gributs 	else {
3922e1d38504SPaulis Gributs 		struct rte_device *device = dev_info.device;
3923e1d38504SPaulis Gributs 		close_port(port_id);
3924e1d38504SPaulis Gributs 		detach_device(device); /* might be already removed or have more ports */
3925e1d38504SPaulis Gributs 	}
39263b97888aSMatan Azrad 	if (need_to_start)
39273b97888aSMatan Azrad 		start_packet_forwarding(0);
3928284c908cSGaetan Rivet }
3929284c908cSGaetan Rivet 
393076ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3931d6af1a13SBernard Iremonger static int
3932f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3933d6af1a13SBernard Iremonger 		  void *ret_param)
393476ad4a2dSGaetan Rivet {
393576ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3936d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
393776ad4a2dSGaetan Rivet 
393876ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
393961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
394061a3b0e5SAndrew Rybchenko 			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
394176ad4a2dSGaetan Rivet 			port_id, __func__, type);
394276ad4a2dSGaetan Rivet 		fflush(stderr);
39433af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3944f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
394597b5d8b5SThomas Monjalon 			eth_event_desc[type]);
394676ad4a2dSGaetan Rivet 		fflush(stdout);
394776ad4a2dSGaetan Rivet 	}
3948284c908cSGaetan Rivet 
3949284c908cSGaetan Rivet 	switch (type) {
39504f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
39514f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
39524f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
39534f1ed78eSThomas Monjalon 		break;
3954284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
39554f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
39564f1ed78eSThomas Monjalon 			break;
3957284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3958cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
395961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
396061a3b0e5SAndrew Rybchenko 				"Could not set up deferred device removal\n");
3961284c908cSGaetan Rivet 		break;
396285c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
396385c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
396485c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
396585c6571cSThomas Monjalon 		break;
3966bc70e559SSpike Du 	case RTE_ETH_EVENT_RX_AVAIL_THRESH: {
3967bc70e559SSpike Du 		uint16_t rxq_id;
3968bc70e559SSpike Du 		int ret;
3969bc70e559SSpike Du 
3970bc70e559SSpike Du 		/* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */
3971bc70e559SSpike Du 		for (rxq_id = 0; ; rxq_id++) {
3972bc70e559SSpike Du 			ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id,
3973bc70e559SSpike Du 							    NULL);
3974bc70e559SSpike Du 			if (ret <= 0)
3975bc70e559SSpike Du 				break;
3976bc70e559SSpike Du 			printf("Received avail_thresh event, port: %u, rxq_id: %u\n",
3977bc70e559SSpike Du 			       port_id, rxq_id);
3978f41a5092SSpike Du 
3979f41a5092SSpike Du #ifdef RTE_NET_MLX5
3980f41a5092SSpike Du 			mlx5_test_avail_thresh_event_handler(port_id, rxq_id);
3981f41a5092SSpike Du #endif
3982bc70e559SSpike Du 		}
3983bc70e559SSpike Du 		break;
3984bc70e559SSpike Du 	}
3985284c908cSGaetan Rivet 	default:
3986284c908cSGaetan Rivet 		break;
3987284c908cSGaetan Rivet 	}
3988d6af1a13SBernard Iremonger 	return 0;
398976ad4a2dSGaetan Rivet }
399076ad4a2dSGaetan Rivet 
399197b5d8b5SThomas Monjalon static int
399297b5d8b5SThomas Monjalon register_eth_event_callback(void)
399397b5d8b5SThomas Monjalon {
399497b5d8b5SThomas Monjalon 	int ret;
399597b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
399697b5d8b5SThomas Monjalon 
399797b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
399897b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
399997b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
400097b5d8b5SThomas Monjalon 				event,
400197b5d8b5SThomas Monjalon 				eth_event_callback,
400297b5d8b5SThomas Monjalon 				NULL);
400397b5d8b5SThomas Monjalon 		if (ret != 0) {
400497b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
400597b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
400697b5d8b5SThomas Monjalon 			return -1;
400797b5d8b5SThomas Monjalon 		}
400897b5d8b5SThomas Monjalon 	}
400997b5d8b5SThomas Monjalon 
401097b5d8b5SThomas Monjalon 	return 0;
401197b5d8b5SThomas Monjalon }
401297b5d8b5SThomas Monjalon 
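/*
 * Illustrative sketch: an application interested in a single event can
 * register for it on one port instead of looping over every event type
 * with RTE_ETH_ALL as done above.
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      eth_event_callback, NULL);
 */
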
4013*687a5b12SSinan Kaya static int
4014*687a5b12SSinan Kaya unregister_eth_event_callback(void)
4015*687a5b12SSinan Kaya {
4016*687a5b12SSinan Kaya 	int ret;
4017*687a5b12SSinan Kaya 	enum rte_eth_event_type event;
4018*687a5b12SSinan Kaya 
4019*687a5b12SSinan Kaya 	for (event = RTE_ETH_EVENT_UNKNOWN;
4020*687a5b12SSinan Kaya 			event < RTE_ETH_EVENT_MAX; event++) {
4021*687a5b12SSinan Kaya 		ret = rte_eth_dev_callback_unregister(RTE_ETH_ALL,
4022*687a5b12SSinan Kaya 				event,
4023*687a5b12SSinan Kaya 				eth_event_callback,
4024*687a5b12SSinan Kaya 				NULL);
4025*687a5b12SSinan Kaya 		if (ret != 0) {
4026*687a5b12SSinan Kaya 			TESTPMD_LOG(ERR, "Failed to unregister callback for "
4027*687a5b12SSinan Kaya 					"%s event\n", eth_event_desc[event]);
4028*687a5b12SSinan Kaya 			return -1;
4029*687a5b12SSinan Kaya 		}
4030*687a5b12SSinan Kaya 	}
4031*687a5b12SSinan Kaya 
4032*687a5b12SSinan Kaya 	return 0;
4033*687a5b12SSinan Kaya }
4034*687a5b12SSinan Kaya 
4035fb73e096SJeff Guo /* This function is used by the interrupt thread */
4036fb73e096SJeff Guo static void
4037cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
4038fb73e096SJeff Guo 			     __rte_unused void *arg)
4039fb73e096SJeff Guo {
40402049c511SJeff Guo 	uint16_t port_id;
40412049c511SJeff Guo 	int ret;
40422049c511SJeff Guo 
4043fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
4044fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
4045fb73e096SJeff Guo 			__func__, type);
4046fb73e096SJeff Guo 		fflush(stderr);
4047fb73e096SJeff Guo 	}
4048fb73e096SJeff Guo 
4049fb73e096SJeff Guo 	switch (type) {
4050fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
4051cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
4052fb73e096SJeff Guo 			device_name);
40532049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
40542049c511SJeff Guo 		if (ret) {
40552049c511SJeff Guo 			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
40562049c511SJeff Guo 				device_name);
40572049c511SJeff Guo 			return;
40582049c511SJeff Guo 		}
4059cc1bf307SJeff Guo 		/*
4060cc1bf307SJeff Guo 		 * The user's callback is invoked from the EAL interrupt
4061cc1bf307SJeff Guo 		 * callback, and an interrupt callback must complete before it
4062cc1bf307SJeff Guo 		 * can be unregistered while detaching a device. So return from
4063cc1bf307SJeff Guo 		 * this callback quickly and detach the device via a deferred
4064cc1bf307SJeff Guo 		 * removal. This is a workaround; once device detaching is
4065cc1bf307SJeff Guo 		 * moved into the EAL in the future, the deferred removal can
4066cc1bf307SJeff Guo 		 * be removed.
4067cc1bf307SJeff Guo 		 */
4068cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
4069cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
4070cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
4071cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
4072fb73e096SJeff Guo 		break;
4073fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
4074fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
4075fb73e096SJeff Guo 			device_name);
4076fb73e096SJeff Guo 		/* TODO: After finishing kernel driver binding,
4077fb73e096SJeff Guo 		 * begin attaching the port.
4078fb73e096SJeff Guo 		 */
4079fb73e096SJeff Guo 		break;
4080fb73e096SJeff Guo 	default:
4081fb73e096SJeff Guo 		break;
4082fb73e096SJeff Guo 	}
4083fb73e096SJeff Guo }
4084fb73e096SJeff Guo 
4085f2c5125aSPablo de Lara static void
4086f4d178c1SXueming Li rxtx_port_config(portid_t pid)
4087f2c5125aSPablo de Lara {
4088d44f8a48SQi Zhang 	uint16_t qid;
40895e91aeefSWei Zhao 	uint64_t offloads;
4090f4d178c1SXueming Li 	struct rte_port *port = &ports[pid];
4091f2c5125aSPablo de Lara 
4092d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
40933c4426dbSDmitry Kozlyuk 		offloads = port->rxq[qid].conf.offloads;
40943c4426dbSDmitry Kozlyuk 		port->rxq[qid].conf = port->dev_info.default_rxconf;
4095f4d178c1SXueming Li 
4096f4d178c1SXueming Li 		if (rxq_share > 0 &&
4097f4d178c1SXueming Li 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
4098f4d178c1SXueming Li 			/* Non-zero share group to enable RxQ share. */
40993c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
41003c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
4101f4d178c1SXueming Li 		}
4102f4d178c1SXueming Li 
4103575e0fd1SWei Zhao 		if (offloads != 0)
41043c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.offloads = offloads;
4105d44f8a48SQi Zhang 
4106d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
4107f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
41083c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
4109f2c5125aSPablo de Lara 
4110f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
41113c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
4112f2c5125aSPablo de Lara 
4113f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
41143c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
4115f2c5125aSPablo de Lara 
4116f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
41173c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
4118f2c5125aSPablo de Lara 
4119f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
41203c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_drop_en = rx_drop_en;
4121f2c5125aSPablo de Lara 
4122d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
4123d44f8a48SQi Zhang 	}
4124d44f8a48SQi Zhang 
4125d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
41263c4426dbSDmitry Kozlyuk 		offloads = port->txq[qid].conf.offloads;
41273c4426dbSDmitry Kozlyuk 		port->txq[qid].conf = port->dev_info.default_txconf;
4128575e0fd1SWei Zhao 		if (offloads != 0)
41293c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.offloads = offloads;
4130d44f8a48SQi Zhang 
4131d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
4132f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
41333c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
4134f2c5125aSPablo de Lara 
4135f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
41363c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
4137f2c5125aSPablo de Lara 
4138f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
41393c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
4140f2c5125aSPablo de Lara 
4141f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
41423c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
4143f2c5125aSPablo de Lara 
4144f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
41453c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
4146d44f8a48SQi Zhang 
4147d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
4148d44f8a48SQi Zhang 	}
4149f2c5125aSPablo de Lara }
4150f2c5125aSPablo de Lara 
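/*
 * Illustrative sketch (dev_info, mp, socket_id and nb_rxd are assumed
 * application values): the per-queue configuration prepared above is
 * consumed by the queue setup calls, e.g. for Rx:
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_free_thresh = 32;	(application override)
 *	rte_eth_rx_queue_setup(pid, qid, nb_rxd, socket_id, &rxconf, mp);
 */
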
41510c4abd36SSteve Yang /*
4152b563c142SFerruh Yigit  * Helper function to set MTU from frame size
41530c4abd36SSteve Yang  *
41540c4abd36SSteve Yang  * port->dev_info should be set before calling this function.
41550c4abd36SSteve Yang  *
41560c4abd36SSteve Yang  * return 0 on success, negative on error
41570c4abd36SSteve Yang  */
41580c4abd36SSteve Yang int
4159b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
41600c4abd36SSteve Yang {
41610c4abd36SSteve Yang 	struct rte_port *port = &ports[portid];
41620c4abd36SSteve Yang 	uint32_t eth_overhead;
41631bb4a528SFerruh Yigit 	uint16_t mtu, new_mtu;
41640c4abd36SSteve Yang 
41651bb4a528SFerruh Yigit 	eth_overhead = get_eth_overhead(&port->dev_info);
41661bb4a528SFerruh Yigit 
41671bb4a528SFerruh Yigit 	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
41681bb4a528SFerruh Yigit 		fprintf(stderr, "Failed to get MTU for port %u\n", portid);
41691bb4a528SFerruh Yigit 		return -1;
41701bb4a528SFerruh Yigit 	}
41711bb4a528SFerruh Yigit 
41721bb4a528SFerruh Yigit 	new_mtu = max_rx_pktlen - eth_overhead;
41730c4abd36SSteve Yang 
41741bb4a528SFerruh Yigit 	if (mtu == new_mtu)
41751bb4a528SFerruh Yigit 		return 0;
41761bb4a528SFerruh Yigit 
41771bb4a528SFerruh Yigit 	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
417861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
417961a3b0e5SAndrew Rybchenko 			"Failed to set MTU to %u for port %u\n",
41801bb4a528SFerruh Yigit 			new_mtu, portid);
41811bb4a528SFerruh Yigit 		return -1;
41820c4abd36SSteve Yang 	}
41830c4abd36SSteve Yang 
41841bb4a528SFerruh Yigit 	port->dev_conf.rxmode.mtu = new_mtu;
41851bb4a528SFerruh Yigit 
41860c4abd36SSteve Yang 	return 0;
41870c4abd36SSteve Yang }
41880c4abd36SSteve Yang 
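/*
 * Worked example (values assumed): with the common Ethernet overhead
 * of RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, a
 * max_rx_pktlen of 1518 gives new_mtu = 1518 - 18 = 1500, the standard
 * Ethernet MTU. A driver reporting extra overhead (e.g. VLAN tags)
 * yields a correspondingly smaller MTU for the same frame size.
 */
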
4189013af9b6SIntel void
4190013af9b6SIntel init_port_config(void)
4191013af9b6SIntel {
4192013af9b6SIntel 	portid_t pid;
4193013af9b6SIntel 	struct rte_port *port;
4194655eae01SJie Wang 	int ret, i;
4195013af9b6SIntel 
41967d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
4197013af9b6SIntel 		port = &ports[pid];
41986f51deb9SIvan Ilchenko 
41996f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
42006f51deb9SIvan Ilchenko 		if (ret != 0)
42016f51deb9SIvan Ilchenko 			return;
42026f51deb9SIvan Ilchenko 
42033ce690d3SBruce Richardson 		if (nb_rxq > 1) {
4204013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
420590892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
4206422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
4207af75078fSIntel 		} else {
4208013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
4209013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
4210af75078fSIntel 		}
42113ce690d3SBruce Richardson 
42125f592039SJingjing Wu 		if (port->dcb_flag == 0) {
4213655eae01SJie Wang 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
4214f9295aa2SXiaoyu Min 				port->dev_conf.rxmode.mq_mode =
4215f9295aa2SXiaoyu Min 					(enum rte_eth_rx_mq_mode)
4216295968d1SFerruh Yigit 						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
4217655eae01SJie Wang 			} else {
4218295968d1SFerruh Yigit 				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
4219655eae01SJie Wang 				port->dev_conf.rxmode.offloads &=
4220295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4221655eae01SJie Wang 
4222655eae01SJie Wang 				for (i = 0;
4223655eae01SJie Wang 				     i < port->dev_info.nb_rx_queues;
4224655eae01SJie Wang 				     i++)
42253c4426dbSDmitry Kozlyuk 					port->rxq[i].conf.offloads &=
4226295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4227655eae01SJie Wang 			}
42283ce690d3SBruce Richardson 		}
42293ce690d3SBruce Richardson 
4230f4d178c1SXueming Li 		rxtx_port_config(pid);
4231013af9b6SIntel 
4232a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
4233a5279d25SIgor Romanov 		if (ret != 0)
4234a5279d25SIgor Romanov 			return;
4235013af9b6SIntel 
42360a0821bcSPaulis Gributs 		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
42378ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
42380a0821bcSPaulis Gributs 		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
4239284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
4240013af9b6SIntel 	}
4241013af9b6SIntel }
4242013af9b6SIntel 
424315e34522SLong Wu void set_port_member_flag(portid_t member_pid)
424441b05095SBernard Iremonger {
424541b05095SBernard Iremonger 	struct rte_port *port;
424641b05095SBernard Iremonger 
424715e34522SLong Wu 	port = &ports[member_pid];
424815e34522SLong Wu 	port->member_flag = 1;
424941b05095SBernard Iremonger }
425041b05095SBernard Iremonger 
425115e34522SLong Wu void clear_port_member_flag(portid_t member_pid)
425241b05095SBernard Iremonger {
425341b05095SBernard Iremonger 	struct rte_port *port;
425441b05095SBernard Iremonger 
425515e34522SLong Wu 	port = &ports[member_pid];
425615e34522SLong Wu 	port->member_flag = 0;
425741b05095SBernard Iremonger }
425841b05095SBernard Iremonger 
425915e34522SLong Wu uint8_t port_is_bonding_member(portid_t member_pid)
42600e545d30SBernard Iremonger {
42610e545d30SBernard Iremonger 	struct rte_port *port;
42620a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
42630a0821bcSPaulis Gributs 	int ret;
42640e545d30SBernard Iremonger 
426515e34522SLong Wu 	port = &ports[member_pid];
426615e34522SLong Wu 	ret = eth_dev_info_get_print_err(member_pid, &dev_info);
42670a0821bcSPaulis Gributs 	if (ret != 0) {
42680a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
42690a0821bcSPaulis Gributs 			"Failed to get device info for port id %d, "
42704f840086SLong Wu 			"cannot determine if the port is a bonding member\n",
427115e34522SLong Wu 			member_pid);
42720a0821bcSPaulis Gributs 		return 0;
42730a0821bcSPaulis Gributs 	}
427415e34522SLong Wu 
427515e34522SLong Wu 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDING_MEMBER) || (port->member_flag == 1))
4276b8b8b344SMatan Azrad 		return 1;
4277b8b8b344SMatan Azrad 	return 0;
42780e545d30SBernard Iremonger }
42790e545d30SBernard Iremonger 
4280013af9b6SIntel const uint16_t vlan_tags[] = {
4281013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
4282013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
4283013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
4284013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
4285013af9b6SIntel };
4286013af9b6SIntel 
4287013af9b6SIntel static  int
4288ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
42891a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
42901a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
42911a572499SJingjing Wu 		 uint8_t pfc_en)
4292013af9b6SIntel {
4293013af9b6SIntel 	uint8_t i;
4294ac7c491cSKonstantin Ananyev 	int32_t rc;
4295ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
4296af75078fSIntel 
4297af75078fSIntel 	/*
4298013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
4299013af9b6SIntel 	 * Builds up the correct configuration for DCB+VT based on the VLAN tags array
4300af75078fSIntel 	 */
43011a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
43021a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
43031a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
43041a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
43051a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
4306013af9b6SIntel 
4307547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
43081a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
43091a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
43101a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
4311295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
43121a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
4313295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
4314013af9b6SIntel 
43151a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
43161a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
43171a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
43181a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
43191a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
4320af75078fSIntel 		}
4321295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4322f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
4323f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
4324013af9b6SIntel 		}
4325013af9b6SIntel 
4326013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
4327f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4328f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4329295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
4330295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
43311a572499SJingjing Wu 	} else {
43321a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
43331a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
43341a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
43351a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
4336013af9b6SIntel 
43375139bc12STing Xu 		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
43385139bc12STing Xu 
4339ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
4340ac7c491cSKonstantin Ananyev 		if (rc != 0)
4341ac7c491cSKonstantin Ananyev 			return rc;
4342ac7c491cSKonstantin Ananyev 
43431a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
43441a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
43451a572499SJingjing Wu 
4346295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4347bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
4348bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
4349013af9b6SIntel 		}
4350ac7c491cSKonstantin Ananyev 
4351f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4352f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4353295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
4354ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
4355295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
43561a572499SJingjing Wu 	}
43571a572499SJingjing Wu 
43581a572499SJingjing Wu 	if (pfc_en)
43591a572499SJingjing Wu 		eth_conf->dcb_capability_en =
4360295968d1SFerruh Yigit 				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
4361013af9b6SIntel 	else
4362295968d1SFerruh Yigit 		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
4363013af9b6SIntel 
4364013af9b6SIntel 	return 0;
4365013af9b6SIntel }
4366013af9b6SIntel 
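/*
 * Worked example of the dcb_tc[] mapping above: with num_tcs == 4, the
 * assignment dcb_tc[i] = i % num_tcs maps the 8 user priorities as
 * {0,4}->TC0, {1,5}->TC1, {2,6}->TC2 and {3,7}->TC3; with 8 TCs the
 * mapping is the identity.
 */
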
4367013af9b6SIntel int
43681a572499SJingjing Wu init_port_dcb_config(portid_t pid,
43691a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
43701a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
43711a572499SJingjing Wu 		     uint8_t pfc_en)
4372013af9b6SIntel {
4373013af9b6SIntel 	struct rte_eth_conf port_conf;
4374013af9b6SIntel 	struct rte_port *rte_port;
4375013af9b6SIntel 	int retval;
4376013af9b6SIntel 	uint16_t i;
4377013af9b6SIntel 
4378a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
4379a550baf2SMin Hu (Connor) 		fprintf(stderr, "The multi-process feature does not support DCB.\n");
4380a550baf2SMin Hu (Connor) 		return -ENOTSUP;
4381a550baf2SMin Hu (Connor) 	}
43822a977b89SWenzhuo Lu 	rte_port = &ports[pid];
4383013af9b6SIntel 
4384c1ba6c32SHuisong Li 	/* retain the original device configuration. */
4385c1ba6c32SHuisong Li 	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
4386d5354e89SYanglong Wu 
4387013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
4388ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
4389013af9b6SIntel 	if (retval < 0)
4390013af9b6SIntel 		return retval;
4391295968d1SFerruh Yigit 	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4392cbe70fdeSJie Wang 	/* remove RSS HASH offload for DCB in vt mode */
4393cbe70fdeSJie Wang 	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
4394cbe70fdeSJie Wang 		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4395cbe70fdeSJie Wang 		for (i = 0; i < nb_rxq; i++)
43963c4426dbSDmitry Kozlyuk 			rte_port->rxq[i].conf.offloads &=
4397cbe70fdeSJie Wang 				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4398cbe70fdeSJie Wang 	}
4399013af9b6SIntel 
44002f203d44SQi Zhang 	/* re-configure the device. */
44012b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
44022b0e0ebaSChenbo Xia 	if (retval < 0)
44032b0e0ebaSChenbo Xia 		return retval;
44046f51deb9SIvan Ilchenko 
44056f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
44066f51deb9SIvan Ilchenko 	if (retval != 0)
44076f51deb9SIvan Ilchenko 		return retval;
44082a977b89SWenzhuo Lu 
44092a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
44102a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
44112a977b89SWenzhuo Lu 	 */
44122a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
44132a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
441461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
441561a3b0e5SAndrew Rybchenko 			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
441661a3b0e5SAndrew Rybchenko 			pid);
44172a977b89SWenzhuo Lu 		return -1;
44182a977b89SWenzhuo Lu 	}
44192a977b89SWenzhuo Lu 
44202a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
44212a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
44222a977b89SWenzhuo Lu 	 */
44232a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
442486ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
442586ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
442686ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
442786ef65eeSBernard Iremonger 		} else {
44282a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
44292a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
443086ef65eeSBernard Iremonger 		}
44312a977b89SWenzhuo Lu 	} else {
44322a977b89SWenzhuo Lu 		/* if VT is disabled, use all PF queues */
44332a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
44342a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
44352a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
44362a977b89SWenzhuo Lu 		} else {
44372a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
44382a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
44402a977b89SWenzhuo Lu 		}
44412a977b89SWenzhuo Lu 	}
44422a977b89SWenzhuo Lu 	rx_free_thresh = 64;
44432a977b89SWenzhuo Lu 
4444013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4445013af9b6SIntel 
4446f4d178c1SXueming Li 	rxtx_port_config(pid);
4447013af9b6SIntel 	/* VLAN filter */
4448295968d1SFerruh Yigit 	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
44491a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
4450013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
4451013af9b6SIntel 
4452a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4453a5279d25SIgor Romanov 	if (retval != 0)
4454a5279d25SIgor Romanov 		return retval;
4455a5279d25SIgor Romanov 
44567741e4cfSIntel 	rte_port->dcb_flag = 1;
44577741e4cfSIntel 
4458a690a070SHuisong Li 	/* Enter DCB configuration status */
4459a690a070SHuisong Li 	/* Enter DCB configuration state */
4460a690a070SHuisong Li 
4461013af9b6SIntel 	return 0;
4462af75078fSIntel }
4463af75078fSIntel 
4464ffc468ffSTetsuya Mukawa static void
4465ffc468ffSTetsuya Mukawa init_port(void)
4466ffc468ffSTetsuya Mukawa {
44671b9f2746SGregory Etelson 	int i;
44681b9f2746SGregory Etelson 
4469ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
4470ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
4471ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4472ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
4473ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
4474ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
4475ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
4476ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
4477ffc468ffSTetsuya Mukawa 	}
4478236bc417SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
4479236bc417SGregory Etelson 		ports[i].fwd_mac_swap = 1;
448063b72657SIvan Ilchenko 		ports[i].xstats_info.allocated = false;
44811b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
4482236bc417SGregory Etelson 	}
448329841336SPhil Yang 	/* Initialize ports NUMA structures */
448429841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
448529841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
448629841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4487ffc468ffSTetsuya Mukawa }
4488ffc468ffSTetsuya Mukawa 
4489d3a274ceSZhihong Wang static void
4490cfea1f30SPablo de Lara print_stats(void)
4491cfea1f30SPablo de Lara {
4492cfea1f30SPablo de Lara 	uint8_t i;
4493cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
4494cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4495cfea1f30SPablo de Lara 
4496cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
4497cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
4498cfea1f30SPablo de Lara 
4499cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
4500cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4501cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
4502683d1e82SIgor Romanov 
4503683d1e82SIgor Romanov 	fflush(stdout);
4504cfea1f30SPablo de Lara }
4505cfea1f30SPablo de Lara 
4506cfea1f30SPablo de Lara static void
45070fd1386cSStephen Hemminger signal_handler(int signum __rte_unused)
4508d3a274ceSZhihong Wang {
4509d9a191a0SPhil Yang 	f_quit = 1;
4510f1d0993eSStephen Hemminger 	prompt_exit();
4511d3a274ceSZhihong Wang }
4512d3a274ceSZhihong Wang 
4513af75078fSIntel int
4514af75078fSIntel main(int argc, char** argv)
4515af75078fSIntel {
4516af75078fSIntel 	int diag;
4517f8244c63SZhiyong Yang 	portid_t port_id;
45184918a357SXiaoyun Li 	uint16_t count;
4519fb73e096SJeff Guo 	int ret;
4520af75078fSIntel 
4521f1d0993eSStephen Hemminger #ifdef RTE_EXEC_ENV_WINDOWS
4522d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
4523d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
4524f1d0993eSStephen Hemminger #else
4525f1d0993eSStephen Hemminger 	/* Want read() not to be restarted on signal */
4526f1d0993eSStephen Hemminger 	struct sigaction action = {
4527f1d0993eSStephen Hemminger 		.sa_handler = signal_handler,
4528f1d0993eSStephen Hemminger 	};
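	/* Without SA_RESTART, a caught signal makes blocking read()/select() return EINTR, letting the main loop notice f_quit. */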
4529f1d0993eSStephen Hemminger 
4530f1d0993eSStephen Hemminger 	sigaction(SIGINT, &action, NULL);
4531f1d0993eSStephen Hemminger 	sigaction(SIGTERM, &action, NULL);
4532f1d0993eSStephen Hemminger #endif
4533d3a274ceSZhihong Wang 
4534285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
4535285fd101SOlivier Matz 	if (testpmd_logtype < 0)
453616267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type\n");
4537285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4538285fd101SOlivier Matz 
45399201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
45409201806eSStephen Hemminger 	if (diag < 0)
454116267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
454216267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
45439201806eSStephen Hemminger 
4544563fbd08SDavid Marchand 	/* allocate port structures, and init them */
4545563fbd08SDavid Marchand 	init_port();
4546563fbd08SDavid Marchand 
454797b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
454897b5d8b5SThomas Monjalon 	if (ret != 0)
454916267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events\n");
455097b5d8b5SThomas Monjalon 
4551a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
45524aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
4553e9436f54STiwei Bie 	rte_pdump_init();
45544aa0d012SAnatoly Burakov #endif
45554aa0d012SAnatoly Burakov 
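	/* Build a dense list of valid port ids: RTE_ETH_FOREACH_DEV skips unused slots. */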
45564918a357SXiaoyun Li 	count = 0;
45574918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
45584918a357SXiaoyun Li 		ports_ids[count] = port_id;
45594918a357SXiaoyun Li 		count++;
45604918a357SXiaoyun Li 	}
45614918a357SXiaoyun Li 	nb_ports = (portid_t) count;
45624aa0d012SAnatoly Burakov 	if (nb_ports == 0)
45634aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
45644aa0d012SAnatoly Burakov 
45654aa0d012SAnatoly Burakov 	set_def_fwd_config();
45664aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
456716267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
456816267ceeSStephen Hemminger 			 "Check the core mask argument\n");
45694aa0d012SAnatoly Burakov 
4570e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
4571a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4572e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
4573e505d84cSAnatoly Burakov #endif
4574a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
4575e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
4576e505d84cSAnatoly Burakov #endif
4577e505d84cSAnatoly Burakov 
4578fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
45795fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
4580fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
4581fb7b8b32SAnatoly Burakov #else
4582fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
4583fb7b8b32SAnatoly Burakov #endif
4584fb7b8b32SAnatoly Burakov 
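	/*
	 * rte_eal_init() returned the number of arguments it consumed, so
	 * skip past the EAL arguments and parse only testpmd's own, e.g.
	 * (an illustrative invocation, not an exhaustive one):
	 *   dpdk-testpmd -l 0-3 -n 4 -- -i --rxq=4 --txq=4
	 */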
4585e505d84cSAnatoly Burakov 	argc -= diag;
4586e505d84cSAnatoly Burakov 	argv += diag;
4587e505d84cSAnatoly Burakov 	if (argc > 1)
4588e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
4589e505d84cSAnatoly Burakov 
4590761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
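	/* Pin all current and future pages in RAM so the fast path never takes a page fault. */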
4591e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4592285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
45931c036b16SEelco Chaudron 			strerror(errno));
45941c036b16SEelco Chaudron 	}
4595761f7ae1SJie Zhou #endif
45961c036b16SEelco Chaudron 
459799cabef0SPablo de Lara 	if (tx_first && interactive)
459899cabef0SPablo de Lara 	rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
459999cabef0SPablo de Lara 				"interactive mode.\n");
46008820cba4SDavid Hunt 
46018820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
460261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
460361a3b0e5SAndrew Rybchenko 			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
46048820cba4SDavid Hunt 		lsc_interrupt = 0;
46058820cba4SDavid Hunt 	}
46068820cba4SDavid Hunt 
46075a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
460861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
460961a3b0e5SAndrew Rybchenko 			"Warning: either the Rx or the Tx queue count should be non-zero\n");
46105a8fb55cSReshma Pattan 
46115a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
461261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
461361a3b0e5SAndrew Rybchenko 			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent it from being fully tested.\n",
4614af75078fSIntel 			nb_rxq, nb_txq);
4615af75078fSIntel 
4616af75078fSIntel 	init_config();
4617fb73e096SJeff Guo 
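	/* With --hot-plug, monitor device attach/detach events (assumed: kernel uevents on Linux) and route them to dev_event_callback(). */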
4618fb73e096SJeff Guo 	if (hot_plug) {
46192049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
4620fb73e096SJeff Guo 		if (ret) {
46212049c511SJeff Guo 			RTE_LOG(ERR, EAL,
46222049c511SJeff Guo 				"Failed to enable hotplug handling.\n");
4623fb73e096SJeff Guo 			return -1;
4624fb73e096SJeff Guo 		}
4625fb73e096SJeff Guo 
46262049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
46272049c511SJeff Guo 		if (ret) {
46282049c511SJeff Guo 			RTE_LOG(ERR, EAL,
46292049c511SJeff Guo 				"Failed to start device event monitoring.\n");
46302049c511SJeff Guo 			return -1;
46312049c511SJeff Guo 		}
46322049c511SJeff Guo 
46332049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
4634cc1bf307SJeff Guo 			dev_event_callback, NULL);
46352049c511SJeff Guo 		if (ret) {
46362049c511SJeff Guo 			RTE_LOG(ERR, EAL,
46372049c511SJeff Guo 				"Failed to register device event callback\n");
46382049c511SJeff Guo 			return -1;
46392049c511SJeff Guo 		}
4640fb73e096SJeff Guo 	}
4641fb73e096SJeff Guo 
46427e403725SGregory Etelson 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0) {
46437e403725SGregory Etelson 		if (!interactive) {
46447e403725SGregory Etelson 			rte_eal_cleanup();
4645148f963fSBruce Richardson 			rte_exit(EXIT_FAILURE, "Failed to start ports\n");
46467e403725SGregory Etelson 		}
46477e403725SGregory Etelson 		fprintf(stderr, "Failed to start ports\n");
46487e403725SGregory Etelson 	}
4649af75078fSIntel 
4650ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
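	/* testpmd forwards frames regardless of destination MAC, so promiscuous RX is needed out of the box. */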
465134fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
465234fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
465334fc1051SIvan Ilchenko 		if (ret != 0)
465461a3b0e5SAndrew Rybchenko 			fprintf(stderr,
465561a3b0e5SAndrew Rybchenko 				"Error while enabling promiscuous mode for port %u: %s - ignored\n",
465634fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
465734fc1051SIvan Ilchenko 	}
4658af75078fSIntel 
4659bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS
46607e4441c8SRemy Horton 	/* Init metrics library */
46617e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
4662bb9be9a4SDavid Marchand #endif
46637e4441c8SRemy Horton 
4664a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
466562d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
466662d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
466762d3216dSReshma Pattan 		if (ret)
466861a3b0e5SAndrew Rybchenko 			fprintf(stderr,
466961a3b0e5SAndrew Rybchenko 				"Warning: latencystats init() returned error %d\n",
467061a3b0e5SAndrew Rybchenko 				ret);
		else
467161a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Latencystats running on lcore %d\n",
467262d3216dSReshma Pattan 				latencystats_lcore_id);
467362d3216dSReshma Pattan 	}
467462d3216dSReshma Pattan #endif
467562d3216dSReshma Pattan 
46767e4441c8SRemy Horton 	/* Setup bitrate stats */
4677a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4678e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
46797e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
46807e4441c8SRemy Horton 		if (bitrate_data == NULL)
4681e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
4682e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
46837e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
4684e25e6c70SRemy Horton 	}
46857e4441c8SRemy Horton #endif
468699a4974aSRobin Jarry 
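	/* Assumed: the callback feeds per-lcore busy/total cycles to EAL telemetry (/eal/lcore/usage). */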
468799a4974aSRobin Jarry 	if (record_core_cycles)
468899a4974aSRobin Jarry 		rte_lcore_register_usage_cb(lcore_usage_callback);
468999a4974aSRobin Jarry 
4690a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE
4691592ab76fSDavid Marchand 	if (init_cmdline() != 0)
4692592ab76fSDavid Marchand 		rte_exit(EXIT_FAILURE,
4693592ab76fSDavid Marchand 			"Could not initialise cmdline context.\n");
4694592ab76fSDavid Marchand 
469581ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
469681ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
469781ef862bSAllain Legacy 
4698ca7feb22SCyril Chemparathy 	if (interactive == 1) {
4699ca7feb22SCyril Chemparathy 		if (auto_start) {
4700ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
4701ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
4702ca7feb22SCyril Chemparathy 		}
4703af75078fSIntel 		prompt();
4704ca7feb22SCyril Chemparathy 	} else
47050d56cb81SThomas Monjalon #endif
47060d56cb81SThomas Monjalon 	{
4707af75078fSIntel 	printf("No interactive command line: starting packet forwarding\n");
470899cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
4709cfea1f30SPablo de Lara 		if (stats_period != 0) {
4710cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
4711cfea1f30SPablo de Lara 			uint64_t timer_period;
4712cfea1f30SPablo de Lara 
4713cfea1f30SPablo de Lara 			/* Convert to number of cycles */
4714cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
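			/* e.g. --stats-period 5 with a 2 GHz timer gives 1e10 cycles; prev_time starts at 0, so the first pass prints stats immediately. */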
4715cfea1f30SPablo de Lara 
4716d9a191a0SPhil Yang 			while (f_quit == 0) {
4717cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
4718cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
4719cfea1f30SPablo de Lara 
4720cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
4721cfea1f30SPablo de Lara 					print_stats();
4722cfea1f30SPablo de Lara 					/* Reset the timer */
4723cfea1f30SPablo de Lara 					diff_time = 0;
4724cfea1f30SPablo de Lara 				}
4726cfea1f30SPablo de Lara 				prev_time = cur_time;
4725cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
4727761f7ae1SJie Zhou 				rte_delay_us_sleep(US_PER_S);
4728cfea1f30SPablo de Lara 			}
47290fd1386cSStephen Hemminger 		} else {
47300fd1386cSStephen Hemminger 			char c;
47310fd1386cSStephen Hemminger 			fd_set fds;
4732cfea1f30SPablo de Lara 
4733af75078fSIntel 			printf("Press enter to exit\n");
47340fd1386cSStephen Hemminger 
47350fd1386cSStephen Hemminger 			FD_ZERO(&fds);
47360fd1386cSStephen Hemminger 			FD_SET(0, &fds);
47370fd1386cSStephen Hemminger 
47380fd1386cSStephen Hemminger 			/* wait for signal or enter */
47390fd1386cSStephen Hemminger 			ret = select(1, &fds, NULL, NULL, NULL);
47400fd1386cSStephen Hemminger 			if (ret < 0 && errno != EINTR)
47410fd1386cSStephen Hemminger 				rte_exit(EXIT_FAILURE,
47420fd1386cSStephen Hemminger 					 "Select failed: %s\n",
47430fd1386cSStephen Hemminger 					 strerror(errno));
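			/* select() failing with EINTR means a signal arrived: f_quit is set, so fall through to shutdown. */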
47440fd1386cSStephen Hemminger 
47450fd1386cSStephen Hemminger 			/* if got enter then consume it */
47460fd1386cSStephen Hemminger 			if (ret == 1 && read(0, &c, 1) < 0)
47470fd1386cSStephen Hemminger 				rte_exit(EXIT_FAILURE,
47480fd1386cSStephen Hemminger 					 "Read failed: %s\n",
47490fd1386cSStephen Hemminger 					 strerror(errno));
4750af75078fSIntel 		}
47510fd1386cSStephen Hemminger 	}
47520fd1386cSStephen Hemminger 
47530fd1386cSStephen Hemminger 	pmd_test_exit();
47540fd1386cSStephen Hemminger 
47550fd1386cSStephen Hemminger #ifdef RTE_LIB_PDUMP
47560fd1386cSStephen Hemminger 	/* uninitialize packet capture framework */
47570fd1386cSStephen Hemminger 	rte_pdump_uninit();
47580fd1386cSStephen Hemminger #endif
47590fd1386cSStephen Hemminger #ifdef RTE_LIB_LATENCYSTATS
47600fd1386cSStephen Hemminger 	if (latencystats_enabled != 0)
47610fd1386cSStephen Hemminger 		rte_latencystats_uninit();
47620fd1386cSStephen Hemminger #endif
4763af75078fSIntel 
4764*687a5b12SSinan Kaya 	ret = unregister_eth_event_callback();
4765*687a5b12SSinan Kaya 	if (ret != 0)
4766*687a5b12SSinan Kaya 		rte_exit(EXIT_FAILURE, "Cannot unregister for ethdev events\n");
4767*687a5b12SSinan Kaya 
47695e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
47705e516c89SStephen Hemminger 	if (ret != 0)
47715e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
47725e516c89SStephen Hemminger 			 "EAL cleanup failed: %s\n", strerror(-ret));
47735e516c89SStephen Hemminger 
47745e516c89SStephen Hemminger 	return EXIT_SUCCESS;
4775af75078fSIntel }
4776