/* xref: /dpdk/app/test-pmd/testpmd.c (revision 34847a73034566ed1dab8bbc6882a12492b7f7fd) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif
#ifdef RTE_NET_BOND
#include <rte_eth_bond.h>
#endif
#ifdef RTE_NET_MLX5
#include "mlx5_testpmd.h"
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
/*
 * Zone size with the malloc overhead (max of debug and release variants)
 * must fit into the smallest supported hugepage size (2M),
 * so that an IOVA-contiguous zone of this size can always be allocated
 * if there are free 2M hugepages.
 */
#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
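/*
 * Worked example of the define above (assuming 64-byte cache lines; the
 * actual value of RTE_CACHE_LINE_SIZE is build-dependent):
 * EXTBUF_ZONE_SIZE = 2097152 - 4 * 64 = 2096896 bytes, which leaves 256
 * bytes of headroom for the malloc element overhead inside one 2M page.
 */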

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the main core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports, among CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< NUMA enabled by default */
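/*
 * Note: NUMA-aware allocation can be turned off from the command line;
 * testpmd provides a --no-numa option for this (illustrative invocation:
 * "dpdk-testpmd -l 0-3 -n 4 -- --no-numa").
 */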

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
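/*
 * Note: the allocation type is chosen with testpmd's --mp-alloc option,
 * e.g. "--mp-alloc=xmem" to exercise the external-memory path below
 * (illustrative; see the testpmd user guide for the accepted values).
 */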

/*
 * Store the sockets on which the memory pools used by each port
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the RX rings used by each port
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the TX rings used by each port
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
	&recycle_mbufs_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
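/*
 * Note: multiple mbuf data sizes only take effect together with buffer
 * split; they are given as a comma-separated list on the command line,
 * e.g. "--mbuf-size=2048,4096" (illustrative values).
 */
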
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated. Set a flag to exit the stats period loop after receiving
 * SIGINT/SIGTERM.
 */
volatile uint8_t f_quit;
uint8_t cl_quit; /* Quit testpmd from cmdline. */

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if any of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT];

uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
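/*
 * Note: the TXONLY segment layout can be changed at runtime with the
 * "set txpkts <len0[,len1]*>" command, e.g. "set txpkts 64,128"
 * (illustrative values).
 */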
263af75078fSIntel 
26479bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
26579bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
26679bec05bSKonstantin Ananyev 
26782010ef5SYongseok Koh uint8_t txonly_multi_flow;
26882010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
26982010ef5SYongseok Koh 
2704940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter;
2714940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */
2724940344dSViacheslav Ovsiienko 
2734940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra;
2744940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */
2754940344dSViacheslav Ovsiienko 
276af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
2776c02043eSIgor Russkikh uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
278861e7684SZhihong Wang int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
279e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
280af75078fSIntel 
281900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
282900550deSIntel uint8_t dcb_config = 0;
283900550deSIntel 
284af75078fSIntel /*
285af75078fSIntel  * Configurable number of RX/TX queues.
286af75078fSIntel  */
287af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
288af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
289af75078fSIntel 
290af75078fSIntel /*
291af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2928599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
293af75078fSIntel  */
2944ed89049SDavid Marchand #define RX_DESC_DEFAULT 0
2954ed89049SDavid Marchand #define TX_DESC_DEFAULT 0
2964ed89049SDavid Marchand uint16_t nb_rxd = RX_DESC_DEFAULT; /**< Number of RX descriptors. */
2974ed89049SDavid Marchand uint16_t nb_txd = TX_DESC_DEFAULT; /**< Number of TX descriptors. */
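/*
 * A zero default means "let the driver choose"; explicit ring sizes can
 * be requested with the --rxd=<n> and --txd=<n> command-line options
 * (illustrative: "--rxd=512 --txd=512").
 */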

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable sub-forwarding mode for the noisy_vnf forwarding mode.
 */
enum noisy_fwd_mode noisy_fwd_mode;

/* String version of enum noisy_fwd_mode */
const char * const noisy_fwd_mode_desc[] = {
	[NOISY_FWD_MODE_IO] = "io",
	[NOISY_FWD_MODE_MAC] = "mac",
	[NOISY_FWD_MODE_MACSWAP] = "macswap",
	[NOISY_FWD_MODE_5TSWAP] = "5tswap",
	[NOISY_FWD_MODE_MAX] = NULL,
};

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
389af75078fSIntel 
3907741e4cfSIntel /*
3917741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3927741e4cfSIntel  */
3937741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3947741e4cfSIntel 
395af75078fSIntel /*
3967ee3e944SVasily Philipov  * Flow API isolated mode.
3977ee3e944SVasily Philipov  */
3987ee3e944SVasily Philipov uint8_t flow_isolate_all;
3997ee3e944SVasily Philipov 
4007ee3e944SVasily Philipov /*
401543df472SChengwen Feng  * Disable port flow flush when stop port.
402543df472SChengwen Feng  */
403543df472SChengwen Feng uint8_t no_flow_flush = 0; /* do flow flush by default */
404543df472SChengwen Feng 
405543df472SChengwen Feng /*
406bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
407bc202406SDavid Marchand  */
408bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
409bc202406SDavid Marchand 
410bc202406SDavid Marchand /*
4116937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
4126937d210SStephen Hemminger  */
4136937d210SStephen Hemminger uint8_t no_device_start = 0;
4146937d210SStephen Hemminger 
4156937d210SStephen Hemminger /*
4168ea656f8SGaetan Rivet  * Enable link status change notification
4178ea656f8SGaetan Rivet  */
4188ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
4198ea656f8SGaetan Rivet 
4208ea656f8SGaetan Rivet /*
421284c908cSGaetan Rivet  * Enable device removal notification.
422284c908cSGaetan Rivet  */
423284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
424284c908cSGaetan Rivet 
425fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
426fb73e096SJeff Guo 
4274f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
4284f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
4294f1ed78eSThomas Monjalon 
430b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
431b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
432b0a9354aSPavan Nikhilesh 
43397b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
43497b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
43597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
43697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
43797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
43897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
43997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
44097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
44197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
44297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
44397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
44497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
4450e459ffaSDong Zhou 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
446bc70e559SSpike Du 	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
447eb0d471aSKalesh AP 	[RTE_ETH_EVENT_ERR_RECOVERING] = "error recovering",
448eb0d471aSKalesh AP 	[RTE_ETH_EVENT_RECOVERY_SUCCESS] = "error recovery successful",
449eb0d471aSKalesh AP 	[RTE_ETH_EVENT_RECOVERY_FAILED] = "error recovery failed",
45097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
45197b5d8b5SThomas Monjalon };
45297b5d8b5SThomas Monjalon 
453284c908cSGaetan Rivet /*
4543af72783SGaetan Rivet  * Display or mask ether events
4553af72783SGaetan Rivet  * Default to all events except VF_MBOX
4563af72783SGaetan Rivet  */
4573af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
4583af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
4593af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
4603af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
461badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
4623af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
4630e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
464eb0d471aSKalesh AP 			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED) |
465eb0d471aSKalesh AP 			    (UINT32_C(1) << RTE_ETH_EVENT_ERR_RECOVERING) |
466eb0d471aSKalesh AP 			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_SUCCESS) |
467eb0d471aSKalesh AP 			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_FAILED);
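/*
 * Sketch of how such a mask is typically consumed (illustrative, not a
 * quote of the callback declared below): an event of type 'type' is
 * reported only when (event_print_mask & (UINT32_C(1) << type)) is
 * non-zero.
 */
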
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measure of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group, 0 disables it.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif

/*
 * Hexadecimal bitmask of the RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;

static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					dev_conf);
	return 0;
}

static int
change_bonding_member_port_status(portid_t bond_pid, bool is_stop)
{
#ifdef RTE_NET_BOND

	portid_t member_pids[RTE_MAX_ETHPORTS];
	struct rte_port *port;
	int num_members;
	portid_t member_pid;
	int i;

	num_members = rte_eth_bond_members_get(bond_pid, member_pids,
						RTE_MAX_ETHPORTS);
	if (num_members < 0) {
		fprintf(stderr, "Failed to get member list for port = %u\n",
			bond_pid);
		return num_members;
	}

	for (i = 0; i < num_members; i++) {
		member_pid = member_pids[i];
		port = &ports[member_pid];
		port->port_status =
			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
	}
#else
	RTE_SET_USED(bond_pid);
	RTE_SET_USED(is_stop);
#endif
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_start(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Starting a bonding port also starts all members under the
		 * bonding device. So if this port is a bonding device, we
		 * need to modify the port status of these members.
		 */
		if (port->bond_flag == 1)
			return change_bonding_member_port_status(port_id, false);
	}

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_stop(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Stopping a bonding port also stops all members under the
		 * bonding device. So if this port is a bonding device, we
		 * need to modify the port status of these members.
		 */
		if (port->bond_flag == 1)
			return change_bonding_member_port_status(port_id, true);
	}

	return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
				enum rte_dev_event_type type,
				void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if a socket is new (not yet discovered).
 * Returns a positive value if the socket is new, zero if it was
 * already discovered.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
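
/*
 * Worked example with illustrative numbers: for obj_sz = 2560 bytes and
 * 2M pages, mbuf_per_pg = 2097152 / 2560 = 819; nb_mbufs = 8192 then
 * yields n_pages = 8192 / 819 + 1 = 11, so mbuf_mem = 22M and
 * total_mem = RTE_ALIGN(128M + 22M, 2M) = 150M.
 */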

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

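/*
 * Example: for 2M pages, rte_log2_u64(2097152) = 21, so pagesz_flags()
 * returns (21 << 26), matching the Linux MAP_HUGE_2MB encoding.
 */
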
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
}
#endif
1098c7f5dba7SAnatoly Burakov 
109972512e18SViacheslav Ovsiienko static unsigned int
110072512e18SViacheslav Ovsiienko setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
110172512e18SViacheslav Ovsiienko 	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
110272512e18SViacheslav Ovsiienko {
110372512e18SViacheslav Ovsiienko 	struct rte_pktmbuf_extmem *xmem;
110472512e18SViacheslav Ovsiienko 	unsigned int ext_num, zone_num, elt_num;
110572512e18SViacheslav Ovsiienko 	uint16_t elt_size;
110672512e18SViacheslav Ovsiienko 
110772512e18SViacheslav Ovsiienko 	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
110872512e18SViacheslav Ovsiienko 	elt_num = EXTBUF_ZONE_SIZE / elt_size;
110972512e18SViacheslav Ovsiienko 	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
111072512e18SViacheslav Ovsiienko 
111172512e18SViacheslav Ovsiienko 	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
111272512e18SViacheslav Ovsiienko 	if (xmem == NULL) {
111372512e18SViacheslav Ovsiienko 		TESTPMD_LOG(ERR, "Cannot allocate memory for "
111472512e18SViacheslav Ovsiienko 				 "external buffer descriptors\n");
111572512e18SViacheslav Ovsiienko 		*ext_mem = NULL;
111672512e18SViacheslav Ovsiienko 		return 0;
111772512e18SViacheslav Ovsiienko 	}
111872512e18SViacheslav Ovsiienko 	for (ext_num = 0; ext_num < zone_num; ext_num++) {
111972512e18SViacheslav Ovsiienko 		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
112072512e18SViacheslav Ovsiienko 		const struct rte_memzone *mz;
112172512e18SViacheslav Ovsiienko 		char mz_name[RTE_MEMZONE_NAMESIZE];
112272512e18SViacheslav Ovsiienko 		int ret;
112372512e18SViacheslav Ovsiienko 
112472512e18SViacheslav Ovsiienko 		ret = snprintf(mz_name, sizeof(mz_name),
112572512e18SViacheslav Ovsiienko 			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
112672512e18SViacheslav Ovsiienko 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
112772512e18SViacheslav Ovsiienko 			errno = ENAMETOOLONG;
112872512e18SViacheslav Ovsiienko 			ext_num = 0;
112972512e18SViacheslav Ovsiienko 			break;
113072512e18SViacheslav Ovsiienko 		}
113113b19642SDmitry Kozlyuk 		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
113272512e18SViacheslav Ovsiienko 					 socket_id,
113372512e18SViacheslav Ovsiienko 					 RTE_MEMZONE_IOVA_CONTIG |
113472512e18SViacheslav Ovsiienko 					 RTE_MEMZONE_1GB |
113513b19642SDmitry Kozlyuk 					 RTE_MEMZONE_SIZE_HINT_ONLY);
113672512e18SViacheslav Ovsiienko 		if (mz == NULL) {
113772512e18SViacheslav Ovsiienko 			/*
113872512e18SViacheslav Ovsiienko 			 * The caller exits on external buffer creation
113972512e18SViacheslav Ovsiienko 			 * error, so there is no need to free memzones.
114072512e18SViacheslav Ovsiienko 			 */
114172512e18SViacheslav Ovsiienko 			errno = ENOMEM;
114272512e18SViacheslav Ovsiienko 			ext_num = 0;
114372512e18SViacheslav Ovsiienko 			break;
114472512e18SViacheslav Ovsiienko 		}
114572512e18SViacheslav Ovsiienko 		xseg->buf_ptr = mz->addr;
114672512e18SViacheslav Ovsiienko 		xseg->buf_iova = mz->iova;
114772512e18SViacheslav Ovsiienko 		xseg->buf_len = EXTBUF_ZONE_SIZE;
114872512e18SViacheslav Ovsiienko 		xseg->elt_size = elt_size;
114972512e18SViacheslav Ovsiienko 	}
115072512e18SViacheslav Ovsiienko 	if (ext_num == 0 && xmem != NULL) {
115172512e18SViacheslav Ovsiienko 		free(xmem);
115272512e18SViacheslav Ovsiienko 		xmem = NULL;
115372512e18SViacheslav Ovsiienko 	}
115472512e18SViacheslav Ovsiienko 	*ext_mem = xmem;
115572512e18SViacheslav Ovsiienko 	return ext_num;
115672512e18SViacheslav Ovsiienko }
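
/*
 * Worked sizing example for setup_extbuf() (values assumed for
 * illustration): with mbuf_sz = 2176 (already a multiple of the 64-byte
 * cache line) and an EXTBUF_ZONE_SIZE of 2 MiB, each memzone holds
 * 2097152 / 2176 = 963 elements, so nb_mbufs = 16384 needs
 * (16384 + 963 - 1) / 963 = 18 memzones, i.e. 18 rte_pktmbuf_extmem
 * descriptors are filled in and returned through *ext_mem.
 */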
115772512e18SViacheslav Ovsiienko 
1158af75078fSIntel /*
1159af75078fSIntel  * Mbuf pool creation; done once per socket/segment size at init time.
1160af75078fSIntel  */
1161401b744dSShahaf Shuler static struct rte_mempool *
1162af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
116326cbb419SViacheslav Ovsiienko 		 unsigned int socket_id, uint16_t size_idx)
1164af75078fSIntel {
1165af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
1166bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
1167761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1168af75078fSIntel 	uint32_t mb_size;
1169af75078fSIntel 
1170dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1171761f7ae1SJie Zhou #endif
117226cbb419SViacheslav Ovsiienko 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1173a550baf2SMin Hu (Connor) 	if (!is_proc_primary()) {
1174a550baf2SMin Hu (Connor) 		rte_mp = rte_mempool_lookup(pool_name);
1175a550baf2SMin Hu (Connor) 		if (rte_mp == NULL)
1176a550baf2SMin Hu (Connor) 			rte_exit(EXIT_FAILURE,
1177a550baf2SMin Hu (Connor) 				"Get mbuf pool for socket %u failed: %s\n",
1178a550baf2SMin Hu (Connor) 				socket_id, rte_strerror(rte_errno));
1179a550baf2SMin Hu (Connor) 		return rte_mp;
1180a550baf2SMin Hu (Connor) 	}
1181148f963fSBruce Richardson 
1182285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
1183d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1184d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1185d1eb542eSOlivier Matz 
1186c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
1187c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
1188c7f5dba7SAnatoly Burakov 		{
1189c7f5dba7SAnatoly Burakov 			/* use the rte_pktmbuf_pool_create() wrapper */
1190c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1191c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
1192c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1193c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1194c7f5dba7SAnatoly Burakov 			break;
1195c7f5dba7SAnatoly Burakov 		}
1196761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1197c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
1198c7f5dba7SAnatoly Burakov 		{
1199b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1200c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
1201148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
120259fcf854SShahaf Shuler 				socket_id, mempool_flags);
120324427bb9SOlivier Matz 			if (rte_mp == NULL)
120424427bb9SOlivier Matz 				goto err;
1205b19a0c75SOlivier Matz 
1206b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
1207b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
1208b19a0c75SOlivier Matz 				rte_mp = NULL;
120924427bb9SOlivier Matz 				goto err;
1210b19a0c75SOlivier Matz 			}
1211b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
1212b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
12133a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1214c7f5dba7SAnatoly Burakov 			break;
1215c7f5dba7SAnatoly Burakov 		}
1216c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
1217c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
1218c7f5dba7SAnatoly Burakov 		{
1219c7f5dba7SAnatoly Burakov 			int heap_socket;
1220c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1221c7f5dba7SAnatoly Burakov 
1222c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1223c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1224c7f5dba7SAnatoly Burakov 
1225c7f5dba7SAnatoly Burakov 			heap_socket =
1226c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1227c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
1228c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1229c7f5dba7SAnatoly Burakov 
12300e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
12310e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
1232ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1233c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
1234c7f5dba7SAnatoly Burakov 					heap_socket);
1235c7f5dba7SAnatoly Burakov 			break;
1236c7f5dba7SAnatoly Burakov 		}
1237761f7ae1SJie Zhou #endif
123872512e18SViacheslav Ovsiienko 	case MP_ALLOC_XBUF:
123972512e18SViacheslav Ovsiienko 		{
124072512e18SViacheslav Ovsiienko 			struct rte_pktmbuf_extmem *ext_mem;
124172512e18SViacheslav Ovsiienko 			unsigned int ext_num;
124272512e18SViacheslav Ovsiienko 
124372512e18SViacheslav Ovsiienko 			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
124472512e18SViacheslav Ovsiienko 					       socket_id, pool_name, &ext_mem);
124572512e18SViacheslav Ovsiienko 			if (ext_num == 0)
124672512e18SViacheslav Ovsiienko 				rte_exit(EXIT_FAILURE,
124772512e18SViacheslav Ovsiienko 					 "Can't create pinned data buffers\n");
124872512e18SViacheslav Ovsiienko 
124972512e18SViacheslav Ovsiienko 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
125072512e18SViacheslav Ovsiienko 					rte_mbuf_best_mempool_ops());
125172512e18SViacheslav Ovsiienko 			rte_mp = rte_pktmbuf_pool_create_extbuf
125272512e18SViacheslav Ovsiienko 					(pool_name, nb_mbuf, mb_mempool_cache,
125372512e18SViacheslav Ovsiienko 					 0, mbuf_seg_size, socket_id,
125472512e18SViacheslav Ovsiienko 					 ext_mem, ext_num);
125572512e18SViacheslav Ovsiienko 			free(ext_mem);
125672512e18SViacheslav Ovsiienko 			break;
125772512e18SViacheslav Ovsiienko 		}
1258c7f5dba7SAnatoly Burakov 	default:
1259c7f5dba7SAnatoly Burakov 		{
1260c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1261c7f5dba7SAnatoly Burakov 		}
1262bece7b6cSChristian Ehrhardt 	}
1263148f963fSBruce Richardson 
1264761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
126524427bb9SOlivier Matz err:
1266761f7ae1SJie Zhou #endif
1267af75078fSIntel 	if (rte_mp == NULL) {
1268d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
1269d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
1270d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
1271148f963fSBruce Richardson 	} else if (verbose_level > 0) {
1272591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
1273af75078fSIntel 	}
1274401b744dSShahaf Shuler 	return rte_mp;
1275af75078fSIntel }
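
/*
 * Usage sketch for mbuf_pool_create() (assumed values): the primary
 * process creates the pool according to mp_alloc_type, while secondary
 * processes only look it up by name, e.g.
 *
 *	struct rte_mempool *mp = mbuf_pool_create(2048, 8192, 0, 0);
 *
 * creates (or looks up) a pool of 8192 mbufs with 2048-byte data rooms
 * on socket 0 for segment index 0; any failure ends in rte_exit().
 */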
1276af75078fSIntel 
127720a0286fSLiu Xiaofeng /*
127820a0286fSLiu Xiaofeng  * Check whether the given socket id is valid in NUMA mode.
127920a0286fSLiu Xiaofeng  * Return 0 if valid, otherwise return -1.
128020a0286fSLiu Xiaofeng  */
128120a0286fSLiu Xiaofeng static int
128220a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
128320a0286fSLiu Xiaofeng {
128420a0286fSLiu Xiaofeng 	static int warning_once = 0;
128520a0286fSLiu Xiaofeng 
1286c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
128720a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
128861a3b0e5SAndrew Rybchenko 			fprintf(stderr,
128961a3b0e5SAndrew Rybchenko 				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
129020a0286fSLiu Xiaofeng 		warning_once = 1;
129120a0286fSLiu Xiaofeng 		return -1;
129220a0286fSLiu Xiaofeng 	}
129320a0286fSLiu Xiaofeng 	return 0;
129420a0286fSLiu Xiaofeng }
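
/*
 * Usage sketch: callers typically fall back to the first detected
 * socket when the given id is not valid, e.g.
 *
 *	if (check_socket_id(socket_id) < 0)
 *		socket_id = socket_ids[0];
 */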
129520a0286fSLiu Xiaofeng 
12963f7311baSWei Dai /*
12973f7311baSWei Dai  * Get the allowed maximum number of RX queues.
12983f7311baSWei Dai  * *pid returns the port id which has the minimal value of
12993f7311baSWei Dai  * max_rx_queues among all ports.
13003f7311baSWei Dai  */
13013f7311baSWei Dai queueid_t
13023f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
13033f7311baSWei Dai {
13049e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
13056f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
13063f7311baSWei Dai 	portid_t pi;
13073f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
13083f7311baSWei Dai 
13093f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13106f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13116f51deb9SIvan Ilchenko 			continue;
13126f51deb9SIvan Ilchenko 
13136f51deb9SIvan Ilchenko 		max_rxq_valid = true;
13143f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
13153f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
13163f7311baSWei Dai 			*pid = pi;
13173f7311baSWei Dai 		}
13183f7311baSWei Dai 	}
13196f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
13203f7311baSWei Dai }
13213f7311baSWei Dai 
13223f7311baSWei Dai /*
13233f7311baSWei Dai  * Check whether the input rxq is valid.
13243f7311baSWei Dai  * The input rxq is valid if it is not greater than the maximum
13253f7311baSWei Dai  * number of RX queues of every port.
13263f7311baSWei Dai  * Return 0 if valid, otherwise return -1.
13273f7311baSWei Dai  */
13283f7311baSWei Dai int
13293f7311baSWei Dai check_nb_rxq(queueid_t rxq)
13303f7311baSWei Dai {
13313f7311baSWei Dai 	queueid_t allowed_max_rxq;
13323f7311baSWei Dai 	portid_t pid = 0;
13333f7311baSWei Dai 
13343f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
13353f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
133661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
133761a3b0e5SAndrew Rybchenko 			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
133861a3b0e5SAndrew Rybchenko 			rxq, allowed_max_rxq, pid);
13393f7311baSWei Dai 		return -1;
13403f7311baSWei Dai 	}
13413f7311baSWei Dai 	return 0;
13423f7311baSWei Dai }
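
/*
 * Usage sketch (hypothetical variable name): command-line handling can
 * validate a requested Rx queue count before applying it, e.g.
 *
 *	if (check_nb_rxq(requested_rxq) != 0)
 *		rte_exit(EXIT_FAILURE, "rxq %u is out of range\n",
 *			 requested_rxq);
 *	nb_rxq = requested_rxq;
 */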
13433f7311baSWei Dai 
134436db4f6cSWei Dai /*
134536db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
134636db4f6cSWei Dai  * *pid returns the port id which has the minimal value of
134736db4f6cSWei Dai  * max_tx_queues among all ports.
134836db4f6cSWei Dai  */
134936db4f6cSWei Dai queueid_t
135036db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
135136db4f6cSWei Dai {
13529e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
13536f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
135436db4f6cSWei Dai 	portid_t pi;
135536db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
135636db4f6cSWei Dai 
135736db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13586f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13596f51deb9SIvan Ilchenko 			continue;
13606f51deb9SIvan Ilchenko 
13616f51deb9SIvan Ilchenko 		max_txq_valid = true;
136236db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
136336db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
136436db4f6cSWei Dai 			*pid = pi;
136536db4f6cSWei Dai 		}
136636db4f6cSWei Dai 	}
13676f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
136836db4f6cSWei Dai }
136936db4f6cSWei Dai 
137036db4f6cSWei Dai /*
137136db4f6cSWei Dai  * Check whether the input txq is valid.
137236db4f6cSWei Dai  * The input txq is valid if it is not greater than the maximum
137336db4f6cSWei Dai  * number of TX queues of every port.
137436db4f6cSWei Dai  * Return 0 if valid, otherwise return -1.
137536db4f6cSWei Dai  */
137636db4f6cSWei Dai int
137736db4f6cSWei Dai check_nb_txq(queueid_t txq)
137836db4f6cSWei Dai {
137936db4f6cSWei Dai 	queueid_t allowed_max_txq;
138036db4f6cSWei Dai 	portid_t pid = 0;
138136db4f6cSWei Dai 
138236db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
138336db4f6cSWei Dai 	if (txq > allowed_max_txq) {
138461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
138561a3b0e5SAndrew Rybchenko 			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
138661a3b0e5SAndrew Rybchenko 			txq, allowed_max_txq, pid);
138736db4f6cSWei Dai 		return -1;
138836db4f6cSWei Dai 	}
138936db4f6cSWei Dai 	return 0;
139036db4f6cSWei Dai }
139136db4f6cSWei Dai 
13921c69df45SOri Kam /*
139399e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
139499e040d3SLijun Ou  * *pid returns the port id which has the minimal value of
139599e040d3SLijun Ou  * max_rxd among all queues of all ports.
139699e040d3SLijun Ou  */
139799e040d3SLijun Ou static uint16_t
139899e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
139999e040d3SLijun Ou {
140099e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
140199e040d3SLijun Ou 	portid_t pi;
140299e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
140399e040d3SLijun Ou 
140499e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
140599e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
140699e040d3SLijun Ou 			continue;
140799e040d3SLijun Ou 
140899e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
140999e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
141099e040d3SLijun Ou 			*pid = pi;
141199e040d3SLijun Ou 		}
141299e040d3SLijun Ou 	}
141399e040d3SLijun Ou 	return allowed_max_rxd;
141499e040d3SLijun Ou }
141599e040d3SLijun Ou 
141699e040d3SLijun Ou /*
141799e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
141899e040d3SLijun Ou  * *pid returns the port id which has the maximal value of
141999e040d3SLijun Ou  * min_rxd among all queues of all ports.
142099e040d3SLijun Ou  */
142199e040d3SLijun Ou static uint16_t
142299e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
142399e040d3SLijun Ou {
142499e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
142599e040d3SLijun Ou 	portid_t pi;
142699e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
142799e040d3SLijun Ou 
142899e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
142999e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
143099e040d3SLijun Ou 			continue;
143199e040d3SLijun Ou 
143299e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
143399e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
143499e040d3SLijun Ou 			*pid = pi;
143599e040d3SLijun Ou 		}
143699e040d3SLijun Ou 	}
143799e040d3SLijun Ou 
143899e040d3SLijun Ou 	return allowed_min_rxd;
143999e040d3SLijun Ou }
144099e040d3SLijun Ou 
144199e040d3SLijun Ou /*
144299e040d3SLijun Ou  * Check whether the input rxd is valid.
144399e040d3SLijun Ou  * The input rxd is valid if it is not greater than the maximum and
144499e040d3SLijun Ou  * not less than the minimal number of RXDs of every Rx queue.
144599e040d3SLijun Ou  * Return 0 if valid, otherwise return -1.
144799e040d3SLijun Ou  */
144899e040d3SLijun Ou int
144999e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
145099e040d3SLijun Ou {
145199e040d3SLijun Ou 	uint16_t allowed_max_rxd;
145299e040d3SLijun Ou 	uint16_t allowed_min_rxd;
145399e040d3SLijun Ou 	portid_t pid = 0;
145499e040d3SLijun Ou 
145599e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
145699e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
145761a3b0e5SAndrew Rybchenko 		fprintf(stderr,
145861a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
145961a3b0e5SAndrew Rybchenko 			rxd, allowed_max_rxd, pid);
146099e040d3SLijun Ou 		return -1;
146199e040d3SLijun Ou 	}
146299e040d3SLijun Ou 
146399e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
146499e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
146561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
146661a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
146761a3b0e5SAndrew Rybchenko 			rxd, allowed_min_rxd, pid);
146899e040d3SLijun Ou 		return -1;
146999e040d3SLijun Ou 	}
147099e040d3SLijun Ou 
147199e040d3SLijun Ou 	return 0;
147299e040d3SLijun Ou }
147399e040d3SLijun Ou 
147499e040d3SLijun Ou /*
147599e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queue.
147699e040d3SLijun Ou  * *pid returns the port id which has the minimal value of
147799e040d3SLijun Ou  * max_txd among all queues of all ports.
147899e040d3SLijun Ou  */
147999e040d3SLijun Ou static uint16_t
148099e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
148199e040d3SLijun Ou {
148299e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
148399e040d3SLijun Ou 	portid_t pi;
148499e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
148599e040d3SLijun Ou 
148699e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
148799e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
148899e040d3SLijun Ou 			continue;
148999e040d3SLijun Ou 
149099e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
149199e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
149299e040d3SLijun Ou 			*pid = pi;
149399e040d3SLijun Ou 		}
149499e040d3SLijun Ou 	}
149599e040d3SLijun Ou 	return allowed_max_txd;
149699e040d3SLijun Ou }
149799e040d3SLijun Ou 
149899e040d3SLijun Ou /*
149999e040d3SLijun Ou  * Get the allowed minimal number of TXDs of every tx queue.
150099e040d3SLijun Ou  * *pid returns the port id which has the maximal value of
150199e040d3SLijun Ou  * min_txd among all queues of all ports.
150299e040d3SLijun Ou  */
150399e040d3SLijun Ou static uint16_t
150499e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
150599e040d3SLijun Ou {
150699e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
150799e040d3SLijun Ou 	portid_t pi;
150899e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
150999e040d3SLijun Ou 
151099e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
151199e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
151299e040d3SLijun Ou 			continue;
151399e040d3SLijun Ou 
151499e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
151599e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
151699e040d3SLijun Ou 			*pid = pi;
151799e040d3SLijun Ou 		}
151899e040d3SLijun Ou 	}
151999e040d3SLijun Ou 
152099e040d3SLijun Ou 	return allowed_min_txd;
152199e040d3SLijun Ou }
152299e040d3SLijun Ou 
152399e040d3SLijun Ou /*
152499e040d3SLijun Ou  * Check whether the input txd is valid.
152599e040d3SLijun Ou  * The input txd is valid if it is not greater than the maximum and
152699e040d3SLijun Ou  * not less than the minimal number of TXDs of every Tx queue.
152799e040d3SLijun Ou  * Return 0 if valid, otherwise return -1.
152899e040d3SLijun Ou  */
152999e040d3SLijun Ou int
153099e040d3SLijun Ou check_nb_txd(queueid_t txd)
153199e040d3SLijun Ou {
153299e040d3SLijun Ou 	uint16_t allowed_max_txd;
153399e040d3SLijun Ou 	uint16_t allowed_min_txd;
153499e040d3SLijun Ou 	portid_t pid = 0;
153599e040d3SLijun Ou 
153699e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
153799e040d3SLijun Ou 	if (txd > allowed_max_txd) {
153861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
153961a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
154061a3b0e5SAndrew Rybchenko 			txd, allowed_max_txd, pid);
154199e040d3SLijun Ou 		return -1;
154299e040d3SLijun Ou 	}
154399e040d3SLijun Ou 
154499e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
154599e040d3SLijun Ou 	if (txd < allowed_min_txd) {
154661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
154761a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
154861a3b0e5SAndrew Rybchenko 			txd, allowed_min_txd, pid);
154999e040d3SLijun Ou 		return -1;
155099e040d3SLijun Ou 	}
155199e040d3SLijun Ou 	return 0;
155299e040d3SLijun Ou }
155399e040d3SLijun Ou 
15541bb4a528SFerruh Yigit static int
15551bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info)
15561bb4a528SFerruh Yigit {
15571bb4a528SFerruh Yigit 	uint32_t eth_overhead;
15581bb4a528SFerruh Yigit 
15591bb4a528SFerruh Yigit 	if (dev_info->max_mtu != UINT16_MAX &&
15601bb4a528SFerruh Yigit 	    dev_info->max_rx_pktlen > dev_info->max_mtu)
15611bb4a528SFerruh Yigit 		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
15621bb4a528SFerruh Yigit 	else
15631bb4a528SFerruh Yigit 		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
15641bb4a528SFerruh Yigit 
15651bb4a528SFerruh Yigit 	return eth_overhead;
15661bb4a528SFerruh Yigit }
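
/*
 * Worked example: a port reporting max_rx_pktlen = 1518 and
 * max_mtu = 1500 yields an overhead of 1518 - 1500 = 18 bytes, which
 * matches the RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) fallback
 * used when the driver does not report a usable max_mtu.
 */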
15671bb4a528SFerruh Yigit 
1568af75078fSIntel static void
1569b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id)
1570b6b8a1ebSViacheslav Ovsiienko {
1571b6b8a1ebSViacheslav Ovsiienko 	struct rte_port *port = &ports[pid];
1572b6b8a1ebSViacheslav Ovsiienko 	int ret;
1573b6b8a1ebSViacheslav Ovsiienko 	int i;
1574b6b8a1ebSViacheslav Ovsiienko 
1575f6d8a6d3SIvan Malov 	eth_rx_metadata_negotiate_mp(pid);
1576f6d8a6d3SIvan Malov 
1577b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.txmode = tx_mode;
1578b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.rxmode = rx_mode;
1579b6b8a1ebSViacheslav Ovsiienko 
1580b6b8a1ebSViacheslav Ovsiienko 	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1581b6b8a1ebSViacheslav Ovsiienko 	if (ret != 0)
1582b6b8a1ebSViacheslav Ovsiienko 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1583b6b8a1ebSViacheslav Ovsiienko 
1584295968d1SFerruh Yigit 	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1585b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.txmode.offloads &=
1586295968d1SFerruh Yigit 			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1587b6b8a1ebSViacheslav Ovsiienko 
1588b6b8a1ebSViacheslav Ovsiienko 	/* Apply Rx offloads configuration */
1589b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
15903c4426dbSDmitry Kozlyuk 		port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
1591b6b8a1ebSViacheslav Ovsiienko 	/* Apply Tx offloads configuration */
1592b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
15933c4426dbSDmitry Kozlyuk 		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
1594b6b8a1ebSViacheslav Ovsiienko 
1595b6b8a1ebSViacheslav Ovsiienko 	if (eth_link_speed)
1596b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.link_speeds = eth_link_speed;
1597b6b8a1ebSViacheslav Ovsiienko 
15981bb4a528SFerruh Yigit 	if (max_rx_pkt_len)
15991bb4a528SFerruh Yigit 		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
16001bb4a528SFerruh Yigit 			get_eth_overhead(&port->dev_info);
16011bb4a528SFerruh Yigit 
1602b6b8a1ebSViacheslav Ovsiienko 	/* set flag to initialize port/queue */
1603b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig = 1;
1604b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig_queues = 1;
1605b6b8a1ebSViacheslav Ovsiienko 	port->socket_id = socket_id;
1606b6b8a1ebSViacheslav Ovsiienko 	port->tx_metadata = 0;
1607b6b8a1ebSViacheslav Ovsiienko 
1608b6b8a1ebSViacheslav Ovsiienko 	/*
1609b6b8a1ebSViacheslav Ovsiienko 	 * Check for maximum number of segments per MTU.
1610b6b8a1ebSViacheslav Ovsiienko 	 * Accordingly update the mbuf data size.
1611b6b8a1ebSViacheslav Ovsiienko 	 */
1612b6b8a1ebSViacheslav Ovsiienko 	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1613b6b8a1ebSViacheslav Ovsiienko 	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
16141bb4a528SFerruh Yigit 		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
16151bb4a528SFerruh Yigit 		uint16_t mtu;
1616b6b8a1ebSViacheslav Ovsiienko 
16171bb4a528SFerruh Yigit 		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
16181bb4a528SFerruh Yigit 			uint16_t data_size = (mtu + eth_overhead) /
16191bb4a528SFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
16201bb4a528SFerruh Yigit 			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
16211bb4a528SFerruh Yigit 
16221bb4a528SFerruh Yigit 			if (buffer_size > mbuf_data_size[0]) {
16231bb4a528SFerruh Yigit 				mbuf_data_size[0] = buffer_size;
1624b6b8a1ebSViacheslav Ovsiienko 				TESTPMD_LOG(WARNING,
1625b6b8a1ebSViacheslav Ovsiienko 					"Configured mbuf size of the first segment %hu\n",
1626b6b8a1ebSViacheslav Ovsiienko 					mbuf_data_size[0]);
1627b6b8a1ebSViacheslav Ovsiienko 			}
1628b6b8a1ebSViacheslav Ovsiienko 		}
1629b6b8a1ebSViacheslav Ovsiienko 	}
16301bb4a528SFerruh Yigit }
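
/*
 * Worked example for the segment sizing above (assumed values): with
 * mtu = 9000, an Ethernet overhead of 18 bytes and nb_mtu_seg_max = 5,
 * each segment must carry (9000 + 18) / 5 = 1803 data bytes;
 * buffer_size = 1803 + RTE_PKTMBUF_HEADROOM (128 by default) = 1931,
 * so mbuf_data_size[0] is raised to 1931 if it was smaller.
 */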
1631b6b8a1ebSViacheslav Ovsiienko 
1632b6b8a1ebSViacheslav Ovsiienko static void
1633af75078fSIntel init_config(void)
1634af75078fSIntel {
1635ce8d5614SIntel 	portid_t pid;
1636af75078fSIntel 	struct rte_mempool *mbp;
1637af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1638af75078fSIntel 	lcoreid_t  lc_id;
16396970401eSDavid Marchand #ifdef RTE_LIB_GRO
1640b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
16416970401eSDavid Marchand #endif
16426970401eSDavid Marchand #ifdef RTE_LIB_GSO
164352f38a20SJiayu Hu 	uint32_t gso_types;
16446970401eSDavid Marchand #endif
1645487f9a59SYulong Pei 
1646af75078fSIntel 	/* Configuration of logical cores. */
1647af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1648af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1649fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1650af75078fSIntel 	if (fwd_lcores == NULL) {
1651ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1652ce8d5614SIntel 							"failed\n", nb_lcores);
1653af75078fSIntel 	}
1654af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1655af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1656af75078fSIntel 					       sizeof(struct fwd_lcore),
1657fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1658af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1659ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1660ce8d5614SIntel 								"failed\n");
1661af75078fSIntel 		}
1662af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1663af75078fSIntel 	}
1664af75078fSIntel 
16657d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1666b6b8a1ebSViacheslav Ovsiienko 		uint32_t socket_id;
16676f51deb9SIvan Ilchenko 
1668b6ea6408SIntel 		if (numa_support) {
1669b6b8a1ebSViacheslav Ovsiienko 			socket_id = port_numa[pid];
1670b6b8a1ebSViacheslav Ovsiienko 			if (port_numa[pid] == NUMA_NO_CONFIG) {
1671b6b8a1ebSViacheslav Ovsiienko 				socket_id = rte_eth_dev_socket_id(pid);
167220a0286fSLiu Xiaofeng 
167329841336SPhil Yang 				/*
167429841336SPhil Yang 				 * if socket_id is invalid,
167529841336SPhil Yang 				 * set to the first available socket.
167629841336SPhil Yang 				 */
167720a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
167829841336SPhil Yang 					socket_id = socket_ids[0];
1679b6ea6408SIntel 			}
1680b6b8a1ebSViacheslav Ovsiienko 		} else {
1681b6b8a1ebSViacheslav Ovsiienko 			socket_id = (socket_num == UMA_NO_CONFIG) ?
1682b6b8a1ebSViacheslav Ovsiienko 				    0 : socket_num;
1683b6ea6408SIntel 		}
1684b6b8a1ebSViacheslav Ovsiienko 		/* Apply default TxRx configuration for all ports */
1685b6b8a1ebSViacheslav Ovsiienko 		init_config_port_offloads(pid, socket_id);
1686ce8d5614SIntel 	}
16873ab64341SOlivier Matz 	/*
16883ab64341SOlivier Matz 	 * Create pools of mbuf.
16893ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbuf in
16903ab64341SOlivier Matz 	 * socket 0 memory by default.
16913ab64341SOlivier Matz 	 * Otherwise, create a pool of mbuf in the memory of each detected socket.
16923ab64341SOlivier Matz 	 *
16933ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
16943ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
16953ab64341SOlivier Matz 	 */
16963ab64341SOlivier Matz 	if (param_total_num_mbufs)
16973ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
16983ab64341SOlivier Matz 	else {
16994ed89049SDavid Marchand 		nb_mbuf_per_pool = RX_DESC_MAX +
17003ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
17014ed89049SDavid Marchand 			TX_DESC_MAX + MAX_PKT_BURST;
17023ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
17033ab64341SOlivier Matz 	}
17043ab64341SOlivier Matz 
1705b6ea6408SIntel 	if (numa_support) {
170626cbb419SViacheslav Ovsiienko 		uint8_t i, j;
1707ce8d5614SIntel 
1708c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
170926cbb419SViacheslav Ovsiienko 			for (j = 0; j < mbuf_data_size_n; j++)
171026cbb419SViacheslav Ovsiienko 				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
171126cbb419SViacheslav Ovsiienko 					mbuf_pool_create(mbuf_data_size[j],
1712401b744dSShahaf Shuler 							  nb_mbuf_per_pool,
171326cbb419SViacheslav Ovsiienko 							  socket_ids[i], j);
17143ab64341SOlivier Matz 	} else {
171526cbb419SViacheslav Ovsiienko 		uint8_t i;
171626cbb419SViacheslav Ovsiienko 
171726cbb419SViacheslav Ovsiienko 		for (i = 0; i < mbuf_data_size_n; i++)
171826cbb419SViacheslav Ovsiienko 			mempools[i] = mbuf_pool_create
171926cbb419SViacheslav Ovsiienko 					(mbuf_data_size[i],
1720401b744dSShahaf Shuler 					 nb_mbuf_per_pool,
172126cbb419SViacheslav Ovsiienko 					 socket_num == UMA_NO_CONFIG ?
172226cbb419SViacheslav Ovsiienko 					 0 : socket_num, i);
17233ab64341SOlivier Matz 	}
1724b6ea6408SIntel 
1725b6ea6408SIntel 	init_port_config();
17265886ae07SAdrien Mazarguil 
17276970401eSDavid Marchand #ifdef RTE_LIB_GSO
1728295968d1SFerruh Yigit 	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1729295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
17306970401eSDavid Marchand #endif
17315886ae07SAdrien Mazarguil 	/*
17325886ae07SAdrien Mazarguil 	 * Record which mbuf pool each logical core should use, if needed.
17335886ae07SAdrien Mazarguil 	 */
17345886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
17358fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
173626cbb419SViacheslav Ovsiienko 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
17378fd8bebcSAdrien Mazarguil 
17385886ae07SAdrien Mazarguil 		if (mbp == NULL)
173926cbb419SViacheslav Ovsiienko 			mbp = mbuf_pool_find(0, 0);
17405886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
17416970401eSDavid Marchand #ifdef RTE_LIB_GSO
174252f38a20SJiayu Hu 		/* initialize GSO context */
174352f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
174452f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
174552f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
174635b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
174735b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
174852f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
17496970401eSDavid Marchand #endif
17505886ae07SAdrien Mazarguil 	}
17515886ae07SAdrien Mazarguil 
17520c0db76fSBernard Iremonger 	fwd_config_setup();
1753b7091f1dSJiayu Hu 
17546970401eSDavid Marchand #ifdef RTE_LIB_GRO
1755b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1756b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1757b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1758b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1759b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1760b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1761b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1762b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1763b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1764b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1765b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1766b7091f1dSJiayu Hu 		}
1767b7091f1dSJiayu Hu 	}
17686970401eSDavid Marchand #endif
1769ce8d5614SIntel }
1770ce8d5614SIntel 
17712950a769SDeclan Doherty 
17722950a769SDeclan Doherty void
1773a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
17742950a769SDeclan Doherty {
17752950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
1776b6b8a1ebSViacheslav Ovsiienko 	init_config_port_offloads(new_port_id, socket_id);
17772950a769SDeclan Doherty 	init_port_config();
17782950a769SDeclan Doherty }
17792950a769SDeclan Doherty 
1780ce8d5614SIntel int
1781ce8d5614SIntel init_fwd_streams(void)
1782ce8d5614SIntel {
1783ce8d5614SIntel 	portid_t pid;
1784ce8d5614SIntel 	struct rte_port *port;
1785ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
17865a8fb55cSReshma Pattan 	queueid_t q;
1787ce8d5614SIntel 
1788ce8d5614SIntel 	/* set socket id according to numa or not */
17897d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1790ce8d5614SIntel 		port = &ports[pid];
1791ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
179261a3b0e5SAndrew Rybchenko 			fprintf(stderr,
179361a3b0e5SAndrew Rybchenko 				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
179461a3b0e5SAndrew Rybchenko 				nb_rxq, port->dev_info.max_rx_queues);
1795ce8d5614SIntel 			return -1;
1796ce8d5614SIntel 		}
1797ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
179861a3b0e5SAndrew Rybchenko 			fprintf(stderr,
179961a3b0e5SAndrew Rybchenko 				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
180061a3b0e5SAndrew Rybchenko 				nb_txq, port->dev_info.max_tx_queues);
1801ce8d5614SIntel 			return -1;
1802ce8d5614SIntel 		}
180320a0286fSLiu Xiaofeng 		if (numa_support) {
180420a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
180520a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
180620a0286fSLiu Xiaofeng 			else {
1807b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
180820a0286fSLiu Xiaofeng 
180929841336SPhil Yang 				/*
181029841336SPhil Yang 				 * if socket_id is invalid,
181129841336SPhil Yang 				 * set to the first available socket.
181229841336SPhil Yang 				 */
181320a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
181429841336SPhil Yang 					port->socket_id = socket_ids[0];
181520a0286fSLiu Xiaofeng 			}
181620a0286fSLiu Xiaofeng 		}
1817b6ea6408SIntel 		else {
1818b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1819af75078fSIntel 				port->socket_id = 0;
1820b6ea6408SIntel 			else
1821b6ea6408SIntel 				port->socket_id = socket_num;
1822b6ea6408SIntel 		}
1823af75078fSIntel 	}
1824af75078fSIntel 
18255a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
18265a8fb55cSReshma Pattan 	if (q == 0) {
182761a3b0e5SAndrew Rybchenko 		fprintf(stderr,
182861a3b0e5SAndrew Rybchenko 			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
18295a8fb55cSReshma Pattan 		return -1;
18305a8fb55cSReshma Pattan 	}
18315a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1832ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1833ce8d5614SIntel 		return 0;
1834ce8d5614SIntel 	/* clear the old */
1835ce8d5614SIntel 	if (fwd_streams != NULL) {
1836ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1837ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1838ce8d5614SIntel 				continue;
1839ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1840ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1841af75078fSIntel 		}
1842ce8d5614SIntel 		rte_free(fwd_streams);
1843ce8d5614SIntel 		fwd_streams = NULL;
1844ce8d5614SIntel 	}
1845ce8d5614SIntel 
1846ce8d5614SIntel 	/* init new */
1847ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
18481f84c469SMatan Azrad 	if (nb_fwd_streams) {
1849ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
18501f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
18511f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1852ce8d5614SIntel 		if (fwd_streams == NULL)
18531f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
18541f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
18551f84c469SMatan Azrad 				 nb_fwd_streams);
1856ce8d5614SIntel 
1857af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
18581f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
18591f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
18601f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1861ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
18621f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
18631f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
18641f84c469SMatan Azrad 		}
1865af75078fSIntel 	}
1866ce8d5614SIntel 
1867ce8d5614SIntel 	return 0;
1868af75078fSIntel }
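
/*
 * Worked example: with 2 ports, nb_rxq = 4 and nb_txq = 2,
 * q = RTE_MAX(4, 2) = 4 and nb_fwd_streams = 2 * 4 = 8 stream slots
 * are allocated (any previous stream array is freed first).
 */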
1869af75078fSIntel 
1870af75078fSIntel static void
1871af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1872af75078fSIntel {
18737569b8c1SHonnappa Nagarahalli 	uint64_t total_burst, sburst;
187485de481aSHonnappa Nagarahalli 	uint64_t nb_burst;
18757569b8c1SHonnappa Nagarahalli 	uint64_t burst_stats[4];
18767569b8c1SHonnappa Nagarahalli 	uint16_t pktnb_stats[4];
1877af75078fSIntel 	uint16_t nb_pkt;
18787569b8c1SHonnappa Nagarahalli 	int burst_percent[4], sburstp;
18797569b8c1SHonnappa Nagarahalli 	int i;
1880af75078fSIntel 
1881af75078fSIntel 	/*
1882af75078fSIntel 	 * First compute the total number of packet bursts; besides the
1883af75078fSIntel 	 * zero-size bursts, track the two burst sizes with the highest occurrences.
1884af75078fSIntel 	 */
18857569b8c1SHonnappa Nagarahalli 	memset(&burst_stats, 0x0, sizeof(burst_stats));
18867569b8c1SHonnappa Nagarahalli 	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
18877569b8c1SHonnappa Nagarahalli 
18887569b8c1SHonnappa Nagarahalli 	/* Show stats for 0 burst size always */
18897569b8c1SHonnappa Nagarahalli 	total_burst = pbs->pkt_burst_spread[0];
18907569b8c1SHonnappa Nagarahalli 	burst_stats[0] = pbs->pkt_burst_spread[0];
18917569b8c1SHonnappa Nagarahalli 	pktnb_stats[0] = 0;
18927569b8c1SHonnappa Nagarahalli 
18937569b8c1SHonnappa Nagarahalli 	/* Find the next 2 burst sizes with highest occurrences. */
18946a8b64fdSEli Britstein 	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1895af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
18967569b8c1SHonnappa Nagarahalli 
1897af75078fSIntel 		if (nb_burst == 0)
1898af75078fSIntel 			continue;
18997569b8c1SHonnappa Nagarahalli 
1900af75078fSIntel 		total_burst += nb_burst;
19017569b8c1SHonnappa Nagarahalli 
19027569b8c1SHonnappa Nagarahalli 		if (nb_burst > burst_stats[1]) {
19037569b8c1SHonnappa Nagarahalli 			burst_stats[2] = burst_stats[1];
19047569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = pktnb_stats[1];
1905fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1906fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
19077569b8c1SHonnappa Nagarahalli 		} else if (nb_burst > burst_stats[2]) {
19087569b8c1SHonnappa Nagarahalli 			burst_stats[2] = nb_burst;
19097569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = nb_pkt;
1910af75078fSIntel 		}
1911af75078fSIntel 	}
1912af75078fSIntel 	if (total_burst == 0)
1913af75078fSIntel 		return;
19147569b8c1SHonnappa Nagarahalli 
19157569b8c1SHonnappa Nagarahalli 	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
19167569b8c1SHonnappa Nagarahalli 	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
19177569b8c1SHonnappa Nagarahalli 		if (i == 3) {
19187569b8c1SHonnappa Nagarahalli 			printf("%d%% of other]\n", 100 - sburstp);
1919af75078fSIntel 			return;
1920af75078fSIntel 		}
19217569b8c1SHonnappa Nagarahalli 
19227569b8c1SHonnappa Nagarahalli 		sburst += burst_stats[i];
19237569b8c1SHonnappa Nagarahalli 		if (sburst == total_burst) {
19247569b8c1SHonnappa Nagarahalli 			printf("%d%% of %d pkts]\n",
19257569b8c1SHonnappa Nagarahalli 				100 - sburstp, (int) pktnb_stats[i]);
1926af75078fSIntel 			return;
1927af75078fSIntel 		}
19287569b8c1SHonnappa Nagarahalli 
19297569b8c1SHonnappa Nagarahalli 		burst_percent[i] =
19307569b8c1SHonnappa Nagarahalli 			(double)burst_stats[i] / total_burst * 100;
19317569b8c1SHonnappa Nagarahalli 		printf("%d%% of %d pkts + ",
19327569b8c1SHonnappa Nagarahalli 			burst_percent[i], (int) pktnb_stats[i]);
19337569b8c1SHonnappa Nagarahalli 		sburstp += burst_percent[i];
1934af75078fSIntel 	}
1935af75078fSIntel }
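
/*
 * Illustrative example (assumed counters): with pkt_burst_spread[0] = 10,
 * pkt_burst_spread[16] = 20 and pkt_burst_spread[32] = 70, total_burst
 * is 100 and the function prints:
 *
 *	RX-bursts : 100 [10% of 0 pkts + 70% of 32 pkts + 20% of 16 pkts]
 */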
1936af75078fSIntel 
1937af75078fSIntel static void
1938af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1939af75078fSIntel {
1940af75078fSIntel 	struct fwd_stream *fs;
1941af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1942af75078fSIntel 
1943af75078fSIntel 	fs = fwd_streams[stream_id];
1944af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1945af75078fSIntel 	    (fs->fwd_dropped == 0))
1946af75078fSIntel 		return;
1947af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1948af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1949af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1950af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1951c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1952c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1953af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1954af75078fSIntel 
1955af75078fSIntel 	/* if checksum mode */
1956af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1957c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1958c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1959c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
196058d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
196158d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
1962d139cf23SLance Richardson 		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1963d139cf23SLance Richardson 			fs->rx_bad_outer_ip_csum);
196494d65546SDavid Marchand 	} else {
196594d65546SDavid Marchand 		printf("\n");
1966af75078fSIntel 	}
1967af75078fSIntel 
19680e4b1963SDharmik Thakkar 	if (record_burst_stats) {
1969af75078fSIntel 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1970af75078fSIntel 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
19710e4b1963SDharmik Thakkar 	}
1972af75078fSIntel }
1973af75078fSIntel 
197453324971SDavid Marchand void
197553324971SDavid Marchand fwd_stats_display(void)
197653324971SDavid Marchand {
197753324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
197853324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
197953324971SDavid Marchand 	struct {
198053324971SDavid Marchand 		struct fwd_stream *rx_stream;
198153324971SDavid Marchand 		struct fwd_stream *tx_stream;
198253324971SDavid Marchand 		uint64_t tx_dropped;
198353324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
198453324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
198553324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
1986d139cf23SLance Richardson 		uint64_t rx_bad_outer_ip_csum;
198753324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
198853324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
198953324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
199053324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
199153324971SDavid Marchand 	struct rte_eth_stats stats;
199253324971SDavid Marchand 	uint64_t fwd_cycles = 0;
199353324971SDavid Marchand 	uint64_t total_recv = 0;
199453324971SDavid Marchand 	uint64_t total_xmit = 0;
199553324971SDavid Marchand 	struct rte_port *port;
199653324971SDavid Marchand 	streamid_t sm_id;
199753324971SDavid Marchand 	portid_t pt_id;
1998baef6bbfSMin Hu (Connor) 	int ret;
199953324971SDavid Marchand 	int i;
200053324971SDavid Marchand 
200153324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
200253324971SDavid Marchand 
200353324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
200453324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
200553324971SDavid Marchand 
200653324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
200753324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
200853324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
200953324971SDavid Marchand 		} else {
201053324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
201153324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
201253324971SDavid Marchand 		}
201353324971SDavid Marchand 
201453324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
201553324971SDavid Marchand 
201653324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
201753324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
201853324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
201953324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
2020d139cf23SLance Richardson 		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2021d139cf23SLance Richardson 				fs->rx_bad_outer_ip_csum;
202253324971SDavid Marchand 
2023bc700b67SDharmik Thakkar 		if (record_core_cycles)
202499a4974aSRobin Jarry 			fwd_cycles += fs->busy_cycles;
202553324971SDavid Marchand 	}
202653324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2027c3fd1e60SFerruh Yigit 		uint64_t tx_dropped = 0;
2028c3fd1e60SFerruh Yigit 
202953324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
203053324971SDavid Marchand 		port = &ports[pt_id];
203153324971SDavid Marchand 
2032baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &stats);
2033baef6bbfSMin Hu (Connor) 		if (ret != 0) {
2034baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2035baef6bbfSMin Hu (Connor) 				"%s: Error: failed to get stats (port %u): %d",
2036baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
2037baef6bbfSMin Hu (Connor) 			continue;
2038baef6bbfSMin Hu (Connor) 		}
203953324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
204053324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
204153324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
204253324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
204353324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
204453324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
204553324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
204653324971SDavid Marchand 
204753324971SDavid Marchand 		total_recv += stats.ipackets;
204853324971SDavid Marchand 		total_xmit += stats.opackets;
204953324971SDavid Marchand 		total_rx_dropped += stats.imissed;
2050c3fd1e60SFerruh Yigit 		tx_dropped += ports_stats[pt_id].tx_dropped;
2051c3fd1e60SFerruh Yigit 		tx_dropped += stats.oerrors;
2052c3fd1e60SFerruh Yigit 		total_tx_dropped += tx_dropped;
205353324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
205453324971SDavid Marchand 
205553324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
205653324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
205753324971SDavid Marchand 
205808dcd187SHuisong Li 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
205908dcd187SHuisong Li 		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
206053324971SDavid Marchand 		       stats.ipackets + stats.imissed);
206153324971SDavid Marchand 
2062d139cf23SLance Richardson 		if (cur_fwd_eng == &csum_fwd_engine) {
206353324971SDavid Marchand 			printf("  Bad-ipcsum: %-14"PRIu64
206453324971SDavid Marchand 			       " Bad-l4csum: %-14"PRIu64
206553324971SDavid Marchand 			       "Bad-outer-l4csum: %-14"PRIu64"\n",
206653324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_ip_csum,
206753324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_l4_csum,
206853324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_outer_l4_csum);
2069d139cf23SLance Richardson 			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
2070d139cf23SLance Richardson 			       ports_stats[pt_id].rx_bad_outer_ip_csum);
2071d139cf23SLance Richardson 		}
207253324971SDavid Marchand 		if (stats.ierrors + stats.rx_nombuf > 0) {
207308dcd187SHuisong Li 			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
207408dcd187SHuisong Li 			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
207553324971SDavid Marchand 		}
207653324971SDavid Marchand 
207708dcd187SHuisong Li 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
207853324971SDavid Marchand 		       "TX-total: %-"PRIu64"\n",
2079c3fd1e60SFerruh Yigit 		       stats.opackets, tx_dropped,
2080c3fd1e60SFerruh Yigit 		       stats.opackets + tx_dropped);
208153324971SDavid Marchand 
20820e4b1963SDharmik Thakkar 		if (record_burst_stats) {
208353324971SDavid Marchand 			if (ports_stats[pt_id].rx_stream)
208453324971SDavid Marchand 				pkt_burst_stats_display("RX",
208553324971SDavid Marchand 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
208653324971SDavid Marchand 			if (ports_stats[pt_id].tx_stream)
208753324971SDavid Marchand 				pkt_burst_stats_display("TX",
208853324971SDavid Marchand 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
20890e4b1963SDharmik Thakkar 		}
209053324971SDavid Marchand 
209153324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
209253324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
209353324971SDavid Marchand 	}
209453324971SDavid Marchand 
209553324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
209653324971SDavid Marchand 	       "%s\n",
209753324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
209853324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
209953324971SDavid Marchand 	       "%-"PRIu64"\n"
210053324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
210153324971SDavid Marchand 	       "%-"PRIu64"\n",
210253324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
210353324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
210453324971SDavid Marchand 	if (total_rx_nombuf > 0)
210553324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
210653324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
210753324971SDavid Marchand 	       "%s\n",
210853324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
2109bc700b67SDharmik Thakkar 	if (record_core_cycles) {
21104c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6
21113a164e00SPhil Yang 		if (total_recv > 0 || total_xmit > 0) {
21123a164e00SPhil Yang 			uint64_t total_pkts = 0;
21133a164e00SPhil Yang 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
21143a164e00SPhil Yang 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
21153a164e00SPhil Yang 				total_pkts = total_xmit;
21163a164e00SPhil Yang 			else
21173a164e00SPhil Yang 				total_pkts = total_recv;
21183a164e00SPhil Yang 
211999a4974aSRobin Jarry 			printf("\n  CPU cycles/packet=%.2F (busy cycles="
21203a164e00SPhil Yang 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
21214c0497b1SDharmik Thakkar 			       " MHz Clock\n",
21223a164e00SPhil Yang 			       (double) fwd_cycles / total_pkts,
21233a164e00SPhil Yang 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
21244c0497b1SDharmik Thakkar 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
21253a164e00SPhil Yang 		}
2126bc700b67SDharmik Thakkar 	}
212753324971SDavid Marchand }
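
/*
 * Illustrative reading of the cycles/packet figure above (assumed
 * numbers): with fwd_cycles = 2000000000 busy cycles and
 * total_pkts = 100000000 received packets, testpmd reports
 * 2000000000 / 100000000 = 20.00 CPU cycles/packet, at a clock of
 * rte_get_tsc_hz() / 1e6 MHz.
 */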
212853324971SDavid Marchand 
212953324971SDavid Marchand void
213053324971SDavid Marchand fwd_stats_reset(void)
213153324971SDavid Marchand {
213253324971SDavid Marchand 	streamid_t sm_id;
213353324971SDavid Marchand 	portid_t pt_id;
2134baef6bbfSMin Hu (Connor) 	int ret;
213553324971SDavid Marchand 	int i;
213653324971SDavid Marchand 
213753324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
213853324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
2139baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2140baef6bbfSMin Hu (Connor) 		if (ret != 0)
2141baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2142baef6bbfSMin Hu (Connor) 				"%s: Error: failed to clear stats (port %u):%d",
2143baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
214453324971SDavid Marchand 	}
214553324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
214653324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
214753324971SDavid Marchand 
214853324971SDavid Marchand 		fs->rx_packets = 0;
214953324971SDavid Marchand 		fs->tx_packets = 0;
215053324971SDavid Marchand 		fs->fwd_dropped = 0;
215153324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
215253324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
215353324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
2154d139cf23SLance Richardson 		fs->rx_bad_outer_ip_csum = 0;
215553324971SDavid Marchand 
215653324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
215753324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
215899a4974aSRobin Jarry 		fs->busy_cycles = 0;
215953324971SDavid Marchand 	}
216053324971SDavid Marchand }
216153324971SDavid Marchand 
2162af75078fSIntel static void
21637741e4cfSIntel flush_fwd_rx_queues(void)
2164af75078fSIntel {
2165af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2166af75078fSIntel 	portid_t  rxp;
21677741e4cfSIntel 	portid_t port_id;
2168af75078fSIntel 	queueid_t rxq;
2169af75078fSIntel 	uint16_t  nb_rx;
2170af75078fSIntel 	uint8_t   j;
2171f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2172594302c7SJames Poole 	uint64_t timer_period;
2173f487715fSReshma Pattan 
2174a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
2175a550baf2SMin Hu (Connor) 		printf("multi-process does not support flushing fwd Rx queues, skipping.\n");
2176a550baf2SMin Hu (Connor) 		return;
2177a550baf2SMin Hu (Connor) 	}
2178a550baf2SMin Hu (Connor) 
2179f487715fSReshma Pattan 	/* convert to number of cycles */
2180594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2181af75078fSIntel 
2182af75078fSIntel 	for (j = 0; j < 2; j++) {
21837741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2184af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
21857741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
21863c4426dbSDmitry Kozlyuk 
21873c4426dbSDmitry Kozlyuk 				/* Polling stopped queues is prohibited. */
21883c4426dbSDmitry Kozlyuk 				if (ports[port_id].rxq[rxq].state ==
21893c4426dbSDmitry Kozlyuk 				    RTE_ETH_QUEUE_STATE_STOPPED)
21903c4426dbSDmitry Kozlyuk 					continue;
21913c4426dbSDmitry Kozlyuk 
2192f487715fSReshma Pattan 				/*
2193f487715fSReshma Pattan 				 * testpmd can get stuck in the below do/while
2194f487715fSReshma Pattan 				 * loop if rte_eth_rx_burst() always returns
2195f487715fSReshma Pattan 				 * nonzero packets, so a timer is added to exit
2196f487715fSReshma Pattan 				 * this loop after the 1 second timer expires.
2197f487715fSReshma Pattan 				 */
2198f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2199af75078fSIntel 				do {
22007741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2201013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2202d00fee5dSDavid Marchand 					rte_pktmbuf_free_bulk(pkts_burst, nb_rx);
2203f487715fSReshma Pattan 
2204f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2205f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2206f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2207f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2208f487715fSReshma Pattan 					(timer_tsc < timer_period));
2209f487715fSReshma Pattan 				timer_tsc = 0;
2210af75078fSIntel 			}
2211af75078fSIntel 		}
2212af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2213af75078fSIntel 	}
2214af75078fSIntel }
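
/*
 * The timeout pattern used above, reduced to its core (illustrative;
 * "pkts", "prev" and "budget" are assumed local names):
 *
 *	uint64_t prev = rte_rdtsc();
 *	uint64_t budget = rte_get_timer_hz();	// one second of cycles
 *	do {
 *		nb_rx = rte_eth_rx_burst(port_id, rxq, pkts, MAX_PKT_BURST);
 *		rte_pktmbuf_free_bulk(pkts, nb_rx);
 *	} while (nb_rx > 0 && rte_rdtsc() - prev < budget);
 */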
2215af75078fSIntel 
2216af75078fSIntel static void
2217af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2218af75078fSIntel {
2219af75078fSIntel 	struct fwd_stream **fsm;
222099a4974aSRobin Jarry 	uint64_t prev_tsc;
2221af75078fSIntel 	streamid_t nb_fs;
2222af75078fSIntel 	streamid_t sm_id;
2223a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
22247e4441c8SRemy Horton 	uint64_t tics_per_1sec;
22257e4441c8SRemy Horton 	uint64_t tics_datum;
22267e4441c8SRemy Horton 	uint64_t tics_current;
22274918a357SXiaoyun Li 	uint16_t i, cnt_ports;
2228af75078fSIntel 
22294918a357SXiaoyun Li 	cnt_ports = nb_ports;
22307e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
22317e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
22327e4441c8SRemy Horton #endif
2233af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
2234af75078fSIntel 	nb_fs = fc->stream_nb;
223599a4974aSRobin Jarry 	prev_tsc = rte_rdtsc();
2236af75078fSIntel 	do {
223706c20561SDavid Marchand 		for (sm_id = 0; sm_id < nb_fs; sm_id++) {
223806c20561SDavid Marchand 			struct fwd_stream *fs = fsm[sm_id];
223906c20561SDavid Marchand 			uint64_t start_fs_tsc = 0;
224006c20561SDavid Marchand 			bool busy;
224106c20561SDavid Marchand 
224206c20561SDavid Marchand 			if (fs->disabled)
224306c20561SDavid Marchand 				continue;
224406c20561SDavid Marchand 			if (record_core_cycles)
224506c20561SDavid Marchand 				start_fs_tsc = rte_rdtsc();
224606c20561SDavid Marchand 			busy = (*pkt_fwd)(fs);
224706c20561SDavid Marchand 			if (record_core_cycles && busy)
224806c20561SDavid Marchand 				fs->busy_cycles += rte_rdtsc() - start_fs_tsc;
224906c20561SDavid Marchand 		}
2250a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
2251e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
2252e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
22537e4441c8SRemy Horton 			tics_current = rte_rdtsc();
22547e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
22557e4441c8SRemy Horton 				/* Periodic bitrate calculation */
22564918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
2257e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
22584918a357SXiaoyun Li 						ports_ids[i]);
22597e4441c8SRemy Horton 				tics_datum = tics_current;
22607e4441c8SRemy Horton 			}
2261e25e6c70SRemy Horton 		}
22627e4441c8SRemy Horton #endif
2263a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
226465eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
226565eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
226662d3216dSReshma Pattan 			rte_latencystats_update();
226762d3216dSReshma Pattan #endif
226899a4974aSRobin Jarry 		if (record_core_cycles) {
226999a4974aSRobin Jarry 			uint64_t tsc = rte_rdtsc();
227062d3216dSReshma Pattan 
227199a4974aSRobin Jarry 			fc->total_cycles += tsc - prev_tsc;
227299a4974aSRobin Jarry 			prev_tsc = tsc;
227399a4974aSRobin Jarry 		}
2274af75078fSIntel 	} while (! fc->stopped);
2275af75078fSIntel }
2276af75078fSIntel 
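/*
 * Lcore usage callback: report the total and busy cycle counters of a
 * forwarding lcore. Busy cycles are the sum over all enabled streams
 * handled by the lcore; -1 is returned if the lcore is not a
 * forwarding lcore.
 */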
2277af75078fSIntel static int
227899a4974aSRobin Jarry lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
227999a4974aSRobin Jarry {
228099a4974aSRobin Jarry 	struct fwd_stream **fsm;
228199a4974aSRobin Jarry 	struct fwd_lcore *fc;
228299a4974aSRobin Jarry 	streamid_t nb_fs;
228399a4974aSRobin Jarry 	streamid_t sm_id;
228499a4974aSRobin Jarry 
228599a4974aSRobin Jarry 	fc = lcore_to_fwd_lcore(lcore_id);
228699a4974aSRobin Jarry 	if (fc == NULL)
228799a4974aSRobin Jarry 		return -1;
228899a4974aSRobin Jarry 
228999a4974aSRobin Jarry 	fsm = &fwd_streams[fc->stream_idx];
229099a4974aSRobin Jarry 	nb_fs = fc->stream_nb;
229199a4974aSRobin Jarry 	usage->busy_cycles = 0;
229299a4974aSRobin Jarry 	usage->total_cycles = fc->total_cycles;
229399a4974aSRobin Jarry 
229499a4974aSRobin Jarry 	for (sm_id = 0; sm_id < nb_fs; sm_id++) {
229599a4974aSRobin Jarry 		if (!fsm[sm_id]->disabled)
229699a4974aSRobin Jarry 			usage->busy_cycles += fsm[sm_id]->busy_cycles;
229799a4974aSRobin Jarry 	}
229899a4974aSRobin Jarry 
229999a4974aSRobin Jarry 	return 0;
230099a4974aSRobin Jarry }
230199a4974aSRobin Jarry 
230299a4974aSRobin Jarry static int
2303af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2304af75078fSIntel {
2305af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2306af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2307af75078fSIntel 	return 0;
2308af75078fSIntel }
2309af75078fSIntel 
2310af75078fSIntel /*
2311af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2312af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2313af75078fSIntel  */
2314af75078fSIntel static int
2315af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2316af75078fSIntel {
2317af75078fSIntel 	struct fwd_lcore *fwd_lc;
2318af75078fSIntel 	struct fwd_lcore tmp_lcore;
2319af75078fSIntel 
2320af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2321af75078fSIntel 	tmp_lcore = *fwd_lc;
2322af75078fSIntel 	tmp_lcore.stopped = 1;
2323af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2324af75078fSIntel 	return 0;
2325af75078fSIntel }
2326af75078fSIntel 
2327af75078fSIntel /*
2328af75078fSIntel  * Launch packet forwarding:
2329af75078fSIntel  *     - Set up the per-port forwarding context.
2330af75078fSIntel  *     - Launch logical cores with their forwarding configuration.
2331af75078fSIntel  */
2332af75078fSIntel static void
2333af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2334af75078fSIntel {
2335af75078fSIntel 	unsigned int i;
2336af75078fSIntel 	unsigned int lc_id;
2337af75078fSIntel 	int diag;
2338af75078fSIntel 
2339af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2340af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2341af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2342af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2343af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2344af75078fSIntel 						     fwd_lcores[i], lc_id);
2345af75078fSIntel 			if (diag != 0)
234661a3b0e5SAndrew Rybchenko 				fprintf(stderr,
234761a3b0e5SAndrew Rybchenko 					"launch lcore %u failed - diag=%d\n",
2348af75078fSIntel 					lc_id, diag);
2349af75078fSIntel 		}
2350af75078fSIntel 	}
2351af75078fSIntel }
2352af75078fSIntel 
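/*
 * Default stream init callback: a stream is disabled when either its
 * Rx queue or its Tx queue is stopped, since polling stopped queues is
 * prohibited.
 */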
2353180ba023SDavid Marchand void
2354180ba023SDavid Marchand common_fwd_stream_init(struct fwd_stream *fs)
2355180ba023SDavid Marchand {
2356180ba023SDavid Marchand 	bool rx_stopped, tx_stopped;
2357180ba023SDavid Marchand 
2358180ba023SDavid Marchand 	rx_stopped = (ports[fs->rx_port].rxq[fs->rx_queue].state == RTE_ETH_QUEUE_STATE_STOPPED);
2359180ba023SDavid Marchand 	tx_stopped = (ports[fs->tx_port].txq[fs->tx_queue].state == RTE_ETH_QUEUE_STATE_STOPPED);
2360180ba023SDavid Marchand 	fs->disabled = rx_stopped || tx_stopped;
2361180ba023SDavid Marchand }
2362180ba023SDavid Marchand 
23635028f207SShiyang He static void
23645028f207SShiyang He update_rx_queue_state(uint16_t port_id, uint16_t queue_id)
23655028f207SShiyang He {
23665028f207SShiyang He 	struct rte_eth_rxq_info rx_qinfo;
23675028f207SShiyang He 	int32_t rc;
23685028f207SShiyang He 
23695028f207SShiyang He 	rc = rte_eth_rx_queue_info_get(port_id,
23705028f207SShiyang He 			queue_id, &rx_qinfo);
23715028f207SShiyang He 	if (rc == 0) {
23725028f207SShiyang He 		ports[port_id].rxq[queue_id].state =
23735028f207SShiyang He 			rx_qinfo.queue_state;
23745028f207SShiyang He 	} else if (rc == -ENOTSUP) {
23755028f207SShiyang He 		/*
237637b68fa1SJie Hai 		 * Do not change the rxq state in the primary process
237737b68fa1SJie Hai 		 * so that PMDs which do not implement
237837b68fa1SJie Hai 		 * rte_eth_rx_queue_info_get can forward as before.
237937b68fa1SJie Hai 		 */
238037b68fa1SJie Hai 		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
238137b68fa1SJie Hai 			return;
238237b68fa1SJie Hai 		/*
23835028f207SShiyang He 		 * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED
23845028f207SShiyang He 		 * so that PMDs which do not implement
23855028f207SShiyang He 		 * rte_eth_rx_queue_info_get can forward.
23865028f207SShiyang He 		 */
23875028f207SShiyang He 		ports[port_id].rxq[queue_id].state =
23885028f207SShiyang He 			RTE_ETH_QUEUE_STATE_STARTED;
23895028f207SShiyang He 	} else {
23905028f207SShiyang He 		TESTPMD_LOG(WARNING,
23915028f207SShiyang He 			"Failed to get Rx queue info: port %u queue %u\n", port_id, queue_id);
23925028f207SShiyang He 	}
23935028f207SShiyang He }
23945028f207SShiyang He 
23955028f207SShiyang He static void
23965028f207SShiyang He update_tx_queue_state(uint16_t port_id, uint16_t queue_id)
23975028f207SShiyang He {
23985028f207SShiyang He 	struct rte_eth_txq_info tx_qinfo;
23995028f207SShiyang He 	int32_t rc;
24005028f207SShiyang He 
24015028f207SShiyang He 	rc = rte_eth_tx_queue_info_get(port_id,
24025028f207SShiyang He 			queue_id, &tx_qinfo);
24035028f207SShiyang He 	if (rc == 0) {
24045028f207SShiyang He 		ports[port_id].txq[queue_id].state =
24055028f207SShiyang He 			tx_qinfo.queue_state;
24065028f207SShiyang He 	} else if (rc == -ENOTSUP) {
24075028f207SShiyang He 		/*
240837b68fa1SJie Hai 		 * Do not change the txq state in the primary process
240937b68fa1SJie Hai 		 * so that PMDs which do not implement
241037b68fa1SJie Hai 		 * rte_eth_tx_queue_info_get can forward as before.
241137b68fa1SJie Hai 		 */
241237b68fa1SJie Hai 		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
241337b68fa1SJie Hai 			return;
241437b68fa1SJie Hai 		/*
24155028f207SShiyang He 		 * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED
24165028f207SShiyang He 		 * so that PMDs which do not implement
24175028f207SShiyang He 		 * rte_eth_tx_queue_info_get can forward.
24185028f207SShiyang He 		 */
24195028f207SShiyang He 		ports[port_id].txq[queue_id].state =
24205028f207SShiyang He 			RTE_ETH_QUEUE_STATE_STARTED;
24215028f207SShiyang He 	} else {
24225028f207SShiyang He 		TESTPMD_LOG(WARNING,
24235028f207SShiyang He 			"Failed to get Tx queue info: port %u queue %u\n", port_id, queue_id);
24245028f207SShiyang He 	}
24255028f207SShiyang He }
24265028f207SShiyang He 
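/*
 * Synchronize the cached Rx/Tx queue states of the given port (or of
 * all ports when pid is RTE_PORT_ALL) with the states reported by the
 * drivers.
 */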
24275028f207SShiyang He static void
242837b68fa1SJie Hai update_queue_state(portid_t pid)
24295028f207SShiyang He {
24305028f207SShiyang He 	portid_t pi;
24315028f207SShiyang He 	queueid_t qi;
24325028f207SShiyang He 
24335028f207SShiyang He 	RTE_ETH_FOREACH_DEV(pi) {
243437b68fa1SJie Hai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
243537b68fa1SJie Hai 			continue;
243637b68fa1SJie Hai 
24375028f207SShiyang He 		for (qi = 0; qi < nb_rxq; qi++)
24385028f207SShiyang He 			update_rx_queue_state(pi, qi);
24395028f207SShiyang He 		for (qi = 0; qi < nb_txq; qi++)
24405028f207SShiyang He 			update_tx_queue_state(pi, qi);
24415028f207SShiyang He 	}
24425028f207SShiyang He }
24435028f207SShiyang He 
2444af75078fSIntel /*
2445af75078fSIntel  * Launch packet forwarding configuration.
2446af75078fSIntel  */
2447af75078fSIntel void
2448af75078fSIntel start_packet_forwarding(int with_tx_first)
2449af75078fSIntel {
2450af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2451af75078fSIntel 	port_fwd_end_t  port_fwd_end;
24523c4426dbSDmitry Kozlyuk 	stream_init_t stream_init = cur_fwd_eng->stream_init;
2453af75078fSIntel 	unsigned int i;
2454af75078fSIntel 
24555a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
24565a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "Number of Rx queues is 0, cannot use rxonly fwd mode\n");
24575a8fb55cSReshma Pattan 
24585a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
24595a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "Number of Tx queues is 0, cannot use txonly fwd mode\n");
24605a8fb55cSReshma Pattan 
24615a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
24625a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
24635a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
24645a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
24655a8fb55cSReshma Pattan 			"Either Rx or Tx queue count is 0, cannot use %s fwd mode\n",
24665a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
24675a8fb55cSReshma Pattan 
2468ce8d5614SIntel 	if (all_ports_started() == 0) {
246961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Not all ports were started\n");
2470ce8d5614SIntel 		return;
2471ce8d5614SIntel 	}
2472af75078fSIntel 	if (test_done == 0) {
247361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding already started\n");
2474af75078fSIntel 		return;
2475af75078fSIntel 	}
24767741e4cfSIntel 
247747a767b2SMatan Azrad 	fwd_config_setup();
247847a767b2SMatan Azrad 
247965744833SXueming Li 	pkt_fwd_config_display(&cur_fwd_config);
248065744833SXueming Li 	if (!pkt_fwd_shared_rxq_check())
248165744833SXueming Li 		return;
248265744833SXueming Li 
24835028f207SShiyang He 	if (stream_init != NULL) {
248437b68fa1SJie Hai 		update_queue_state(RTE_PORT_ALL);
24853c4426dbSDmitry Kozlyuk 		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
24863c4426dbSDmitry Kozlyuk 			stream_init(fwd_streams[i]);
24875028f207SShiyang He 	}
24883c4426dbSDmitry Kozlyuk 
2489a78040c9SAlvin Zhang 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2490a78040c9SAlvin Zhang 	if (port_fwd_begin != NULL) {
2491a78040c9SAlvin Zhang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2492a78040c9SAlvin Zhang 			if (port_fwd_begin(fwd_ports_ids[i])) {
2493a78040c9SAlvin Zhang 				fprintf(stderr,
2494a78040c9SAlvin Zhang 					"Packet forwarding is not ready\n");
2495a78040c9SAlvin Zhang 				return;
2496a78040c9SAlvin Zhang 			}
2497a78040c9SAlvin Zhang 		}
2498a78040c9SAlvin Zhang 	}
2499a78040c9SAlvin Zhang 
2500a78040c9SAlvin Zhang 	if (with_tx_first) {
2501a78040c9SAlvin Zhang 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2502a78040c9SAlvin Zhang 		if (port_fwd_begin != NULL) {
2503a78040c9SAlvin Zhang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2504a78040c9SAlvin Zhang 				if (port_fwd_begin(fwd_ports_ids[i])) {
2505a78040c9SAlvin Zhang 					fprintf(stderr,
2506a78040c9SAlvin Zhang 						"Packet forwarding is not ready\n");
2507a78040c9SAlvin Zhang 					return;
2508a78040c9SAlvin Zhang 				}
2509a78040c9SAlvin Zhang 			}
2510a78040c9SAlvin Zhang 		}
2511a78040c9SAlvin Zhang 	}
2512a78040c9SAlvin Zhang 
2513a78040c9SAlvin Zhang 	test_done = 0;
2514a78040c9SAlvin Zhang 
25157741e4cfSIntel 	if(!no_flush_rx)
25167741e4cfSIntel 		flush_fwd_rx_queues();
25177741e4cfSIntel 
2518af75078fSIntel 	rxtx_config_display();
2519af75078fSIntel 
252053324971SDavid Marchand 	fwd_stats_reset();
2521af75078fSIntel 	if (with_tx_first) {
2522acbf77a6SZhihong Wang 		while (with_tx_first--) {
2523acbf77a6SZhihong Wang 			launch_packet_forwarding(
2524acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2525af75078fSIntel 			rte_eal_mp_wait_lcore();
2526acbf77a6SZhihong Wang 		}
2527af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2528af75078fSIntel 		if (port_fwd_end != NULL) {
2529af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2530af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2531af75078fSIntel 		}
2532af75078fSIntel 	}
2533af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2534af75078fSIntel }
2535af75078fSIntel 
2536af75078fSIntel void
2537af75078fSIntel stop_packet_forwarding(void)
2538af75078fSIntel {
2539af75078fSIntel 	port_fwd_end_t port_fwd_end;
2540af75078fSIntel 	lcoreid_t lc_id;
254153324971SDavid Marchand 	portid_t pt_id;
254253324971SDavid Marchand 	int i;
2543af75078fSIntel 
2544af75078fSIntel 	if (test_done) {
254561a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding not started\n");
2546af75078fSIntel 		return;
2547af75078fSIntel 	}
2548af75078fSIntel 	printf("Telling cores to stop...");
2549af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2550af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2551af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2552af75078fSIntel 	rte_eal_mp_wait_lcore();
2553af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2554af75078fSIntel 	if (port_fwd_end != NULL) {
2555af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2556af75078fSIntel 			pt_id = fwd_ports_ids[i];
2557af75078fSIntel 			(*port_fwd_end)(pt_id);
2558af75078fSIntel 		}
2559af75078fSIntel 	}
2560c185d42cSDavid Marchand 
256153324971SDavid Marchand 	fwd_stats_display();
256258d475b7SJerin Jacob 
2563af75078fSIntel 	printf("\nDone.\n");
2564af75078fSIntel 	test_done = 1;
2565af75078fSIntel }
2566af75078fSIntel 
2567cfae07fdSOuyang Changchun void
2568cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2569cfae07fdSOuyang Changchun {
2570492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
257161a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link up failed.\n");
2572cfae07fdSOuyang Changchun }
2573cfae07fdSOuyang Changchun 
2574cfae07fdSOuyang Changchun void
2575cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2576cfae07fdSOuyang Changchun {
2577492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
257861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link down failed.\n");
2579cfae07fdSOuyang Changchun }
2580cfae07fdSOuyang Changchun 
2581ce8d5614SIntel static int
2582ce8d5614SIntel all_ports_started(void)
2583ce8d5614SIntel {
2584ce8d5614SIntel 	portid_t pi;
2585ce8d5614SIntel 	struct rte_port *port;
2586ce8d5614SIntel 
25877d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2588ce8d5614SIntel 		port = &ports[pi];
2589ce8d5614SIntel 		/* Check if there is a port which is not started */
259041b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
259115e34522SLong Wu 			(port->member_flag == 0))
2592ce8d5614SIntel 			return 0;
2593ce8d5614SIntel 	}
2594ce8d5614SIntel 
2595ce8d5614SIntel 	/* All ports are started */
2596ce8d5614SIntel 	return 1;
2597ce8d5614SIntel }
2598ce8d5614SIntel 
2599148f963fSBruce Richardson int
26006018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
26016018eb8cSShahaf Shuler {
26026018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
26036018eb8cSShahaf Shuler 
26046018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
260515e34522SLong Wu 	    (port->member_flag == 0))
26066018eb8cSShahaf Shuler 		return 0;
26076018eb8cSShahaf Shuler 	return 1;
26086018eb8cSShahaf Shuler }
26096018eb8cSShahaf Shuler 
26106018eb8cSShahaf Shuler int
2611edab33b1STetsuya Mukawa all_ports_stopped(void)
2612edab33b1STetsuya Mukawa {
2613edab33b1STetsuya Mukawa 	portid_t pi;
2614edab33b1STetsuya Mukawa 
26157d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
26166018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2617edab33b1STetsuya Mukawa 			return 0;
2618edab33b1STetsuya Mukawa 	}
2619edab33b1STetsuya Mukawa 
2620edab33b1STetsuya Mukawa 	return 1;
2621edab33b1STetsuya Mukawa }
2622edab33b1STetsuya Mukawa 
2623edab33b1STetsuya Mukawa int
2624edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2625edab33b1STetsuya Mukawa {
2626edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2627edab33b1STetsuya Mukawa 		return 0;
2628edab33b1STetsuya Mukawa 
2629edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2630edab33b1STetsuya Mukawa 		return 0;
2631edab33b1STetsuya Mukawa 
2632edab33b1STetsuya Mukawa 	return 1;
2633edab33b1STetsuya Mukawa }
2634edab33b1STetsuya Mukawa 
26352befc67fSViacheslav Ovsiienko /* Configure an Rx queue: single mempool, buffer split, or multiple mempools. */
26362befc67fSViacheslav Ovsiienko int
26372befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
26382befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
26392befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
26402befc67fSViacheslav Ovsiienko {
26412befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
26424f04edcdSHanumanth Pothula 	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
26434f04edcdSHanumanth Pothula 	struct rte_mempool *mpx;
26442befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
264554a0f4d7SYuan Wang 	uint32_t prev_hdrs = 0;
26462befc67fSViacheslav Ovsiienko 	int ret;
26472befc67fSViacheslav Ovsiienko 
2649a4bf5421SHanumanth Pothula 	if ((rx_pkt_nb_segs > 1) &&
2650a4bf5421SHanumanth Pothula 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
26514f04edcdSHanumanth Pothula 		/* multi-segment configuration */
26522befc67fSViacheslav Ovsiienko 		for (i = 0; i < rx_pkt_nb_segs; i++) {
26532befc67fSViacheslav Ovsiienko 			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
26542befc67fSViacheslav Ovsiienko 			/*
26552befc67fSViacheslav Ovsiienko 			 * Use the last valid pool for segments whose index
26562befc67fSViacheslav Ovsiienko 			 * exceeds the number of configured pools.
26572befc67fSViacheslav Ovsiienko 			 */
26581108c33eSRaja Zidane 			mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
26592befc67fSViacheslav Ovsiienko 			mpx = mbuf_pool_find(socket_id, mp_n);
26602befc67fSViacheslav Ovsiienko 			/* Handle zero as mbuf data buffer size. */
26612befc67fSViacheslav Ovsiienko 			rx_seg->offset = i < rx_pkt_nb_offs ?
26622befc67fSViacheslav Ovsiienko 					   rx_pkt_seg_offsets[i] : 0;
26632befc67fSViacheslav Ovsiienko 			rx_seg->mp = mpx ? mpx : mp;
266452e2e7edSYuan Wang 			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
266554a0f4d7SYuan Wang 				rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs;
266654a0f4d7SYuan Wang 				prev_hdrs |= rx_seg->proto_hdr;
266752e2e7edSYuan Wang 			} else {
266852e2e7edSYuan Wang 				rx_seg->length = rx_pkt_seg_lengths[i] ?
266952e2e7edSYuan Wang 						rx_pkt_seg_lengths[i] :
267052e2e7edSYuan Wang 						mbuf_data_size[mp_n];
267152e2e7edSYuan Wang 			}
26722befc67fSViacheslav Ovsiienko 		}
26732befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = rx_pkt_nb_segs;
26742befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = rx_useg;
2675a4bf5421SHanumanth Pothula 		rx_conf->rx_mempools = NULL;
2676a4bf5421SHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2677a4bf5421SHanumanth Pothula 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2678a4bf5421SHanumanth Pothula 				    socket_id, rx_conf, NULL);
2679a4bf5421SHanumanth Pothula 		rx_conf->rx_seg = NULL;
2680a4bf5421SHanumanth Pothula 		rx_conf->rx_nseg = 0;
2681a4bf5421SHanumanth Pothula 	} else if (multi_rx_mempool == 1) {
26824f04edcdSHanumanth Pothula 		/* multi-pool configuration */
2683a4bf5421SHanumanth Pothula 		struct rte_eth_dev_info dev_info;
2684a4bf5421SHanumanth Pothula 
2685a4bf5421SHanumanth Pothula 		if (mbuf_data_size_n <= 1) {
2686a4bf5421SHanumanth Pothula 			fprintf(stderr, "Invalid number of mempools %u\n",
2687a4bf5421SHanumanth Pothula 				mbuf_data_size_n);
2688a4bf5421SHanumanth Pothula 			return -EINVAL;
2689a4bf5421SHanumanth Pothula 		}
2690a4bf5421SHanumanth Pothula 		ret = rte_eth_dev_info_get(port_id, &dev_info);
2691a4bf5421SHanumanth Pothula 		if (ret != 0)
2692a4bf5421SHanumanth Pothula 			return ret;
2693a4bf5421SHanumanth Pothula 		if (dev_info.max_rx_mempools == 0) {
2694a4bf5421SHanumanth Pothula 			fprintf(stderr,
2695a4bf5421SHanumanth Pothula 				"Port %u doesn't support requested multi-rx-mempool configuration.\n",
2696a4bf5421SHanumanth Pothula 				port_id);
2697a4bf5421SHanumanth Pothula 			return -ENOTSUP;
2698a4bf5421SHanumanth Pothula 		}
26994f04edcdSHanumanth Pothula 		for (i = 0; i < mbuf_data_size_n; i++) {
27004f04edcdSHanumanth Pothula 			mpx = mbuf_pool_find(socket_id, i);
27014f04edcdSHanumanth Pothula 			rx_mempool[i] = mpx ? mpx : mp;
27024f04edcdSHanumanth Pothula 		}
27034f04edcdSHanumanth Pothula 		rx_conf->rx_mempools = rx_mempool;
27044f04edcdSHanumanth Pothula 		rx_conf->rx_nmempool = mbuf_data_size_n;
2705a4bf5421SHanumanth Pothula 		rx_conf->rx_seg = NULL;
2706a4bf5421SHanumanth Pothula 		rx_conf->rx_nseg = 0;
27072befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
27082befc67fSViacheslav Ovsiienko 				    socket_id, rx_conf, NULL);
2709a4bf5421SHanumanth Pothula 		rx_conf->rx_mempools = NULL;
2710a4bf5421SHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2711a4bf5421SHanumanth Pothula 	} else {
2712a4bf5421SHanumanth Pothula 		/* Single pool/segment configuration */
27132befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
27142befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
27154f04edcdSHanumanth Pothula 		rx_conf->rx_mempools = NULL;
27164f04edcdSHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2717a4bf5421SHanumanth Pothula 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2718a4bf5421SHanumanth Pothula 				    socket_id, rx_conf, mp);
2719a4bf5421SHanumanth Pothula 	}
2720a4bf5421SHanumanth Pothula 
27213c4426dbSDmitry Kozlyuk 	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
27223c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
27233c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
27242befc67fSViacheslav Ovsiienko 	return ret;
27252befc67fSViacheslav Ovsiienko }
27262befc67fSViacheslav Ovsiienko 
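/*
 * Allocate the per-port helper arrays (supported ids, previous and
 * current values) used to show the configured xstats_display set.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */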
272763b72657SIvan Ilchenko static int
272863b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi)
272963b72657SIvan Ilchenko {
273063b72657SIvan Ilchenko 	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
273163b72657SIvan Ilchenko 	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
273263b72657SIvan Ilchenko 	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
273363b72657SIvan Ilchenko 
273463b72657SIvan Ilchenko 	if (xstats_display_num == 0)
273563b72657SIvan Ilchenko 		return 0;
273663b72657SIvan Ilchenko 
273763b72657SIvan Ilchenko 	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
273863b72657SIvan Ilchenko 	if (*ids_supp == NULL)
273963b72657SIvan Ilchenko 		goto fail_ids_supp;
274063b72657SIvan Ilchenko 
274163b72657SIvan Ilchenko 	*prev_values = calloc(xstats_display_num,
274263b72657SIvan Ilchenko 			      sizeof(**prev_values));
274363b72657SIvan Ilchenko 	if (*prev_values == NULL)
274463b72657SIvan Ilchenko 		goto fail_prev_values;
274563b72657SIvan Ilchenko 
274663b72657SIvan Ilchenko 	*curr_values = calloc(xstats_display_num,
274763b72657SIvan Ilchenko 			      sizeof(**curr_values));
274863b72657SIvan Ilchenko 	if (*curr_values == NULL)
274963b72657SIvan Ilchenko 		goto fail_curr_values;
275063b72657SIvan Ilchenko 
275163b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = true;
275263b72657SIvan Ilchenko 
275363b72657SIvan Ilchenko 	return 0;
275463b72657SIvan Ilchenko 
275563b72657SIvan Ilchenko fail_curr_values:
275663b72657SIvan Ilchenko 	free(*prev_values);
275763b72657SIvan Ilchenko fail_prev_values:
275863b72657SIvan Ilchenko 	free(*ids_supp);
275963b72657SIvan Ilchenko fail_ids_supp:
276063b72657SIvan Ilchenko 	return -ENOMEM;
276163b72657SIvan Ilchenko }
276263b72657SIvan Ilchenko 
276363b72657SIvan Ilchenko static void
276463b72657SIvan Ilchenko free_xstats_display_info(portid_t pi)
276563b72657SIvan Ilchenko {
276663b72657SIvan Ilchenko 	if (!ports[pi].xstats_info.allocated)
276763b72657SIvan Ilchenko 		return;
276863b72657SIvan Ilchenko 	free(ports[pi].xstats_info.ids_supp);
276963b72657SIvan Ilchenko 	free(ports[pi].xstats_info.prev_values);
277063b72657SIvan Ilchenko 	free(ports[pi].xstats_info.curr_values);
277163b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = false;
277263b72657SIvan Ilchenko }
277363b72657SIvan Ilchenko 
277463b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. */
277563b72657SIvan Ilchenko static void
277663b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
277763b72657SIvan Ilchenko {
277863b72657SIvan Ilchenko 	unsigned int stat, stat_supp;
277963b72657SIvan Ilchenko 	const char *xstat_name;
278063b72657SIvan Ilchenko 	struct rte_port *port;
278163b72657SIvan Ilchenko 	uint64_t *ids_supp;
278263b72657SIvan Ilchenko 	int rc;
278363b72657SIvan Ilchenko 
278463b72657SIvan Ilchenko 	if (xstats_display_num == 0)
278563b72657SIvan Ilchenko 		return;
278663b72657SIvan Ilchenko 
278763b72657SIvan Ilchenko 	if (pi == (portid_t)RTE_PORT_ALL) {
278863b72657SIvan Ilchenko 		fill_xstats_display_info();
278963b72657SIvan Ilchenko 		return;
279063b72657SIvan Ilchenko 	}
279163b72657SIvan Ilchenko 
279263b72657SIvan Ilchenko 	port = &ports[pi];
279363b72657SIvan Ilchenko 	if (port->port_status != RTE_PORT_STARTED)
279463b72657SIvan Ilchenko 		return;
279563b72657SIvan Ilchenko 
279663b72657SIvan Ilchenko 	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
279763b72657SIvan Ilchenko 		rte_exit(EXIT_FAILURE,
279863b72657SIvan Ilchenko 			 "Failed to allocate xstats display memory\n");
279963b72657SIvan Ilchenko 
280063b72657SIvan Ilchenko 	ids_supp = port->xstats_info.ids_supp;
280163b72657SIvan Ilchenko 	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
280263b72657SIvan Ilchenko 		xstat_name = xstats_display[stat].name;
280363b72657SIvan Ilchenko 		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
280463b72657SIvan Ilchenko 						   ids_supp + stat_supp);
280563b72657SIvan Ilchenko 		if (rc != 0) {
280663b72657SIvan Ilchenko 			fprintf(stderr, "No xstat '%s' on port %u - skipping stat %u\n",
280763b72657SIvan Ilchenko 				xstat_name, pi, stat);
280863b72657SIvan Ilchenko 			continue;
280963b72657SIvan Ilchenko 		}
281063b72657SIvan Ilchenko 		stat_supp++;
281163b72657SIvan Ilchenko 	}
281263b72657SIvan Ilchenko 
281363b72657SIvan Ilchenko 	port->xstats_info.ids_supp_sz = stat_supp;
281463b72657SIvan Ilchenko }
281563b72657SIvan Ilchenko 
281663b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */
281763b72657SIvan Ilchenko static void
281863b72657SIvan Ilchenko fill_xstats_display_info(void)
281963b72657SIvan Ilchenko {
282063b72657SIvan Ilchenko 	portid_t pi;
282163b72657SIvan Ilchenko 
282263b72657SIvan Ilchenko 	if (xstats_display_num == 0)
282363b72657SIvan Ilchenko 		return;
282463b72657SIvan Ilchenko 
282563b72657SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(pi)
282663b72657SIvan Ilchenko 		fill_xstats_display_info_for_port(pi);
282763b72657SIvan Ilchenko }
282863b72657SIvan Ilchenko 
28297c06f1abSHuisong Li /*
28307c06f1abSHuisong Li  * Some capabilities (such as rx_offload_capa and tx_offload_capa) of a
283115e34522SLong Wu  * bonding device reported in dev_info are zero when no member has been
283215e34522SLong Wu  * added, and they are updated when a new member device is added. Hence,
28337c06f1abSHuisong Li  * adding a member requires updating the bonding device's configuration.
28347c06f1abSHuisong Li  */
28357c06f1abSHuisong Li static void
28367c06f1abSHuisong Li update_bonding_port_dev_conf(portid_t bond_pid)
28377c06f1abSHuisong Li {
28387c06f1abSHuisong Li #ifdef RTE_NET_BOND
28397c06f1abSHuisong Li 	struct rte_port *port = &ports[bond_pid];
28407c06f1abSHuisong Li 	uint16_t i;
28417c06f1abSHuisong Li 	int ret;
28427c06f1abSHuisong Li 
28437c06f1abSHuisong Li 	ret = eth_dev_info_get_print_err(bond_pid, &port->dev_info);
28447c06f1abSHuisong Li 	if (ret != 0) {
28457c06f1abSHuisong Li 		fprintf(stderr, "Failed to get dev info for port = %u\n",
28467c06f1abSHuisong Li 			bond_pid);
28477c06f1abSHuisong Li 		return;
28487c06f1abSHuisong Li 	}
28497c06f1abSHuisong Li 
28507c06f1abSHuisong Li 	if (port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
28517c06f1abSHuisong Li 		port->dev_conf.txmode.offloads |=
28527c06f1abSHuisong Li 				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
28537c06f1abSHuisong Li 	/* Apply Tx offloads configuration */
28547c06f1abSHuisong Li 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
28557c06f1abSHuisong Li 		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
28567c06f1abSHuisong Li 
28577c06f1abSHuisong Li 	port->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
28587c06f1abSHuisong Li 				port->dev_info.flow_type_rss_offloads;
28597c06f1abSHuisong Li #else
28607c06f1abSHuisong Li 	RTE_SET_USED(bond_pid);
28617c06f1abSHuisong Li #endif
28627c06f1abSHuisong Li }
28637c06f1abSHuisong Li 
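/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL:
 * reconfigure the device and its Rx/Tx/hairpin queues if needed, start
 * the device, refresh the cached queue states and optionally bind
 * hairpin peers. Returns 0 on success, -1 on a fatal setup error.
 */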
2864edab33b1STetsuya Mukawa int
2865ce8d5614SIntel start_port(portid_t pid)
2866ce8d5614SIntel {
2867cdede073SFerruh Yigit 	int diag;
2868ce8d5614SIntel 	portid_t pi;
286901817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
287001817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
287101817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
287201817b10SBing Zhao 	uint16_t cnt_pi = 0;
287301817b10SBing Zhao 	uint16_t cfg_pi = 0;
2874ce8d5614SIntel 	queueid_t qi;
2875ce8d5614SIntel 	struct rte_port *port;
28761c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
2877cdede073SFerruh Yigit 	bool at_least_one_port_exist = false;
2878cdede073SFerruh Yigit 	bool all_ports_already_started = true;
2879cdede073SFerruh Yigit 	bool at_least_one_port_successfully_started = false;
2880ce8d5614SIntel 
28814468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
28824468635fSMichael Qiu 		return 0;
28834468635fSMichael Qiu 
28847d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2885edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2886ce8d5614SIntel 			continue;
2887ce8d5614SIntel 
288815e34522SLong Wu 		if (port_is_bonding_member(pi)) {
2889d8c079a5SMin Hu (Connor) 			fprintf(stderr,
28904f840086SLong Wu 				"Please remove port %d from bonding device.\n",
2891d8c079a5SMin Hu (Connor) 				pi);
2892d8c079a5SMin Hu (Connor) 			continue;
2893d8c079a5SMin Hu (Connor) 		}
2894d8c079a5SMin Hu (Connor) 
2895cdede073SFerruh Yigit 		at_least_one_port_exist = true;
2896cdede073SFerruh Yigit 
2897ce8d5614SIntel 		port = &ports[pi];
2898cdede073SFerruh Yigit 		if (port->port_status == RTE_PORT_STOPPED) {
2899eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
2900cdede073SFerruh Yigit 			all_ports_already_started = false;
2901cdede073SFerruh Yigit 		} else {
290261a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is not stopped\n", pi);
2903ce8d5614SIntel 			continue;
2904ce8d5614SIntel 		}
2905ce8d5614SIntel 
2906ce8d5614SIntel 		if (port->need_reconfig > 0) {
2907655eae01SJie Wang 			struct rte_eth_conf dev_conf;
2908655eae01SJie Wang 			int k;
2909655eae01SJie Wang 
2910ce8d5614SIntel 			port->need_reconfig = 0;
2911ce8d5614SIntel 
29127ee3e944SVasily Philipov 			if (flow_isolate_all) {
29137ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
29147ee3e944SVasily Philipov 				if (ret) {
291561a3b0e5SAndrew Rybchenko 					fprintf(stderr,
291661a3b0e5SAndrew Rybchenko 						"Failed to apply isolated mode on port %d\n",
291761a3b0e5SAndrew Rybchenko 						pi);
29187ee3e944SVasily Philipov 					return -1;
29197ee3e944SVasily Philipov 				}
29207ee3e944SVasily Philipov 			}
2921b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
29225706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
292320a0286fSLiu Xiaofeng 					port->socket_id);
29241c69df45SOri Kam 			if (nb_hairpinq > 0 &&
29251c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
292661a3b0e5SAndrew Rybchenko 				fprintf(stderr,
292761a3b0e5SAndrew Rybchenko 					"Port %d doesn't support hairpin queues\n",
292861a3b0e5SAndrew Rybchenko 					pi);
29291c69df45SOri Kam 				return -1;
29301c69df45SOri Kam 			}
29311bb4a528SFerruh Yigit 
29327c06f1abSHuisong Li 			if (port->bond_flag == 1 && port->update_conf == 1) {
29337c06f1abSHuisong Li 				update_bonding_port_dev_conf(pi);
29347c06f1abSHuisong Li 				port->update_conf = 0;
29357c06f1abSHuisong Li 			}
29367c06f1abSHuisong Li 
2937ce8d5614SIntel 			/* configure port */
2938a550baf2SMin Hu (Connor) 			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
29391c69df45SOri Kam 						     nb_txq + nb_hairpinq,
2940ce8d5614SIntel 						     &(port->dev_conf));
2941ce8d5614SIntel 			if (diag != 0) {
2942eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
2943eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
2944eac341d3SJoyce Kong 				else
294561a3b0e5SAndrew Rybchenko 					fprintf(stderr,
294661a3b0e5SAndrew Rybchenko 					"Port %d cannot be set back to stopped\n",
294761a3b0e5SAndrew Rybchenko 					pi);
294861a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Failed to configure port %d\n",
294961a3b0e5SAndrew Rybchenko 					pi);
2950ce8d5614SIntel 				/* try to reconfigure port next time */
2951ce8d5614SIntel 				port->need_reconfig = 1;
2952148f963fSBruce Richardson 				return -1;
2953ce8d5614SIntel 			}
2954655eae01SJie Wang 			/* get device configuration */
2955655eae01SJie Wang 			if (0 !=
2956655eae01SJie Wang 				eth_dev_conf_get_print_err(pi, &dev_conf)) {
2957655eae01SJie Wang 				fprintf(stderr,
2958655eae01SJie Wang 					"port %d: cannot get device configuration\n",
2959655eae01SJie Wang 					pi);
2960655eae01SJie Wang 				return -1;
2961655eae01SJie Wang 			}
2962655eae01SJie Wang 			/* Apply Rx offloads configuration */
2963655eae01SJie Wang 			if (dev_conf.rxmode.offloads !=
2964655eae01SJie Wang 			    port->dev_conf.rxmode.offloads) {
2965655eae01SJie Wang 				port->dev_conf.rxmode.offloads |=
2966655eae01SJie Wang 					dev_conf.rxmode.offloads;
2967655eae01SJie Wang 				for (k = 0;
2968655eae01SJie Wang 				     k < port->dev_info.max_rx_queues;
2969655eae01SJie Wang 				     k++)
29703c4426dbSDmitry Kozlyuk 					port->rxq[k].conf.offloads |=
2971655eae01SJie Wang 						dev_conf.rxmode.offloads;
2972655eae01SJie Wang 			}
2973655eae01SJie Wang 			/* Apply Tx offloads configuration */
2974655eae01SJie Wang 			if (dev_conf.txmode.offloads !=
2975655eae01SJie Wang 			    port->dev_conf.txmode.offloads) {
2976655eae01SJie Wang 				port->dev_conf.txmode.offloads |=
2977655eae01SJie Wang 					dev_conf.txmode.offloads;
2978655eae01SJie Wang 				for (k = 0;
2979655eae01SJie Wang 				     k < port->dev_info.max_tx_queues;
2980655eae01SJie Wang 				     k++)
29813c4426dbSDmitry Kozlyuk 					port->txq[k].conf.offloads |=
2982655eae01SJie Wang 						dev_conf.txmode.offloads;
2983655eae01SJie Wang 			}
2984ce8d5614SIntel 		}
2985a550baf2SMin Hu (Connor) 		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2986ce8d5614SIntel 			port->need_reconfig_queues = 0;
2987ce8d5614SIntel 			/* setup tx queues */
2988ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
29893c4426dbSDmitry Kozlyuk 				struct rte_eth_txconf *conf =
29903c4426dbSDmitry Kozlyuk 							&port->txq[qi].conf;
29913c4426dbSDmitry Kozlyuk 
2992b6ea6408SIntel 				if ((numa_support) &&
2993b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2994b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2995d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2996d44f8a48SQi Zhang 						txring_numa[pi],
29973c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
2998b6ea6408SIntel 				else
2999b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
3000d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
3001d44f8a48SQi Zhang 						port->socket_id,
30023c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
3003b6ea6408SIntel 
30043c4426dbSDmitry Kozlyuk 				if (diag == 0) {
30053c4426dbSDmitry Kozlyuk 					port->txq[qi].state =
30063c4426dbSDmitry Kozlyuk 						conf->tx_deferred_start ?
30073c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
30083c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
3009ce8d5614SIntel 					continue;
30103c4426dbSDmitry Kozlyuk 				}
3011ce8d5614SIntel 
3012ce8d5614SIntel 				/* Failed to set up Tx queue, return */
3013eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3014eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3015eac341d3SJoyce Kong 				else
301661a3b0e5SAndrew Rybchenko 					fprintf(stderr,
301761a3b0e5SAndrew Rybchenko 						"Port %d cannot be set back to stopped\n",
301861a3b0e5SAndrew Rybchenko 						pi);
301961a3b0e5SAndrew Rybchenko 				fprintf(stderr,
302061a3b0e5SAndrew Rybchenko 					"Failed to configure port %d Tx queues\n",
3021d44f8a48SQi Zhang 					pi);
3022ce8d5614SIntel 				/* try to reconfigure queues next time */
3023ce8d5614SIntel 				port->need_reconfig_queues = 1;
3024148f963fSBruce Richardson 				return -1;
3025ce8d5614SIntel 			}
3026ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
3027d44f8a48SQi Zhang 				/* setup rx queues */
3028b6ea6408SIntel 				if ((numa_support) &&
3029b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
3030b6ea6408SIntel 					struct rte_mempool * mp =
303126cbb419SViacheslav Ovsiienko 						mbuf_pool_find
303226cbb419SViacheslav Ovsiienko 							(rxring_numa[pi], 0);
3033b6ea6408SIntel 					if (mp == NULL) {
303461a3b0e5SAndrew Rybchenko 						fprintf(stderr,
303561a3b0e5SAndrew Rybchenko 							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
3036b6ea6408SIntel 							rxring_numa[pi]);
3037148f963fSBruce Richardson 						return -1;
3038b6ea6408SIntel 					}
3039b6ea6408SIntel 
30402befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
3041d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
3042d44f8a48SQi Zhang 					     rxring_numa[pi],
30433c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
3044d44f8a48SQi Zhang 					     mp);
30451e1d6bddSBernard Iremonger 				} else {
30461e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
304726cbb419SViacheslav Ovsiienko 						mbuf_pool_find
304826cbb419SViacheslav Ovsiienko 							(port->socket_id, 0);
30491e1d6bddSBernard Iremonger 					if (mp == NULL) {
305061a3b0e5SAndrew Rybchenko 						fprintf(stderr,
305161a3b0e5SAndrew Rybchenko 							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
30521e1d6bddSBernard Iremonger 							port->socket_id);
30531e1d6bddSBernard Iremonger 						return -1;
3054b6ea6408SIntel 					}
30552befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
3056d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
3057d44f8a48SQi Zhang 					     port->socket_id,
30583c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
3059d44f8a48SQi Zhang 					     mp);
30601e1d6bddSBernard Iremonger 				}
3061ce8d5614SIntel 				if (diag == 0)
3062ce8d5614SIntel 					continue;
3063ce8d5614SIntel 
3064ce8d5614SIntel 				/* Failed to set up Rx queue, return */
3065eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3066eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3067eac341d3SJoyce Kong 				else
306861a3b0e5SAndrew Rybchenko 					fprintf(stderr,
306961a3b0e5SAndrew Rybchenko 						"Port %d cannot be set back to stopped\n",
307061a3b0e5SAndrew Rybchenko 						pi);
307161a3b0e5SAndrew Rybchenko 				fprintf(stderr,
307261a3b0e5SAndrew Rybchenko 					"Failed to configure port %d Rx queues\n",
3073d44f8a48SQi Zhang 					pi);
3074ce8d5614SIntel 				/* try to reconfigure queues next time */
3075ce8d5614SIntel 				port->need_reconfig_queues = 1;
3076148f963fSBruce Richardson 				return -1;
3077ce8d5614SIntel 			}
30781c69df45SOri Kam 			/* setup hairpin queues */
307901817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
30801c69df45SOri Kam 				return -1;
3081ce8d5614SIntel 		}
3082b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
3083b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
3084b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3085b0a9354aSPavan Nikhilesh 					NULL, 0);
3086b0a9354aSPavan Nikhilesh 			if (diag < 0)
308761a3b0e5SAndrew Rybchenko 				fprintf(stderr,
3088b0a9354aSPavan Nikhilesh 					"Port %d: Failed to disable Ptype parsing\n",
3089b0a9354aSPavan Nikhilesh 					pi);
3090b0a9354aSPavan Nikhilesh 		}
3091b0a9354aSPavan Nikhilesh 
309201817b10SBing Zhao 		p_pi = pi;
309301817b10SBing Zhao 		cnt_pi++;
309401817b10SBing Zhao 
3095ce8d5614SIntel 		/* start port */
3096a550baf2SMin Hu (Connor) 		diag = eth_dev_start_mp(pi);
309752f2c6f2SAndrew Rybchenko 		if (diag < 0) {
309861a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Failed to start port %d: %s\n",
309961a3b0e5SAndrew Rybchenko 				pi, rte_strerror(-diag));
3100ce8d5614SIntel 
3101ce8d5614SIntel 			/* Failed to start the port, set it back to stopped */
3102eac341d3SJoyce Kong 			if (port->port_status == RTE_PORT_HANDLING)
3103eac341d3SJoyce Kong 				port->port_status = RTE_PORT_STOPPED;
3104eac341d3SJoyce Kong 			else
310561a3b0e5SAndrew Rybchenko 				fprintf(stderr,
310661a3b0e5SAndrew Rybchenko 					"Port %d cannot be set back to stopped\n",
310761a3b0e5SAndrew Rybchenko 					pi);
3108ce8d5614SIntel 			continue;
3109ce8d5614SIntel 		}
3110ce8d5614SIntel 
3111eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3112eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STARTED;
3113eac341d3SJoyce Kong 		else
311461a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d cannot be set to started\n",
311561a3b0e5SAndrew Rybchenko 				pi);
3116ce8d5614SIntel 
31175ffc4a2aSYuying Zhang 		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3118c2c4f87bSAman Deep Singh 			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3119a7db3afcSAman Deep Singh 					RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3120d8c89163SZijie Pan 
3121cdede073SFerruh Yigit 		at_least_one_port_successfully_started = true;
312201817b10SBing Zhao 
312301817b10SBing Zhao 		pl[cfg_pi++] = pi;
3124ce8d5614SIntel 	}
3125ce8d5614SIntel 
312637b68fa1SJie Hai 	update_queue_state(pid);
31275028f207SShiyang He 
3128cdede073SFerruh Yigit 	if (at_least_one_port_successfully_started && !no_link_check)
3129edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3130cdede073SFerruh Yigit 	else if (at_least_one_port_exist && all_ports_already_started)
313161a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Please stop the ports first\n");
3132ce8d5614SIntel 
313301817b10SBing Zhao 	if (hairpin_mode & 0xf) {
31345334c3feSGregory Etelson 		diag = hairpin_bind(cfg_pi, pl, peer_pl);
31355334c3feSGregory Etelson 		if (diag < 0)
313601817b10SBing Zhao 			return -1;
313701817b10SBing Zhao 	}
313801817b10SBing Zhao 
313963b72657SIvan Ilchenko 	fill_xstats_display_info_for_port(pid);
314063b72657SIvan Ilchenko 
3141ce8d5614SIntel 	printf("Done\n");
3142148f963fSBruce Richardson 	return 0;
3143ce8d5614SIntel }
3144ce8d5614SIntel 
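/*
 * Stop the given port, or all ports when pid is RTE_PORT_ALL: unbind
 * hairpin peers if any, flush the port's flow rules unless flushing is
 * disabled, and stop the device, allowing a retry if the stop fails.
 */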
3145ce8d5614SIntel void
3146ce8d5614SIntel stop_port(portid_t pid)
3147ce8d5614SIntel {
3148ce8d5614SIntel 	portid_t pi;
3149ce8d5614SIntel 	struct rte_port *port;
3150ce8d5614SIntel 	int need_check_link_status = 0;
315101817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
315201817b10SBing Zhao 	int peer_pi;
315347a4e1fbSDariusz Sosnowski 	int ret;
3154ce8d5614SIntel 
31554468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
31564468635fSMichael Qiu 		return;
31574468635fSMichael Qiu 
3158ce8d5614SIntel 	printf("Stopping ports...\n");
3159ce8d5614SIntel 
31607d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
31614468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3162ce8d5614SIntel 			continue;
3163ce8d5614SIntel 
3164a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
316561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
316661a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
316761a3b0e5SAndrew Rybchenko 				pi);
3168a8ef3e3aSBernard Iremonger 			continue;
3169a8ef3e3aSBernard Iremonger 		}
3170a8ef3e3aSBernard Iremonger 
317115e34522SLong Wu 		if (port_is_bonding_member(pi)) {
317261a3b0e5SAndrew Rybchenko 			fprintf(stderr,
31734f840086SLong Wu 				"Please remove port %d from bonding device.\n",
317461a3b0e5SAndrew Rybchenko 				pi);
31750e545d30SBernard Iremonger 			continue;
31760e545d30SBernard Iremonger 		}
31770e545d30SBernard Iremonger 
3178ce8d5614SIntel 		port = &ports[pi];
3179eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_STARTED)
3180eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
3181eac341d3SJoyce Kong 		else
3182ce8d5614SIntel 			continue;
3183ce8d5614SIntel 
318401817b10SBing Zhao 		if (hairpin_mode & 0xf) {
318501817b10SBing Zhao 			int j;
318601817b10SBing Zhao 
318701817b10SBing Zhao 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
318801817b10SBing Zhao 			/* unbind all peer Tx from current Rx */
318901817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
319001817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
319101817b10SBing Zhao 			if (peer_pi < 0)
319201817b10SBing Zhao 				continue;
319301817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
319401817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
319501817b10SBing Zhao 					continue;
319601817b10SBing Zhao 				rte_eth_hairpin_unbind(peer_pl[j], pi);
319701817b10SBing Zhao 			}
319801817b10SBing Zhao 		}
319901817b10SBing Zhao 
3200543df472SChengwen Feng 		if (port->flow_list && !no_flow_flush)
32010f93edbfSGregory Etelson 			port_flow_flush(pi);
32020f93edbfSGregory Etelson 
320347a4e1fbSDariusz Sosnowski 		ret = eth_dev_stop_mp(pi);
320447a4e1fbSDariusz Sosnowski 		if (ret != 0) {
32058570d76cSStephen Hemminger 			TESTPMD_LOG(ERR,
32068570d76cSStephen Hemminger 				    "rte_eth_dev_stop failed for port %u\n", pi);
320747a4e1fbSDariusz Sosnowski 			/* Allow to retry stopping the port. */
320847a4e1fbSDariusz Sosnowski 			port->port_status = RTE_PORT_STARTED;
320947a4e1fbSDariusz Sosnowski 			continue;
321047a4e1fbSDariusz Sosnowski 		}
3211ce8d5614SIntel 
3212eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3213eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
3214eac341d3SJoyce Kong 		else
321561a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d cannot be set to stopped\n",
321661a3b0e5SAndrew Rybchenko 				pi);
3217ce8d5614SIntel 		need_check_link_status = 1;
3218ce8d5614SIntel 	}
3219bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
3220edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3221ce8d5614SIntel 
3222ce8d5614SIntel 	printf("Done\n");
3223ce8d5614SIntel }
3224ce8d5614SIntel 
3225ce6959bfSWisam Jaddo static void
32264f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
3227ce6959bfSWisam Jaddo {
32284f1de450SThomas Monjalon 	portid_t i;
32294f1de450SThomas Monjalon 	portid_t new_total = 0;
3230ce6959bfSWisam Jaddo 
32314f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
32324f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
32334f1de450SThomas Monjalon 			array[new_total] = array[i];
32344f1de450SThomas Monjalon 			new_total++;
3235ce6959bfSWisam Jaddo 		}
32364f1de450SThomas Monjalon 	*total = new_total;
32374f1de450SThomas Monjalon }
32384f1de450SThomas Monjalon 
32394f1de450SThomas Monjalon static void
32404f1de450SThomas Monjalon remove_invalid_ports(void)
32414f1de450SThomas Monjalon {
32424f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
32434f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
32444f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
3245ce6959bfSWisam Jaddo }
3246ce6959bfSWisam Jaddo 
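/*
 * Release the flow-related resources owned by a port (flow rules,
 * template tables, pattern/actions templates, flex items, indirect
 * action handles and the multicast address pool) before the port is
 * closed.
 */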
32473889a322SHuisong Li static void
32484b27989dSDmitry Kozlyuk flush_port_owned_resources(portid_t pi)
32494b27989dSDmitry Kozlyuk {
32504b27989dSDmitry Kozlyuk 	mcast_addr_pool_destroy(pi);
32514b27989dSDmitry Kozlyuk 	port_flow_flush(pi);
32526d736e05SSuanming Mou 	port_flow_template_table_flush(pi);
32536d736e05SSuanming Mou 	port_flow_pattern_template_flush(pi);
32546d736e05SSuanming Mou 	port_flow_actions_template_flush(pi);
3255653c0812SRongwei Liu 	port_flex_item_flush(pi);
32564b27989dSDmitry Kozlyuk 	port_action_handle_flush(pi);
32574b27989dSDmitry Kozlyuk }
32584b27989dSDmitry Kozlyuk 
32594b27989dSDmitry Kozlyuk static void
326015e34522SLong Wu clear_bonding_member_device(portid_t *member_pids, uint16_t num_members)
32613889a322SHuisong Li {
32623889a322SHuisong Li 	struct rte_port *port;
326315e34522SLong Wu 	portid_t member_pid;
32643889a322SHuisong Li 	uint16_t i;
32653889a322SHuisong Li 
326615e34522SLong Wu 	for (i = 0; i < num_members; i++) {
326715e34522SLong Wu 		member_pid = member_pids[i];
326815e34522SLong Wu 		if (port_is_started(member_pid) == 1) {
326915e34522SLong Wu 			if (rte_eth_dev_stop(member_pid) != 0)
32703889a322SHuisong Li 				fprintf(stderr, "rte_eth_dev_stop failed for port %u\n",
327115e34522SLong Wu 					member_pid);
32723889a322SHuisong Li 
327315e34522SLong Wu 			port = &ports[member_pid];
32743889a322SHuisong Li 			port->port_status = RTE_PORT_STOPPED;
32753889a322SHuisong Li 		}
32763889a322SHuisong Li 
327715e34522SLong Wu 		clear_port_member_flag(member_pid);
32783889a322SHuisong Li 
327915e34522SLong Wu 		/* Close member device when testpmd quit or is killed. */
32803889a322SHuisong Li 		if (cl_quit == 1 || f_quit == 1)
328115e34522SLong Wu 			rte_eth_dev_close(member_pid);
32823889a322SHuisong Li 	}
32833889a322SHuisong Li }
32843889a322SHuisong Li 
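/*
 * Close the given port, or all ports when pid is RTE_PORT_ALL. In the
 * primary process this flushes the port's flow resources, closes the
 * device and, for a bonding device, also stops or closes its members.
 */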
3285ce8d5614SIntel void
3286ce8d5614SIntel close_port(portid_t pid)
3287ce8d5614SIntel {
3288ce8d5614SIntel 	portid_t pi;
3289ce8d5614SIntel 	struct rte_port *port;
329015e34522SLong Wu 	portid_t member_pids[RTE_MAX_ETHPORTS];
329115e34522SLong Wu 	int num_members = 0;
3292ce8d5614SIntel 
32934468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
32944468635fSMichael Qiu 		return;
32954468635fSMichael Qiu 
3296ce8d5614SIntel 	printf("Closing ports...\n");
3297ce8d5614SIntel 
32987d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
32994468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3300ce8d5614SIntel 			continue;
3301ce8d5614SIntel 
3302a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
330361a3b0e5SAndrew Rybchenko 			fprintf(stderr,
330461a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
330561a3b0e5SAndrew Rybchenko 				pi);
3306a8ef3e3aSBernard Iremonger 			continue;
3307a8ef3e3aSBernard Iremonger 		}
3308a8ef3e3aSBernard Iremonger 
330915e34522SLong Wu 		if (port_is_bonding_member(pi)) {
331061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
33114f840086SLong Wu 				"Please remove port %d from bonding device.\n",
331261a3b0e5SAndrew Rybchenko 				pi);
33130e545d30SBernard Iremonger 			continue;
33140e545d30SBernard Iremonger 		}
33150e545d30SBernard Iremonger 
3316ce8d5614SIntel 		port = &ports[pi];
3317eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_CLOSED) {
331861a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is already closed\n", pi);
3319d4e8ad64SMichael Qiu 			continue;
3320d4e8ad64SMichael Qiu 		}
3321d4e8ad64SMichael Qiu 
3322a550baf2SMin Hu (Connor) 		if (is_proc_primary()) {
33234b27989dSDmitry Kozlyuk 			flush_port_owned_resources(pi);
33243889a322SHuisong Li #ifdef RTE_NET_BOND
33253889a322SHuisong Li 			if (port->bond_flag == 1)
332615e34522SLong Wu 				num_members = rte_eth_bond_members_get(pi,
332715e34522SLong Wu 						member_pids, RTE_MAX_ETHPORTS);
33283889a322SHuisong Li #endif
3329ce8d5614SIntel 			rte_eth_dev_close(pi);
33303889a322SHuisong Li 			/*
33314f840086SLong Wu 			 * If this port is a bonding device, all members under the
33323889a322SHuisong Li 			 * device need to be removed or closed.
33333889a322SHuisong Li 			 */
333415e34522SLong Wu 			if (port->bond_flag == 1 && num_members > 0)
333515e34522SLong Wu 				clear_bonding_member_device(member_pids,
333615e34522SLong Wu 							num_members);
3337ce8d5614SIntel 		}
333863b72657SIvan Ilchenko 
333963b72657SIvan Ilchenko 		free_xstats_display_info(pi);
3340a550baf2SMin Hu (Connor) 	}
3341ce8d5614SIntel 
334285c6571cSThomas Monjalon 	remove_invalid_ports();
3343ce8d5614SIntel 	printf("Done\n");
3344ce8d5614SIntel }
3345ce8d5614SIntel 
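/*
 * Reset the given port, or all ports when pid is RTE_PORT_ALL, with
 * rte_eth_dev_reset(). All targeted ports must be stopped first; a
 * successful reset flags the port for full reconfiguration on the
 * next start.
 */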
3346edab33b1STetsuya Mukawa void
334797f1e196SWei Dai reset_port(portid_t pid)
334897f1e196SWei Dai {
334997f1e196SWei Dai 	int diag;
335097f1e196SWei Dai 	portid_t pi;
335197f1e196SWei Dai 	struct rte_port *port;
335297f1e196SWei Dai 
335397f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
335497f1e196SWei Dai 		return;
335597f1e196SWei Dai 
33561cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
33571cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
335861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
335961a3b0e5SAndrew Rybchenko 			"Cannot reset port(s), please stop port(s) first.\n");
33601cde1b9aSShougang Wang 		return;
33611cde1b9aSShougang Wang 	}
33621cde1b9aSShougang Wang 
336397f1e196SWei Dai 	printf("Resetting ports...\n");
336497f1e196SWei Dai 
336597f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
336697f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
336797f1e196SWei Dai 			continue;
336897f1e196SWei Dai 
336997f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
337061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
337161a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
337261a3b0e5SAndrew Rybchenko 				pi);
337397f1e196SWei Dai 			continue;
337497f1e196SWei Dai 		}
337597f1e196SWei Dai 
337615e34522SLong Wu 		if (port_is_bonding_member(pi)) {
337761a3b0e5SAndrew Rybchenko 			fprintf(stderr,
33784f840086SLong Wu 				"Please remove port %d from bonding device.\n",
337997f1e196SWei Dai 				pi);
338097f1e196SWei Dai 			continue;
338197f1e196SWei Dai 		}
338297f1e196SWei Dai 
3383e9351eaaSQiming Yang 		if (is_proc_primary()) {
338497f1e196SWei Dai 			diag = rte_eth_dev_reset(pi);
338597f1e196SWei Dai 			if (diag == 0) {
338697f1e196SWei Dai 				port = &ports[pi];
338797f1e196SWei Dai 				port->need_reconfig = 1;
338897f1e196SWei Dai 				port->need_reconfig_queues = 1;
338997f1e196SWei Dai 			} else {
339061a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Failed to reset port %d. diag=%d\n",
339161a3b0e5SAndrew Rybchenko 					pi, diag);
339297f1e196SWei Dai 			}
339397f1e196SWei Dai 		}
3394e9351eaaSQiming Yang 	}
339597f1e196SWei Dai 
339697f1e196SWei Dai 	printf("Done\n");
339797f1e196SWei Dai }
339897f1e196SWei Dai 
339997f1e196SWei Dai void
3400edab33b1STetsuya Mukawa attach_port(char *identifier)
3401ce8d5614SIntel {
34024f1ed78eSThomas Monjalon 	portid_t pi;
3403c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
3404ce8d5614SIntel 
3405edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
3406edab33b1STetsuya Mukawa 
3407edab33b1STetsuya Mukawa 	if (identifier == NULL) {
340861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Invalid parameter specified\n");
3409edab33b1STetsuya Mukawa 		return;
3410ce8d5614SIntel 	}
3411ce8d5614SIntel 
341275b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
3413c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3414edab33b1STetsuya Mukawa 		return;
3415c9cce428SThomas Monjalon 	}
3416c9cce428SThomas Monjalon 
34174f1ed78eSThomas Monjalon 	/* first attach mode: event */
34184f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
34194f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
34204f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
34214f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
34224f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
34234f1ed78eSThomas Monjalon 				setup_attached_port(pi);
34244f1ed78eSThomas Monjalon 		return;
34254f1ed78eSThomas Monjalon 	}
34264f1ed78eSThomas Monjalon 
34274f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
342886fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
34294f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
343086fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
343186fa5de1SThomas Monjalon 			continue; /* port was already attached before */
3432c9cce428SThomas Monjalon 		setup_attached_port(pi);
3433c9cce428SThomas Monjalon 	}
343486fa5de1SThomas Monjalon }
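
/*
 * Illustrative identifiers (hypothetical values) for attach_port(),
 * reached from the interactive prompt via "port attach <identifier>":
 *
 *   attach_port("0000:03:00.0");           a PCI device
 *   attach_port("net_tap0,iface=tap0");    a virtual device with devargs
 */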
3435c9cce428SThomas Monjalon 
3436c9cce428SThomas Monjalon static void
3437c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
3438c9cce428SThomas Monjalon {
3439c9cce428SThomas Monjalon 	unsigned int socket_id;
344034fc1051SIvan Ilchenko 	int ret;
3441edab33b1STetsuya Mukawa 
3442931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
344329841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
3444931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
344529841336SPhil Yang 		socket_id = socket_ids[0];
3446931126baSBernard Iremonger 	reconfig(pi, socket_id);
344734fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
344834fc1051SIvan Ilchenko 	if (ret != 0)
344961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
345061a3b0e5SAndrew Rybchenko 			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
345134fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
3452edab33b1STetsuya Mukawa 
34534f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
34544f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
34554f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
34564f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
3457edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
3458edab33b1STetsuya Mukawa 
3459edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
3460edab33b1STetsuya Mukawa 	printf("Done\n");
3461edab33b1STetsuya Mukawa }
3462edab33b1STetsuya Mukawa 
34630654d4a8SThomas Monjalon static void
34640654d4a8SThomas Monjalon detach_device(struct rte_device *dev)
34655f4ec54fSChen Jing D(Mark) {
3466f8e5baa2SThomas Monjalon 	portid_t sibling;
3467f8e5baa2SThomas Monjalon 
3468f8e5baa2SThomas Monjalon 	if (dev == NULL) {
346961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Device already removed\n");
3470f8e5baa2SThomas Monjalon 		return;
3471f8e5baa2SThomas Monjalon 	}
3472f8e5baa2SThomas Monjalon 
34730654d4a8SThomas Monjalon 	printf("Removing a device...\n");
3474938a184aSAdrien Mazarguil 
34752a449871SThomas Monjalon 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
34762a449871SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
34772a449871SThomas Monjalon 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
347861a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
347961a3b0e5SAndrew Rybchenko 					sibling);
34802a449871SThomas Monjalon 				return;
34812a449871SThomas Monjalon 			}
34824b27989dSDmitry Kozlyuk 			flush_port_owned_resources(sibling);
34832a449871SThomas Monjalon 		}
34842a449871SThomas Monjalon 	}
34852a449871SThomas Monjalon 
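	/*
	 * All sibling ports are stopped and flushed; rte_dev_remove()
	 * below detaches every port sharing this rte_device.
	 */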
348675b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
3487ec5ecd7eSDavid Marchand 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", rte_dev_name(dev));
3488edab33b1STetsuya Mukawa 		return;
34893070419eSGaetan Rivet 	}
34904f1de450SThomas Monjalon 	remove_invalid_ports();
349103ce2c53SMatan Azrad 
34920654d4a8SThomas Monjalon 	printf("Device is detached\n");
3493f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
3494edab33b1STetsuya Mukawa 	printf("Done\n");
34965f4ec54fSChen Jing D(Mark) }
34975f4ec54fSChen Jing D(Mark) 
3498af75078fSIntel void
34990654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
35000654d4a8SThomas Monjalon {
35010a0821bcSPaulis Gributs 	int ret;
35020a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
35030a0821bcSPaulis Gributs 
35040654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
35050654d4a8SThomas Monjalon 		return;
35060654d4a8SThomas Monjalon 
35070654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
35080654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
350961a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port not stopped\n");
35100654d4a8SThomas Monjalon 			return;
35110654d4a8SThomas Monjalon 		}
351261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Port was not closed\n");
35130654d4a8SThomas Monjalon 	}
35140654d4a8SThomas Monjalon 
35150a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
35160a0821bcSPaulis Gributs 	if (ret != 0) {
35170a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
35180a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
35190a0821bcSPaulis Gributs 			port_id);
35200a0821bcSPaulis Gributs 		return;
35210a0821bcSPaulis Gributs 	}
35220a0821bcSPaulis Gributs 	detach_device(dev_info.device);
35230654d4a8SThomas Monjalon }
35240654d4a8SThomas Monjalon 
35250654d4a8SThomas Monjalon void
35265edee5f6SThomas Monjalon detach_devargs(char *identifier)
352755e51c96SNithin Dabilpuram {
352855e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
352955e51c96SNithin Dabilpuram 	struct rte_devargs da;
353055e51c96SNithin Dabilpuram 	portid_t port_id;
353155e51c96SNithin Dabilpuram 
353255e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
353355e51c96SNithin Dabilpuram 
353455e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
353555e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
353661a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Cannot parse identifier\n");
353755e51c96SNithin Dabilpuram 		return;
353855e51c96SNithin Dabilpuram 	}
353955e51c96SNithin Dabilpuram 
354055e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
354155e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
354255e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
354361a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
354461a3b0e5SAndrew Rybchenko 					port_id);
3545149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
354664051bb1SXueming Li 				rte_devargs_reset(&da);
354755e51c96SNithin Dabilpuram 				return;
354855e51c96SNithin Dabilpuram 			}
35494b27989dSDmitry Kozlyuk 			flush_port_owned_resources(port_id);
355055e51c96SNithin Dabilpuram 		}
355155e51c96SNithin Dabilpuram 	}
355255e51c96SNithin Dabilpuram 
3553148c51a3SDavid Marchand 	if (rte_eal_hotplug_remove(rte_bus_name(da.bus), da.name) != 0) {
355455e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3555148c51a3SDavid Marchand 			    da.name, rte_bus_name(da.bus));
355664051bb1SXueming Li 		rte_devargs_reset(&da);
355755e51c96SNithin Dabilpuram 		return;
355855e51c96SNithin Dabilpuram 	}
355955e51c96SNithin Dabilpuram 
356055e51c96SNithin Dabilpuram 	remove_invalid_ports();
356155e51c96SNithin Dabilpuram 
356255e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
356355e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
356455e51c96SNithin Dabilpuram 	printf("Done\n");
356564051bb1SXueming Li 	rte_devargs_reset(&da);
356655e51c96SNithin Dabilpuram }
356755e51c96SNithin Dabilpuram 
356855e51c96SNithin Dabilpuram void
3569af75078fSIntel pmd_test_exit(void)
3570af75078fSIntel {
3571af75078fSIntel 	portid_t pt_id;
357226cbb419SViacheslav Ovsiienko 	unsigned int i;
3573fb73e096SJeff Guo 	int ret;
3574af75078fSIntel 
35758210ec25SPablo de Lara 	if (test_done == 0)
35768210ec25SPablo de Lara 		stop_packet_forwarding();
35778210ec25SPablo de Lara 
3578761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
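	/*
	 * Mempools created from anonymous memory were DMA-mapped by
	 * testpmd itself, so unmap them before the pools are freed below.
	 */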
357926cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
35803a0968c8SShahaf Shuler 		if (mempools[i]) {
35813a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
35823a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
35833a0968c8SShahaf Shuler 						     NULL);
35843a0968c8SShahaf Shuler 		}
35853a0968c8SShahaf Shuler 	}
3586761f7ae1SJie Zhou #endif
3587d3a274ceSZhihong Wang 	if (ports != NULL) {
3588d3a274ceSZhihong Wang 		no_link_check = 1;
35897d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
359008fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
3591af75078fSIntel 			fflush(stdout);
3592d3a274ceSZhihong Wang 			stop_port(pt_id);
359308fd782bSCristian Dumitrescu 		}
359408fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
359508fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
359608fd782bSCristian Dumitrescu 			fflush(stdout);
3597d3a274ceSZhihong Wang 			close_port(pt_id);
3598af75078fSIntel 		}
3599d3a274ceSZhihong Wang 	}
3600fb73e096SJeff Guo 
3601fb73e096SJeff Guo 	if (hot_plug) {
3602fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
36032049c511SJeff Guo 		if (ret) {
36048570d76cSStephen Hemminger 			TESTPMD_LOG(ERR, "fail to stop device event monitor.\n");
36052049c511SJeff Guo 			return;
36062049c511SJeff Guo 		}
3607fb73e096SJeff Guo 
36082049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
3609cc1bf307SJeff Guo 			dev_event_callback, NULL);
36102049c511SJeff Guo 		if (ret < 0) {
36118570d76cSStephen Hemminger 			TESTPMD_LOG(ERR, "fail to unregister device event callback.\n");
36122049c511SJeff Guo 			return;
36132049c511SJeff Guo 		}
36142049c511SJeff Guo 
36152049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
36162049c511SJeff Guo 		if (ret) {
36178570d76cSStephen Hemminger 			TESTPMD_LOG(ERR, "fail to disable hotplug handling.\n");
36182049c511SJeff Guo 			return;
36192049c511SJeff Guo 		}
3620fb73e096SJeff Guo 	}
362126cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3622401b744dSShahaf Shuler 		if (mempools[i])
3623a550baf2SMin Hu (Connor) 			mempool_free_mp(mempools[i]);
3624401b744dSShahaf Shuler 	}
362563b72657SIvan Ilchenko 	free(xstats_display);
3626fb73e096SJeff Guo 
3627d3a274ceSZhihong Wang 	printf("\nBye...\n");
3628af75078fSIntel }
3629af75078fSIntel 
3630af75078fSIntel typedef void (*cmd_func_t)(void);
3631af75078fSIntel struct pmd_test_command {
3632af75078fSIntel 	const char *cmd_name;
3633af75078fSIntel 	cmd_func_t cmd_func;
3634af75078fSIntel };
3635af75078fSIntel 
3636ce8d5614SIntel /* Check the link status of all ports for up to 9 s, and print the final status */
3637af75078fSIntel static void
3638edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
3639af75078fSIntel {
3640ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
3641ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3642f8244c63SZhiyong Yang 	portid_t portid;
3643f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
3644ce8d5614SIntel 	struct rte_eth_link link;
3645e661a08bSIgor Romanov 	int ret;
3646ba5509a6SIvan Dyukov 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3647ce8d5614SIntel 
3648ce8d5614SIntel 	printf("Checking link statuses...\n");
3649ce8d5614SIntel 	fflush(stdout);
3650ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
3651ce8d5614SIntel 		all_ports_up = 1;
36527d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
3653ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
3654ce8d5614SIntel 				continue;
3655ce8d5614SIntel 			memset(&link, 0, sizeof(link));
3656e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
3657e661a08bSIgor Romanov 			if (ret < 0) {
3658e661a08bSIgor Romanov 				all_ports_up = 0;
3659e661a08bSIgor Romanov 				if (print_flag == 1)
366061a3b0e5SAndrew Rybchenko 					fprintf(stderr,
366161a3b0e5SAndrew Rybchenko 						"Port %u link get failed: %s\n",
3662e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
3663e661a08bSIgor Romanov 				continue;
3664e661a08bSIgor Romanov 			}
3665ce8d5614SIntel 			/* print link status if flag set */
3666ce8d5614SIntel 			if (print_flag == 1) {
3667ba5509a6SIvan Dyukov 				rte_eth_link_to_str(link_status,
3668ba5509a6SIvan Dyukov 					sizeof(link_status), &link);
3669ba5509a6SIvan Dyukov 				printf("Port %d %s\n", portid, link_status);
3670ce8d5614SIntel 				continue;
3671ce8d5614SIntel 			}
3672ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
3673295968d1SFerruh Yigit 			if (link.link_status == RTE_ETH_LINK_DOWN) {
3674ce8d5614SIntel 				all_ports_up = 0;
3675ce8d5614SIntel 				break;
3676ce8d5614SIntel 			}
3677ce8d5614SIntel 		}
3678ce8d5614SIntel 		/* after finally printing all link status, get out */
3679ce8d5614SIntel 		if (print_flag == 1)
3680ce8d5614SIntel 			break;
3681ce8d5614SIntel 
3682ce8d5614SIntel 		if (all_ports_up == 0) {
3683ce8d5614SIntel 			fflush(stdout);
3684ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
3685ce8d5614SIntel 		}
3686ce8d5614SIntel 
3687ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
3688ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3689ce8d5614SIntel 			print_flag = 1;
3690ce8d5614SIntel 		}
36918ea656f8SGaetan Rivet 
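		/*
		 * With link state change interrupts enabled, one polling
		 * pass is enough: later changes are reported through the
		 * LSC event callback instead.
		 */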
36928ea656f8SGaetan Rivet 		if (lsc_interrupt)
36938ea656f8SGaetan Rivet 			break;
3694ce8d5614SIntel 	}
3695af75078fSIntel }
3696af75078fSIntel 
3697284c908cSGaetan Rivet static void
3698cc1bf307SJeff Guo rmv_port_callback(void *arg)
3699284c908cSGaetan Rivet {
37003b97888aSMatan Azrad 	int need_to_start = 0;
37010da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
370228caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
37030a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
37040a0821bcSPaulis Gributs 	int ret;
3705284c908cSGaetan Rivet 
3706284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
3707284c908cSGaetan Rivet 
37083b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
37093b97888aSMatan Azrad 		need_to_start = 1;
37103b97888aSMatan Azrad 		stop_packet_forwarding();
37113b97888aSMatan Azrad 	}
37120da2a62bSMatan Azrad 	no_link_check = 1;
3713284c908cSGaetan Rivet 	stop_port(port_id);
37140da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
37150654d4a8SThomas Monjalon 
37160a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
37170a0821bcSPaulis Gributs 	if (ret != 0)
37180a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
37190a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
37200a0821bcSPaulis Gributs 			port_id);
3721e1d38504SPaulis Gributs 	else {
3722e1d38504SPaulis Gributs 		struct rte_device *device = dev_info.device;
3723e1d38504SPaulis Gributs 		close_port(port_id);
3724e1d38504SPaulis Gributs 		detach_device(device); /* might be already removed or have more ports */
3725e1d38504SPaulis Gributs 	}
37263b97888aSMatan Azrad 	if (need_to_start)
37273b97888aSMatan Azrad 		start_packet_forwarding(0);
3728284c908cSGaetan Rivet }
3729284c908cSGaetan Rivet 
373076ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3731d6af1a13SBernard Iremonger static int
3732f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3733d6af1a13SBernard Iremonger 		  void *ret_param)
373476ad4a2dSGaetan Rivet {
373576ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3736d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
373776ad4a2dSGaetan Rivet 
373876ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
373961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
374061a3b0e5SAndrew Rybchenko 			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
374176ad4a2dSGaetan Rivet 			port_id, __func__, type);
374276ad4a2dSGaetan Rivet 		fflush(stderr);
37433af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3744f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
374597b5d8b5SThomas Monjalon 			eth_event_desc[type]);
374676ad4a2dSGaetan Rivet 		fflush(stdout);
374776ad4a2dSGaetan Rivet 	}
3748284c908cSGaetan Rivet 
3749284c908cSGaetan Rivet 	switch (type) {
37504f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
37514f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
37524f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
37534f1ed78eSThomas Monjalon 		break;
3754284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
37554f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
37564f1ed78eSThomas Monjalon 			break;
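		/*
		 * Defer the port removal by 100 ms via an EAL alarm so the
		 * detach runs outside this event callback.
		 */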
3757284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3758cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
375961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
376061a3b0e5SAndrew Rybchenko 				"Could not set up deferred device removal\n");
3761284c908cSGaetan Rivet 		break;
376285c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
376385c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
376485c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
376585c6571cSThomas Monjalon 		break;
3766bc70e559SSpike Du 	case RTE_ETH_EVENT_RX_AVAIL_THRESH: {
3767bc70e559SSpike Du 		uint16_t rxq_id;
3768bc70e559SSpike Du 		int ret;
3769bc70e559SSpike Du 
3770bc70e559SSpike Du 		/* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */
3771bc70e559SSpike Du 		for (rxq_id = 0; ; rxq_id++) {
3772bc70e559SSpike Du 			ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id,
3773bc70e559SSpike Du 							    NULL);
3774bc70e559SSpike Du 			if (ret <= 0)
3775bc70e559SSpike Du 				break;
3776bc70e559SSpike Du 			printf("Received avail_thresh event, port: %u, rxq_id: %u\n",
3777bc70e559SSpike Du 			       port_id, rxq_id);
3778f41a5092SSpike Du 
3779f41a5092SSpike Du #ifdef RTE_NET_MLX5
3780f41a5092SSpike Du 			mlx5_test_avail_thresh_event_handler(port_id, rxq_id);
3781f41a5092SSpike Du #endif
3782bc70e559SSpike Du 		}
3783bc70e559SSpike Du 		break;
3784bc70e559SSpike Du 	}
3785284c908cSGaetan Rivet 	default:
3786284c908cSGaetan Rivet 		break;
3787284c908cSGaetan Rivet 	}
3788d6af1a13SBernard Iremonger 	return 0;
378976ad4a2dSGaetan Rivet }
379076ad4a2dSGaetan Rivet 
379197b5d8b5SThomas Monjalon static int
379297b5d8b5SThomas Monjalon register_eth_event_callback(void)
379397b5d8b5SThomas Monjalon {
379497b5d8b5SThomas Monjalon 	int ret;
379597b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
379697b5d8b5SThomas Monjalon 
379797b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
379897b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
379997b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
380097b5d8b5SThomas Monjalon 				event,
380197b5d8b5SThomas Monjalon 				eth_event_callback,
380297b5d8b5SThomas Monjalon 				NULL);
380397b5d8b5SThomas Monjalon 		if (ret != 0) {
380497b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
380597b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
380697b5d8b5SThomas Monjalon 			return -1;
380797b5d8b5SThomas Monjalon 		}
380897b5d8b5SThomas Monjalon 	}
380997b5d8b5SThomas Monjalon 
381097b5d8b5SThomas Monjalon 	return 0;
381197b5d8b5SThomas Monjalon }
381297b5d8b5SThomas Monjalon 
3813687a5b12SSinan Kaya static int
3814687a5b12SSinan Kaya unregister_eth_event_callback(void)
3815687a5b12SSinan Kaya {
3816687a5b12SSinan Kaya 	int ret;
3817687a5b12SSinan Kaya 	enum rte_eth_event_type event;
3818687a5b12SSinan Kaya 
3819687a5b12SSinan Kaya 	for (event = RTE_ETH_EVENT_UNKNOWN;
3820687a5b12SSinan Kaya 			event < RTE_ETH_EVENT_MAX; event++) {
3821687a5b12SSinan Kaya 		ret = rte_eth_dev_callback_unregister(RTE_ETH_ALL,
3822687a5b12SSinan Kaya 				event,
3823687a5b12SSinan Kaya 				eth_event_callback,
3824687a5b12SSinan Kaya 				NULL);
3825687a5b12SSinan Kaya 		if (ret != 0) {
3826687a5b12SSinan Kaya 			TESTPMD_LOG(ERR, "Failed to unregister callback for "
3827687a5b12SSinan Kaya 					"%s event\n", eth_event_desc[event]);
3828687a5b12SSinan Kaya 			return -1;
3829687a5b12SSinan Kaya 		}
3830687a5b12SSinan Kaya 	}
3831687a5b12SSinan Kaya 
3832687a5b12SSinan Kaya 	return 0;
3833687a5b12SSinan Kaya }
3834687a5b12SSinan Kaya 
3835fb73e096SJeff Guo /* This function is used by the interrupt thread */
3836fb73e096SJeff Guo static void
3837cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3838fb73e096SJeff Guo 			     __rte_unused void *arg)
3839fb73e096SJeff Guo {
38402049c511SJeff Guo 	uint16_t port_id;
38412049c511SJeff Guo 	int ret;
38422049c511SJeff Guo 
3843fb73e096SJeff Guo 	switch (type) {
3844fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
38458570d76cSStephen Hemminger 		TESTPMD_LOG(INFO, "The device: %s has been removed!\n", device_name);
38462049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
38472049c511SJeff Guo 		if (ret) {
38488570d76cSStephen Hemminger 			TESTPMD_LOG(ERR,
38498570d76cSStephen Hemminger 				    "Can not get port for device %s!\n", device_name);
38502049c511SJeff Guo 			return;
38512049c511SJeff Guo 		}
3852cc1bf307SJeff Guo 		/*
3853cc1bf307SJeff Guo 		 * Because the user's callback is invoked from within the EAL
3854cc1bf307SJeff Guo 		 * interrupt callback, the interrupt callback must return before
3855cc1bf307SJeff Guo 		 * it can be unregistered when detaching the device. So finish
3856cc1bf307SJeff Guo 		 * this callback quickly and use a deferred removal to detach
3857cc1bf307SJeff Guo 		 * the device. This is a workaround; once device detaching is
3858cc1bf307SJeff Guo 		 * moved into the EAL in the future, the deferred removal can
3859cc1bf307SJeff Guo 		 * be deleted.
3860cc1bf307SJeff Guo 		 */
3861cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3862cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
38638570d76cSStephen Hemminger 			TESTPMD_LOG(ERR, "Could not set up deferred device removal\n");
3864fb73e096SJeff Guo 		break;
38658570d76cSStephen Hemminger 
3866fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
38678570d76cSStephen Hemminger 		TESTPMD_LOG(INFO, "The device: %s has been added!\n", device_name);
3868fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
3869fb73e096SJeff Guo 		 * begin to attach port.
3870fb73e096SJeff Guo 		 */
3871fb73e096SJeff Guo 		break;
38728570d76cSStephen Hemminger 
3873fb73e096SJeff Guo 	default:
38748570d76cSStephen Hemminger 		if (type >= RTE_DEV_EVENT_MAX)
38758570d76cSStephen Hemminger 			TESTPMD_LOG(ERR, "%s called upon invalid event %d\n",
38768570d76cSStephen Hemminger 				    __func__, type);
3877fb73e096SJeff Guo 		break;
3878fb73e096SJeff Guo 	}
3879fb73e096SJeff Guo }
3880fb73e096SJeff Guo 
3881f2c5125aSPablo de Lara static void
3882f4d178c1SXueming Li rxtx_port_config(portid_t pid)
3883f2c5125aSPablo de Lara {
3884d44f8a48SQi Zhang 	uint16_t qid;
38855e91aeefSWei Zhao 	uint64_t offloads;
3886f4d178c1SXueming Li 	struct rte_port *port = &ports[pid];
3887f2c5125aSPablo de Lara 
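	/*
	 * For each queue: start from the PMD's default ring configuration,
	 * keep any offloads already selected, then apply command-line
	 * threshold overrides.
	 */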
3888d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
38893c4426dbSDmitry Kozlyuk 		offloads = port->rxq[qid].conf.offloads;
38903c4426dbSDmitry Kozlyuk 		port->rxq[qid].conf = port->dev_info.default_rxconf;
3891f4d178c1SXueming Li 
3892f4d178c1SXueming Li 		if (rxq_share > 0 &&
3893f4d178c1SXueming Li 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3894f4d178c1SXueming Li 			/* Non-zero share group to enable RxQ share. */
38953c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
38963c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
3897f4d178c1SXueming Li 		}
3898f4d178c1SXueming Li 
3899575e0fd1SWei Zhao 		if (offloads != 0)
39003c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.offloads = offloads;
3901d44f8a48SQi Zhang 
3902d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3903f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
39043c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
3905f2c5125aSPablo de Lara 
3906f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
39073c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
3908f2c5125aSPablo de Lara 
3909f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
39103c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
3911f2c5125aSPablo de Lara 
3912f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
39133c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
3914f2c5125aSPablo de Lara 
3915f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
39163c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_drop_en = rx_drop_en;
3917f2c5125aSPablo de Lara 
3918d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3919d44f8a48SQi Zhang 	}
3920d44f8a48SQi Zhang 
3921d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
39223c4426dbSDmitry Kozlyuk 		offloads = port->txq[qid].conf.offloads;
39233c4426dbSDmitry Kozlyuk 		port->txq[qid].conf = port->dev_info.default_txconf;
3924575e0fd1SWei Zhao 		if (offloads != 0)
39253c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.offloads = offloads;
3926d44f8a48SQi Zhang 
3927d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3928f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
39293c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
3930f2c5125aSPablo de Lara 
3931f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
39323c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
3933f2c5125aSPablo de Lara 
3934f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
39353c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
3936f2c5125aSPablo de Lara 
3937f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
39383c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
3939f2c5125aSPablo de Lara 
3940f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
39413c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
3942d44f8a48SQi Zhang 
3943d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
3944d44f8a48SQi Zhang 	}
3945f2c5125aSPablo de Lara }
3946f2c5125aSPablo de Lara 
39470c4abd36SSteve Yang /*
3948b563c142SFerruh Yigit  * Helper function to set MTU from frame size
39490c4abd36SSteve Yang  *
39500c4abd36SSteve Yang  * port->dev_info should be set before calling this function.
39510c4abd36SSteve Yang  *
39520c4abd36SSteve Yang  * return 0 on success, negative on error
39530c4abd36SSteve Yang  */
39540c4abd36SSteve Yang int
3955b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
39560c4abd36SSteve Yang {
39570c4abd36SSteve Yang 	struct rte_port *port = &ports[portid];
39580c4abd36SSteve Yang 	uint32_t eth_overhead;
39591bb4a528SFerruh Yigit 	uint16_t mtu, new_mtu;
39600c4abd36SSteve Yang 
39611bb4a528SFerruh Yigit 	eth_overhead = get_eth_overhead(&port->dev_info);
39621bb4a528SFerruh Yigit 
39631bb4a528SFerruh Yigit 	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
39641bb4a528SFerruh Yigit 		fprintf(stderr, "Failed to get MTU for port %u\n", portid);
39651bb4a528SFerruh Yigit 		return -1;
39661bb4a528SFerruh Yigit 	}
39671bb4a528SFerruh Yigit 
39681bb4a528SFerruh Yigit 	new_mtu = max_rx_pktlen - eth_overhead;
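	/*
	 * e.g. a 1518-byte max frame minus the usual 18 bytes of Ethernet
	 * overhead (14-byte header + 4-byte FCS) yields the classic
	 * 1500-byte MTU.
	 */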
39690c4abd36SSteve Yang 
39701bb4a528SFerruh Yigit 	if (mtu == new_mtu)
39711bb4a528SFerruh Yigit 		return 0;
39721bb4a528SFerruh Yigit 
39731bb4a528SFerruh Yigit 	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
397461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
397561a3b0e5SAndrew Rybchenko 			"Failed to set MTU to %u for port %u\n",
39761bb4a528SFerruh Yigit 			new_mtu, portid);
39771bb4a528SFerruh Yigit 		return -1;
39780c4abd36SSteve Yang 	}
39790c4abd36SSteve Yang 
39801bb4a528SFerruh Yigit 	port->dev_conf.rxmode.mtu = new_mtu;
39811bb4a528SFerruh Yigit 
39820c4abd36SSteve Yang 	return 0;
39830c4abd36SSteve Yang }
39840c4abd36SSteve Yang 
3985013af9b6SIntel void
3986013af9b6SIntel init_port_config(void)
3987013af9b6SIntel {
3988013af9b6SIntel 	portid_t pid;
3989013af9b6SIntel 	struct rte_port *port;
3990655eae01SJie Wang 	int ret, i;
3991013af9b6SIntel 
39927d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
3993013af9b6SIntel 		port = &ports[pid];
39946f51deb9SIvan Ilchenko 
39956f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
39966f51deb9SIvan Ilchenko 		if (ret != 0)
39976f51deb9SIvan Ilchenko 			return;
39986f51deb9SIvan Ilchenko 
39993ce690d3SBruce Richardson 		if (nb_rxq > 1) {
4000013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
400190892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
4002422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
4003af75078fSIntel 		} else {
4004013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
4005013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
4006af75078fSIntel 		}
40073ce690d3SBruce Richardson 
40085f592039SJingjing Wu 		if (port->dcb_flag == 0) {
4009655eae01SJie Wang 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
4010f9295aa2SXiaoyu Min 				port->dev_conf.rxmode.mq_mode =
4011f9295aa2SXiaoyu Min 					(enum rte_eth_rx_mq_mode)
4012295968d1SFerruh Yigit 						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
4013655eae01SJie Wang 			} else {
4014295968d1SFerruh Yigit 				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
4015655eae01SJie Wang 				port->dev_conf.rxmode.offloads &=
4016295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4017655eae01SJie Wang 
4018655eae01SJie Wang 				for (i = 0;
4019655eae01SJie Wang 				     i < port->dev_info.nb_rx_queues;
4020655eae01SJie Wang 				     i++)
40213c4426dbSDmitry Kozlyuk 					port->rxq[i].conf.offloads &=
4022295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4023655eae01SJie Wang 			}
40243ce690d3SBruce Richardson 		}
40253ce690d3SBruce Richardson 
4026f4d178c1SXueming Li 		rxtx_port_config(pid);
4027013af9b6SIntel 
4028a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
4029a5279d25SIgor Romanov 		if (ret != 0)
4030a5279d25SIgor Romanov 			return;
4031013af9b6SIntel 
40320a0821bcSPaulis Gributs 		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
40338ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
40340a0821bcSPaulis Gributs 		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
4035284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
4036013af9b6SIntel 	}
4037013af9b6SIntel }
4038013af9b6SIntel 
403915e34522SLong Wu void set_port_member_flag(portid_t member_pid)
404041b05095SBernard Iremonger {
404141b05095SBernard Iremonger 	struct rte_port *port;
404241b05095SBernard Iremonger 
404315e34522SLong Wu 	port = &ports[member_pid];
404415e34522SLong Wu 	port->member_flag = 1;
404541b05095SBernard Iremonger }
404641b05095SBernard Iremonger 
404715e34522SLong Wu void clear_port_member_flag(portid_t member_pid)
404841b05095SBernard Iremonger {
404941b05095SBernard Iremonger 	struct rte_port *port;
405041b05095SBernard Iremonger 
405115e34522SLong Wu 	port = &ports[member_pid];
405215e34522SLong Wu 	port->member_flag = 0;
405341b05095SBernard Iremonger }
405441b05095SBernard Iremonger 
405515e34522SLong Wu uint8_t port_is_bonding_member(portid_t member_pid)
40560e545d30SBernard Iremonger {
40570e545d30SBernard Iremonger 	struct rte_port *port;
40580a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
40590a0821bcSPaulis Gributs 	int ret;
40600e545d30SBernard Iremonger 
406115e34522SLong Wu 	port = &ports[member_pid];
406215e34522SLong Wu 	ret = eth_dev_info_get_print_err(member_pid, &dev_info);
40630a0821bcSPaulis Gributs 	if (ret != 0) {
40640a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
40650a0821bcSPaulis Gributs 			"Failed to get device info for port id %d, "
40664f840086SLong Wu 			"cannot determine if the port is a bonding member\n",
406715e34522SLong Wu 			member_pid);
40680a0821bcSPaulis Gributs 		return 0;
40690a0821bcSPaulis Gributs 	}
407015e34522SLong Wu 
407115e34522SLong Wu 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDING_MEMBER) || (port->member_flag == 1))
4072b8b8b344SMatan Azrad 		return 1;
4073b8b8b344SMatan Azrad 	return 0;
40740e545d30SBernard Iremonger }
40750e545d30SBernard Iremonger 
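/*
 * One VLAN tag per VMDq pool in the largest (32-pool) DCB+VT layout;
 * get_eth_dcb_conf() maps each tag to a pool below.
 */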
4076013af9b6SIntel const uint16_t vlan_tags[] = {
4077013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
4078013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
4079013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
4080013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
4081013af9b6SIntel };
4082013af9b6SIntel 
4083*34847a73SMin Zhou static void
4084*34847a73SMin Zhou get_eth_dcb_conf(struct rte_eth_conf *eth_conf, enum dcb_mode_enable dcb_mode,
4085*34847a73SMin Zhou 		 enum rte_eth_nb_tcs num_tcs, uint8_t pfc_en)
4086013af9b6SIntel {
4087013af9b6SIntel 	uint8_t i;
4088af75078fSIntel 
4089af75078fSIntel 	/*
4090013af9b6SIntel 	 * Builds up the correct configuration for DCB+VT based on the VLAN tags array
4091013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
4092af75078fSIntel 	 */
40931a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
40941a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
40951a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
40961a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
40971a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
4098013af9b6SIntel 
4099547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
41001a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
41011a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
41021a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
4103295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
41041a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
4105295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
4106013af9b6SIntel 
41071a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
41081a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
41091a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
41101a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
41111a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
4112af75078fSIntel 		}
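		/* Map the eight user priorities round-robin onto the TCs. */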
4113295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4114f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
4115f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
4116013af9b6SIntel 		}
4117013af9b6SIntel 
4118013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
4119f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4120f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4121295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
4122295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
41231a572499SJingjing Wu 	} else {
41241a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
41251a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
41261a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
41271a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
4128013af9b6SIntel 
41291a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
41301a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
41311a572499SJingjing Wu 
4132295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4133bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
4134bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
4135013af9b6SIntel 		}
4136ac7c491cSKonstantin Ananyev 
4137f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4138f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4139295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
4140295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
41411a572499SJingjing Wu 	}
41421a572499SJingjing Wu 
41431a572499SJingjing Wu 	if (pfc_en)
41441a572499SJingjing Wu 		eth_conf->dcb_capability_en =
4145295968d1SFerruh Yigit 				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
4146013af9b6SIntel 	else
4147295968d1SFerruh Yigit 		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
4148013af9b6SIntel }
4149013af9b6SIntel 
4150013af9b6SIntel int
41511a572499SJingjing Wu init_port_dcb_config(portid_t pid,
41521a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
41531a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
41541a572499SJingjing Wu 		     uint8_t pfc_en)
4155013af9b6SIntel {
4156013af9b6SIntel 	struct rte_eth_conf port_conf;
4157013af9b6SIntel 	struct rte_port *rte_port;
4158013af9b6SIntel 	int retval;
4159013af9b6SIntel 	uint16_t i;
4160013af9b6SIntel 
4161a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
4162a550baf2SMin Hu (Connor) 		printf("The multi-process feature doesn't support dcb.\n");
4163a550baf2SMin Hu (Connor) 		fprintf(stderr, "The multi-process feature doesn't support DCB.\n");
4164a550baf2SMin Hu (Connor) 	}
41652a977b89SWenzhuo Lu 	rte_port = &ports[pid];
4166013af9b6SIntel 
4167c1ba6c32SHuisong Li 	/* retain the original device configuration. */
4168c1ba6c32SHuisong Li 	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
4169d5354e89SYanglong Wu 
4170013af9b6SIntel 	/* set configuration of DCB in vt mode and DCB in non-vt mode */
4171*34847a73SMin Zhou 	get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
4172*34847a73SMin Zhou 
4173295968d1SFerruh Yigit 	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4174cbe70fdeSJie Wang 	/* remove RSS HASH offload for DCB in vt mode */
4175cbe70fdeSJie Wang 	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
4176cbe70fdeSJie Wang 		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4177cbe70fdeSJie Wang 		for (i = 0; i < nb_rxq; i++)
41783c4426dbSDmitry Kozlyuk 			rte_port->rxq[i].conf.offloads &=
4179cbe70fdeSJie Wang 				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4180cbe70fdeSJie Wang 	}
4181013af9b6SIntel 
41822f203d44SQi Zhang 	/* re-configure the device. */
41832b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
41842b0e0ebaSChenbo Xia 	if (retval < 0)
41852b0e0ebaSChenbo Xia 		return retval;
41866f51deb9SIvan Ilchenko 
41876f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
41886f51deb9SIvan Ilchenko 	if (retval != 0)
41896f51deb9SIvan Ilchenko 		return retval;
41902a977b89SWenzhuo Lu 
41912a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
41922a977b89SWenzhuo Lu 	 * the queue ids of the VMDq pools start after the PF queues.
41932a977b89SWenzhuo Lu 	 */
41942a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
41952a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
419661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
419761a3b0e5SAndrew Rybchenko 			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
419861a3b0e5SAndrew Rybchenko 			pid);
41992a977b89SWenzhuo Lu 		return -1;
42002a977b89SWenzhuo Lu 	}
42012a977b89SWenzhuo Lu 
42022a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
42032a977b89SWenzhuo Lu 	 * and have the same number of rxq and txq in DCB mode
42042a977b89SWenzhuo Lu 	 */
42052a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
420686ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
420786ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
420886ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
420986ef65eeSBernard Iremonger 		} else {
42102a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
42112a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
421286ef65eeSBernard Iremonger 		}
42132a977b89SWenzhuo Lu 	} else {
42142a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
42152a977b89SWenzhuo Lu 		/* if VT is disabled, use all PF queues */
42162a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
42172a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
42182a977b89SWenzhuo Lu 		} else {
42192a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
42202a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
42222a977b89SWenzhuo Lu 		}
42232a977b89SWenzhuo Lu 	}
42242a977b89SWenzhuo Lu 	rx_free_thresh = 64;
42252a977b89SWenzhuo Lu 
4226013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4227013af9b6SIntel 
4228f4d178c1SXueming Li 	rxtx_port_config(pid);
4229013af9b6SIntel 	/* VLAN filter */
4230295968d1SFerruh Yigit 	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
42311a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
4232013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
4233013af9b6SIntel 
4234a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4235a5279d25SIgor Romanov 	if (retval != 0)
4236a5279d25SIgor Romanov 		return retval;
4237a5279d25SIgor Romanov 
42387741e4cfSIntel 	rte_port->dcb_flag = 1;
42397741e4cfSIntel 
4240a690a070SHuisong Li 	/* Enter DCB configuration status */
4241a690a070SHuisong Li 	dcb_config = 1;
4242a690a070SHuisong Li 
4243013af9b6SIntel 	return 0;
4244af75078fSIntel }
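
/*
 * Illustrative use (hypothetical values): the interactive command
 * "port config 0 dcb vt on 4 pfc on" roughly maps to
 *
 *   init_port_dcb_config(0, DCB_VT_ENABLED, RTE_ETH_4_TCS, 1);
 *
 * on a stopped port, followed by "port start 0".
 */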
4245af75078fSIntel 
4246ffc468ffSTetsuya Mukawa static void
4247ffc468ffSTetsuya Mukawa init_port(void)
4248ffc468ffSTetsuya Mukawa {
42491b9f2746SGregory Etelson 	int i;
42501b9f2746SGregory Etelson 
4251ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
4252ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
4253ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4254ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
4255ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
4256ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
4257ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
4258ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
4259ffc468ffSTetsuya Mukawa 	}
4260236bc417SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
4261236bc417SGregory Etelson 		ports[i].fwd_mac_swap = 1;
426263b72657SIvan Ilchenko 		ports[i].xstats_info.allocated = false;
42631b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
4264236bc417SGregory Etelson 	}
426529841336SPhil Yang 	/* Initialize ports NUMA structures */
426629841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
426729841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
426829841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4269ffc468ffSTetsuya Mukawa }
4270ffc468ffSTetsuya Mukawa 
4271d3a274ceSZhihong Wang static void
4272cfea1f30SPablo de Lara print_stats(void)
4273cfea1f30SPablo de Lara {
4274cfea1f30SPablo de Lara 	uint8_t i;
4275cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
4276cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4277cfea1f30SPablo de Lara 
4278cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
4279cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
4280cfea1f30SPablo de Lara 
4281cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
4282cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4283cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
4284683d1e82SIgor Romanov 
4285683d1e82SIgor Romanov 	fflush(stdout);
4286cfea1f30SPablo de Lara }
4287cfea1f30SPablo de Lara 
4288cfea1f30SPablo de Lara static void
42890fd1386cSStephen Hemminger signal_handler(int signum __rte_unused)
4290d3a274ceSZhihong Wang {
4291d9a191a0SPhil Yang 	f_quit = 1;
4292f1d0993eSStephen Hemminger 	prompt_exit();
4293d3a274ceSZhihong Wang }
4294d3a274ceSZhihong Wang 
4295af75078fSIntel int
4296af75078fSIntel main(int argc, char** argv)
4297af75078fSIntel {
4298af75078fSIntel 	int diag;
4299f8244c63SZhiyong Yang 	portid_t port_id;
43004918a357SXiaoyun Li 	uint16_t count;
4301fb73e096SJeff Guo 	int ret;
4302af75078fSIntel 
4303f1d0993eSStephen Hemminger #ifdef RTE_EXEC_ENV_WINDOWS
4304d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
4305d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
4306f1d0993eSStephen Hemminger #else
4307f1d0993eSStephen Hemminger 	/* Want read() not to be restarted on signal */
4308f1d0993eSStephen Hemminger 	struct sigaction action = {
4309f1d0993eSStephen Hemminger 		.sa_handler = signal_handler,
4310f1d0993eSStephen Hemminger 	};
4311f1d0993eSStephen Hemminger 
4312f1d0993eSStephen Hemminger 	sigaction(SIGINT, &action, NULL);
4313f1d0993eSStephen Hemminger 	sigaction(SIGTERM, &action, NULL);
4314f1d0993eSStephen Hemminger #endif
4315d3a274ceSZhihong Wang 
4316285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
4317285fd101SOlivier Matz 	if (testpmd_logtype < 0)
431816267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type\n");
4319285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4320285fd101SOlivier Matz 
43219201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
43229201806eSStephen Hemminger 	if (diag < 0)
432316267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
432416267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
43259201806eSStephen Hemminger 
4326563fbd08SDavid Marchand 	/* allocate port structures, and init them */
4327563fbd08SDavid Marchand 	init_port();
4328563fbd08SDavid Marchand 
432997b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
433097b5d8b5SThomas Monjalon 	if (ret != 0)
433116267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events\n");
433297b5d8b5SThomas Monjalon 
4333a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
43344aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
4335e9436f54STiwei Bie 	rte_pdump_init();
43364aa0d012SAnatoly Burakov #endif
43374aa0d012SAnatoly Burakov 
43384918a357SXiaoyun Li 	count = 0;
43394918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
43404918a357SXiaoyun Li 		ports_ids[count] = port_id;
43414918a357SXiaoyun Li 		count++;
43424918a357SXiaoyun Li 	}
43434918a357SXiaoyun Li 	nb_ports = (portid_t) count;
43444aa0d012SAnatoly Burakov 	if (nb_ports == 0)
43454aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
43464aa0d012SAnatoly Burakov 
43474aa0d012SAnatoly Burakov 	set_def_fwd_config();
43484aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
434916267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
435016267ceeSStephen Hemminger 			 "Check the core mask argument\n");
43514aa0d012SAnatoly Burakov 
4352e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
4353a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4354e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
4355e505d84cSAnatoly Burakov #endif
4356a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
4357e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
4358e505d84cSAnatoly Burakov #endif
4359e505d84cSAnatoly Burakov 
4360fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
43615fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
4362fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
4363fb7b8b32SAnatoly Burakov #else
4364fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
4365fb7b8b32SAnatoly Burakov #endif
4366fb7b8b32SAnatoly Burakov 
4367e505d84cSAnatoly Burakov 	argc -= diag;
4368e505d84cSAnatoly Burakov 	argv += diag;
4369e505d84cSAnatoly Burakov 	if (argc > 1)
4370e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
4371e505d84cSAnatoly Burakov 
4372761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4373e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4374285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
43751c036b16SEelco Chaudron 			strerror(errno));
43761c036b16SEelco Chaudron 	}
4377761f7ae1SJie Zhou #endif
43781c036b16SEelco Chaudron 
437999cabef0SPablo de Lara 	if (tx_first && interactive)
438099cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
438199cabef0SPablo de Lara 				"interactive mode.\n");
43828820cba4SDavid Hunt 
43838820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
438461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
438561a3b0e5SAndrew Rybchenko 			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
43868820cba4SDavid Hunt 		lsc_interrupt = 0;
43878820cba4SDavid Hunt 	}
43888820cba4SDavid Hunt 
43895a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
43903012880dSDavid Marchand 		rte_exit(EXIT_FAILURE, "Either rx or tx queues should be non-zero\n");
43915a8fb55cSReshma Pattan 
43925a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
439361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
439461a3b0e5SAndrew Rybchenko 			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4395af75078fSIntel 			nb_rxq, nb_txq);
4396af75078fSIntel 
4397af75078fSIntel 	init_config();
4398fb73e096SJeff Guo 
4399fb73e096SJeff Guo 	if (hot_plug) {
44002049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
4401fb73e096SJeff Guo 		if (ret) {
44028570d76cSStephen Hemminger 			TESTPMD_LOG(ERR, "fail to enable hotplug handling.");
4403fb73e096SJeff Guo 			return -1;
4404fb73e096SJeff Guo 		}
4405fb73e096SJeff Guo 
44062049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
44072049c511SJeff Guo 		if (ret) {
44088570d76cSStephen Hemminger 			TESTPMD_LOG(ERR, "fail to start device event monitoring.");
44092049c511SJeff Guo 			return -1;
44102049c511SJeff Guo 		}
44112049c511SJeff Guo 
44128570d76cSStephen Hemminger 		ret = rte_dev_event_callback_register(NULL, dev_event_callback, NULL);
44132049c511SJeff Guo 		if (ret) {
44148570d76cSStephen Hemminger 			TESTPMD_LOG(ERR, "fail to register device event callback\n");
44152049c511SJeff Guo 			return -1;
44162049c511SJeff Guo 		}
4417fb73e096SJeff Guo 	}
4418fb73e096SJeff Guo 
44197e403725SGregory Etelson 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0) {
44207e403725SGregory Etelson 		if (!interactive) {
44217e403725SGregory Etelson 			rte_eal_cleanup();
4422148f963fSBruce Richardson 			rte_exit(EXIT_FAILURE, "Start ports failed\n");
44237e403725SGregory Etelson 		}
44247e403725SGregory Etelson 		fprintf(stderr, "Start ports failed\n");
44257e403725SGregory Etelson 	}
4426af75078fSIntel 
4427ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
442834fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
442934fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
443034fc1051SIvan Ilchenko 		if (ret != 0)
443161a3b0e5SAndrew Rybchenko 			fprintf(stderr,
443261a3b0e5SAndrew Rybchenko 				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
443334fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
443434fc1051SIvan Ilchenko 	}
4435af75078fSIntel 
4436bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS
44377e4441c8SRemy Horton 	/* Init metrics library */
44387e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
4439bb9be9a4SDavid Marchand #endif
44407e4441c8SRemy Horton 
4441a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
444262d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
444362d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
444462d3216dSReshma Pattan 		if (ret)
444561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
444661a3b0e5SAndrew Rybchenko 				"Warning: latencystats init() returned error %d\n",
444761a3b0e5SAndrew Rybchenko 				ret);
444861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Latencystats running on lcore %d\n",
444962d3216dSReshma Pattan 			latencystats_lcore_id);
445062d3216dSReshma Pattan 	}
445162d3216dSReshma Pattan #endif
445262d3216dSReshma Pattan 
44537e4441c8SRemy Horton 	/* Setup bitrate stats */
4454a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4455e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
44567e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
44577e4441c8SRemy Horton 		if (bitrate_data == NULL)
4458e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
4459e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
44607e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
4461e25e6c70SRemy Horton 	}
44627e4441c8SRemy Horton #endif
446399a4974aSRobin Jarry 
446499a4974aSRobin Jarry 	if (record_core_cycles)
446599a4974aSRobin Jarry 		rte_lcore_register_usage_cb(lcore_usage_callback);
446699a4974aSRobin Jarry 
4467592ab76fSDavid Marchand 	if (init_cmdline() != 0)
4468592ab76fSDavid Marchand 		rte_exit(EXIT_FAILURE,
4469592ab76fSDavid Marchand 			"Could not initialise cmdline context.\n");
4470592ab76fSDavid Marchand 
447181ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
447281ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
447381ef862bSAllain Legacy 
4474ca7feb22SCyril Chemparathy 	if (interactive == 1) {
4475ca7feb22SCyril Chemparathy 		if (auto_start) {
4476ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
4477ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
4478ca7feb22SCyril Chemparathy 		}
4479af75078fSIntel 		prompt();
4480ddbf4799SDavid Marchand 	} else {
4481af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
448299cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
4483cfea1f30SPablo de Lara 		if (stats_period != 0) {
4484cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
4485cfea1f30SPablo de Lara 			uint64_t timer_period;
4486cfea1f30SPablo de Lara 
4487cfea1f30SPablo de Lara 			/* Convert to number of cycles */
4488cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
4489cfea1f30SPablo de Lara 
4490d9a191a0SPhil Yang 			while (f_quit == 0) {
4491cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
4492cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
4493cfea1f30SPablo de Lara 
4494cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
4495cfea1f30SPablo de Lara 					print_stats();
4496cfea1f30SPablo de Lara 					/* Reset the timer */
4497cfea1f30SPablo de Lara 					diff_time = 0;
4498cfea1f30SPablo de Lara 				}
4499cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
4500cfea1f30SPablo de Lara 				prev_time = cur_time;
4501761f7ae1SJie Zhou 				rte_delay_us_sleep(US_PER_S);
4502cfea1f30SPablo de Lara 			}
45030fd1386cSStephen Hemminger 		} else {
45040fd1386cSStephen Hemminger 			char c;
4505cfea1f30SPablo de Lara 
4506af75078fSIntel 			printf("Press enter to exit\n");
4507a996cd04SStephen Hemminger 			while (f_quit == 0) {
4508a996cd04SStephen Hemminger 				/* end-of-file or any character exits loop */
4509a996cd04SStephen Hemminger 				if (read(0, &c, 1) >= 0)
4510a996cd04SStephen Hemminger 					break;
4511a996cd04SStephen Hemminger 				if (errno == EINTR)
4512a996cd04SStephen Hemminger 					continue;
4513a996cd04SStephen Hemminger 				rte_exit(EXIT_FAILURE, "Read failed: %s\n",
45140fd1386cSStephen Hemminger 					 strerror(errno));
4515a996cd04SStephen Hemminger 			}
4516af75078fSIntel 		}
45170fd1386cSStephen Hemminger 	}
45180fd1386cSStephen Hemminger 
45190fd1386cSStephen Hemminger 	pmd_test_exit();
45200fd1386cSStephen Hemminger 
45210fd1386cSStephen Hemminger #ifdef RTE_LIB_PDUMP
45220fd1386cSStephen Hemminger 	/* uninitialize packet capture framework */
45230fd1386cSStephen Hemminger 	rte_pdump_uninit();
45240fd1386cSStephen Hemminger #endif
45250fd1386cSStephen Hemminger #ifdef RTE_LIB_LATENCYSTATS
45260fd1386cSStephen Hemminger 	if (latencystats_enabled != 0)
45270fd1386cSStephen Hemminger 		rte_latencystats_uninit();
45280fd1386cSStephen Hemminger #endif
4529af75078fSIntel 
4530687a5b12SSinan Kaya 	ret = unregister_eth_event_callback();
4531687a5b12SSinan Kaya 	if (ret != 0)
4532687a5b12SSinan Kaya 		rte_exit(EXIT_FAILURE, "Cannot unregister for ethdev events\n");
4533687a5b12SSinan Kaya 
45355e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
45365e516c89SStephen Hemminger 	if (ret != 0)
45375e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
45385e516c89SStephen Hemminger 			 "EAL cleanup failed: %s\n", strerror(-ret));
45395e516c89SStephen Hemminger 
45405e516c89SStephen Hemminger 	return EXIT_SUCCESS;
4541af75078fSIntel }
4542