xref: /dpdk/app/test-pmd/testpmd.c (revision 5028f207a4fa6d5cdd86019e43d2e2d80fa21ced)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
12761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
131c036b16SEelco Chaudron #include <sys/mman.h>
14761f7ae1SJie Zhou #endif
15af75078fSIntel #include <sys/types.h>
16af75078fSIntel #include <errno.h>
17fb73e096SJeff Guo #include <stdbool.h>
18af75078fSIntel 
19af75078fSIntel #include <sys/queue.h>
20af75078fSIntel #include <sys/stat.h>
21af75078fSIntel 
22af75078fSIntel #include <stdint.h>
23af75078fSIntel #include <unistd.h>
24af75078fSIntel #include <inttypes.h>
25af75078fSIntel 
26af75078fSIntel #include <rte_common.h>
27d1eb542eSOlivier Matz #include <rte_errno.h>
28af75078fSIntel #include <rte_byteorder.h>
29af75078fSIntel #include <rte_log.h>
30af75078fSIntel #include <rte_debug.h>
31af75078fSIntel #include <rte_cycles.h>
32af75078fSIntel #include <rte_memory.h>
33af75078fSIntel #include <rte_memcpy.h>
34af75078fSIntel #include <rte_launch.h>
35770ebc06SDavid Marchand #include <rte_bus.h>
36af75078fSIntel #include <rte_eal.h>
37284c908cSGaetan Rivet #include <rte_alarm.h>
38af75078fSIntel #include <rte_per_lcore.h>
39af75078fSIntel #include <rte_lcore.h>
40af75078fSIntel #include <rte_branch_prediction.h>
41af75078fSIntel #include <rte_mempool.h>
42af75078fSIntel #include <rte_malloc.h>
43af75078fSIntel #include <rte_mbuf.h>
440e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
45af75078fSIntel #include <rte_interrupts.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50a8d0d473SBruce Richardson #ifdef RTE_NET_IXGBE
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
57bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS
587e4441c8SRemy Horton #include <rte_metrics.h>
59bb9be9a4SDavid Marchand #endif
60a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
617e4441c8SRemy Horton #include <rte_bitrate.h>
627e4441c8SRemy Horton #endif
63a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
6462d3216dSReshma Pattan #include <rte_latencystats.h>
6562d3216dSReshma Pattan #endif
66761f7ae1SJie Zhou #ifdef RTE_EXEC_ENV_WINDOWS
67761f7ae1SJie Zhou #include <process.h>
68761f7ae1SJie Zhou #endif
69e46372d7SHuisong Li #ifdef RTE_NET_BOND
70e46372d7SHuisong Li #include <rte_eth_bond.h>
71e46372d7SHuisong Li #endif
72f41a5092SSpike Du #ifdef RTE_NET_MLX5
73f41a5092SSpike Du #include "mlx5_testpmd.h"
74f41a5092SSpike Du #endif
75af75078fSIntel 
76af75078fSIntel #include "testpmd.h"
77af75078fSIntel 
78c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
79c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
80c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
81c7f5dba7SAnatoly Burakov #else
82c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
83c7f5dba7SAnatoly Burakov #endif
84c7f5dba7SAnatoly Burakov 
85c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
86c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
87c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
88c7f5dba7SAnatoly Burakov #else
89c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
90c7f5dba7SAnatoly Burakov #endif
91c7f5dba7SAnatoly Burakov 
92c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
9313b19642SDmitry Kozlyuk /*
9413b19642SDmitry Kozlyuk  * Zone size with the malloc overhead (max of debug and release variants)
9513b19642SDmitry Kozlyuk  * must fit into the smallest supported hugepage size (2M),
9613b19642SDmitry Kozlyuk  * so that an IOVA-contiguous zone of this size can always be allocated
9713b19642SDmitry Kozlyuk  * if there are free 2M hugepages.
9813b19642SDmitry Kozlyuk  */
9913b19642SDmitry Kozlyuk #define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
100c7f5dba7SAnatoly Burakov 
101af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
102285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
103af75078fSIntel 
104cb056611SStephen Hemminger /* use main core for command line ? */
105af75078fSIntel uint8_t interactive = 0;
106ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
10799cabef0SPablo de Lara uint8_t tx_first;
10881ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
109af75078fSIntel 
110af75078fSIntel /*
111af75078fSIntel  * NUMA support configuration.
112af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
113af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
114af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
115af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
116af75078fSIntel  */
117999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
118af75078fSIntel 
119af75078fSIntel /*
120b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
121b6ea6408SIntel  * not configured.
122b6ea6408SIntel  */
123b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
124b6ea6408SIntel 
125b6ea6408SIntel /*
126c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
127c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
128c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
129c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
130c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
131148f963fSBruce Richardson  */
132c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
133148f963fSBruce Richardson 
134148f963fSBruce Richardson /*
13563531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
13663531389SGeorgios Katsikas  * is allocated.
13763531389SGeorgios Katsikas  */
13863531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
13963531389SGeorgios Katsikas 
14063531389SGeorgios Katsikas /*
14163531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
14263531389SGeorgios Katsikas  * is allocated.
14363531389SGeorgios Katsikas  */
14463531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
14563531389SGeorgios Katsikas 
14663531389SGeorgios Katsikas /*
14763531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
14863531389SGeorgios Katsikas  * is allocated.
14963531389SGeorgios Katsikas  */
15063531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
15163531389SGeorgios Katsikas 
15263531389SGeorgios Katsikas /*
153af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
154af75078fSIntel  * forwarded.
155547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
156af75078fSIntel  * ports.
157af75078fSIntel  */
1586d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
159af75078fSIntel portid_t nb_peer_eth_addrs = 0;
160af75078fSIntel 
161af75078fSIntel /*
162af75078fSIntel  * Probed Target Environment.
163af75078fSIntel  */
164af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
165af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
166af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
167af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
168af75078fSIntel 
1694918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1704918a357SXiaoyun Li 
171af75078fSIntel /*
172af75078fSIntel  * Test Forwarding Configuration.
173af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
174af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
175af75078fSIntel  */
176af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
177af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
178af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
179af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
180af75078fSIntel 
181af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
182af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
183af75078fSIntel 
184af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
185af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
186af75078fSIntel 
187af75078fSIntel /*
188af75078fSIntel  * Forwarding engines.
189af75078fSIntel  */
190af75078fSIntel struct fwd_engine * fwd_engines[] = {
191af75078fSIntel 	&io_fwd_engine,
192af75078fSIntel 	&mac_fwd_engine,
193d47388f1SCyril Chemparathy 	&mac_swap_engine,
194e9e23a61SCyril Chemparathy 	&flow_gen_engine,
195af75078fSIntel 	&rx_only_engine,
196af75078fSIntel 	&tx_only_engine,
197af75078fSIntel 	&csum_fwd_engine,
198168dfa61SIvan Boule 	&icmp_echo_engine,
1993c156061SJens Freimann 	&noisy_vnf_engine,
2002564abdaSShiri Kuzin 	&five_tuple_swap_fwd_engine,
201af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
202af75078fSIntel 	&ieee1588_fwd_engine,
203af75078fSIntel #endif
20459840375SXueming Li 	&shared_rxq_engine,
205af75078fSIntel 	NULL,
206af75078fSIntel };
207af75078fSIntel 
20826cbb419SViacheslav Ovsiienko struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
20959fcf854SShahaf Shuler uint16_t mempool_flags;
210401b744dSShahaf Shuler 
211af75078fSIntel struct fwd_config cur_fwd_config;
212af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
213bf56fce1SZhihong Wang uint32_t retry_enabled;
214bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
215bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
216af75078fSIntel 
21726cbb419SViacheslav Ovsiienko uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
21826cbb419SViacheslav Ovsiienko uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
21926cbb419SViacheslav Ovsiienko 	DEFAULT_MBUF_DATA_SIZE
22026cbb419SViacheslav Ovsiienko }; /**< Mbuf data space size. */
221c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
222c8798818SIntel                                       * specified on command-line. */
223cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
224d9a191a0SPhil Yang 
22563b72657SIvan Ilchenko /** Extended statistics to show. */
22663b72657SIvan Ilchenko struct rte_eth_xstat_name *xstats_display;
22763b72657SIvan Ilchenko 
22863b72657SIvan Ilchenko unsigned int xstats_display_num; /**< Size of extended statistics to show */
22963b72657SIvan Ilchenko 
230d9a191a0SPhil Yang /*
231d9a191a0SPhil Yang  * In container, it cannot terminate the process which running with 'stats-period'
232d9a191a0SPhil Yang  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
233d9a191a0SPhil Yang  */
2344c243bd4SStephen Hemminger static volatile uint8_t f_quit;
2353889a322SHuisong Li uint8_t cl_quit; /* Quit testpmd from cmdline. */
236d9a191a0SPhil Yang 
237af75078fSIntel /*
2381bb4a528SFerruh Yigit  * Max Rx frame size, set by '--max-pkt-len' parameter.
2391bb4a528SFerruh Yigit  */
2401bb4a528SFerruh Yigit uint32_t max_rx_pkt_len;
2411bb4a528SFerruh Yigit 
2421bb4a528SFerruh Yigit /*
2430f2096d7SViacheslav Ovsiienko  * Configuration of packet segments used to scatter received packets
2440f2096d7SViacheslav Ovsiienko  * if some of split features is configured.
2450f2096d7SViacheslav Ovsiienko  */
2460f2096d7SViacheslav Ovsiienko uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
2470f2096d7SViacheslav Ovsiienko uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
24891c78e09SViacheslav Ovsiienko uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
24991c78e09SViacheslav Ovsiienko uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
25052e2e7edSYuan Wang uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT];
2510f2096d7SViacheslav Ovsiienko 
252a4bf5421SHanumanth Pothula uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature */
253a4bf5421SHanumanth Pothula 
2540f2096d7SViacheslav Ovsiienko /*
255af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
256af75078fSIntel  */
257af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
258af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
259af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
260af75078fSIntel };
261af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
262af75078fSIntel 
26379bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
26479bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
26579bec05bSKonstantin Ananyev 
26682010ef5SYongseok Koh uint8_t txonly_multi_flow;
26782010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
26882010ef5SYongseok Koh 
2694940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter;
2704940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */
2714940344dSViacheslav Ovsiienko 
2724940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra;
2734940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */
2744940344dSViacheslav Ovsiienko 
275af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
2766c02043eSIgor Russkikh uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
277861e7684SZhihong Wang int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
278e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
279af75078fSIntel 
280900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
281900550deSIntel uint8_t dcb_config = 0;
282900550deSIntel 
283af75078fSIntel /*
284af75078fSIntel  * Configurable number of RX/TX queues.
285af75078fSIntel  */
2861c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
287af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
288af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
289af75078fSIntel 
290af75078fSIntel /*
291af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2928599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
293af75078fSIntel  */
2944ed89049SDavid Marchand #define RX_DESC_DEFAULT 0
2954ed89049SDavid Marchand #define TX_DESC_DEFAULT 0
2964ed89049SDavid Marchand uint16_t nb_rxd = RX_DESC_DEFAULT; /**< Number of RX descriptors. */
2974ed89049SDavid Marchand uint16_t nb_txd = TX_DESC_DEFAULT; /**< Number of TX descriptors. */
298af75078fSIntel 
299f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
300af75078fSIntel /*
301af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
302af75078fSIntel  */
303af75078fSIntel 
304f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
305f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
306f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
307af75078fSIntel 
308f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
309f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
310f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
311af75078fSIntel 
312af75078fSIntel /*
313af75078fSIntel  * Configurable value of RX free threshold.
314af75078fSIntel  */
315f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
316af75078fSIntel 
317af75078fSIntel /*
318ce8d5614SIntel  * Configurable value of RX drop enable.
319ce8d5614SIntel  */
320f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
321ce8d5614SIntel 
322ce8d5614SIntel /*
323af75078fSIntel  * Configurable value of TX free threshold.
324af75078fSIntel  */
325f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
326af75078fSIntel 
327af75078fSIntel /*
328af75078fSIntel  * Configurable value of TX RS bit threshold.
329af75078fSIntel  */
330f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
331af75078fSIntel 
332af75078fSIntel /*
3333c156061SJens Freimann  * Configurable value of buffered packets before sending.
3343c156061SJens Freimann  */
3353c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
3363c156061SJens Freimann 
3373c156061SJens Freimann /*
3383c156061SJens Freimann  * Configurable value of packet buffer timeout.
3393c156061SJens Freimann  */
3403c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
3413c156061SJens Freimann 
3423c156061SJens Freimann /*
3433c156061SJens Freimann  * Configurable value for size of VNF internal memory area
3443c156061SJens Freimann  * used for simulating noisy neighbour behaviour
3453c156061SJens Freimann  */
3463c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
3473c156061SJens Freimann 
3483c156061SJens Freimann /*
3493c156061SJens Freimann  * Configurable value of number of random writes done in
3503c156061SJens Freimann  * VNF simulation memory area.
3513c156061SJens Freimann  */
3523c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3533c156061SJens Freimann 
3543c156061SJens Freimann /*
3553c156061SJens Freimann  * Configurable value of number of random reads done in
3563c156061SJens Freimann  * VNF simulation memory area.
3573c156061SJens Freimann  */
3583c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3593c156061SJens Freimann 
3603c156061SJens Freimann /*
3613c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3623c156061SJens Freimann  * VNF simulation memory area.
3633c156061SJens Freimann  */
3643c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3653c156061SJens Freimann 
3663c156061SJens Freimann /*
367af75078fSIntel  * Receive Side Scaling (RSS) configuration.
368af75078fSIntel  */
369295968d1SFerruh Yigit uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
370af75078fSIntel 
371af75078fSIntel /*
372af75078fSIntel  * Port topology configuration
373af75078fSIntel  */
374af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
375af75078fSIntel 
3767741e4cfSIntel /*
3777741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3787741e4cfSIntel  */
3797741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3807741e4cfSIntel 
381af75078fSIntel /*
3827ee3e944SVasily Philipov  * Flow API isolated mode.
3837ee3e944SVasily Philipov  */
3847ee3e944SVasily Philipov uint8_t flow_isolate_all;
3857ee3e944SVasily Philipov 
3867ee3e944SVasily Philipov /*
387543df472SChengwen Feng  * Disable port flow flush when stop port.
388543df472SChengwen Feng  */
389543df472SChengwen Feng uint8_t no_flow_flush = 0; /* do flow flush by default */
390543df472SChengwen Feng 
391543df472SChengwen Feng /*
392bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
393bc202406SDavid Marchand  */
394bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
395bc202406SDavid Marchand 
396bc202406SDavid Marchand /*
3976937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3986937d210SStephen Hemminger  */
3996937d210SStephen Hemminger uint8_t no_device_start = 0;
4006937d210SStephen Hemminger 
4016937d210SStephen Hemminger /*
4028ea656f8SGaetan Rivet  * Enable link status change notification
4038ea656f8SGaetan Rivet  */
4048ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
4058ea656f8SGaetan Rivet 
4068ea656f8SGaetan Rivet /*
407284c908cSGaetan Rivet  * Enable device removal notification.
408284c908cSGaetan Rivet  */
409284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
410284c908cSGaetan Rivet 
411fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
412fb73e096SJeff Guo 
4134f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
4144f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
4154f1ed78eSThomas Monjalon 
416b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
417b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
418b0a9354aSPavan Nikhilesh 
41901817b10SBing Zhao /* Hairpin ports configuration mode. */
42023095155SDariusz Sosnowski uint32_t hairpin_mode;
42101817b10SBing Zhao 
42297b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
42397b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
42497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
42597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
42697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
42797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
42897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
42997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
43097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
43197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
43297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
43397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
4340e459ffaSDong Zhou 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
435bc70e559SSpike Du 	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
436eb0d471aSKalesh AP 	[RTE_ETH_EVENT_ERR_RECOVERING] = "error recovering",
437eb0d471aSKalesh AP 	[RTE_ETH_EVENT_RECOVERY_SUCCESS] = "error recovery successful",
438eb0d471aSKalesh AP 	[RTE_ETH_EVENT_RECOVERY_FAILED] = "error recovery failed",
43997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
44097b5d8b5SThomas Monjalon };
44197b5d8b5SThomas Monjalon 
442284c908cSGaetan Rivet /*
4433af72783SGaetan Rivet  * Display or mask ether events
4443af72783SGaetan Rivet  * Default to all events except VF_MBOX
4453af72783SGaetan Rivet  */
4463af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
4473af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
4483af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
4493af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
450badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
4513af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
4520e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
453eb0d471aSKalesh AP 			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED) |
454eb0d471aSKalesh AP 			    (UINT32_C(1) << RTE_ETH_EVENT_ERR_RECOVERING) |
455eb0d471aSKalesh AP 			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_SUCCESS) |
456eb0d471aSKalesh AP 			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_FAILED);
457e505d84cSAnatoly Burakov /*
458e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
459e505d84cSAnatoly Burakov  */
460e505d84cSAnatoly Burakov int do_mlockall = 0;
4613af72783SGaetan Rivet 
462a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
46362d3216dSReshma Pattan 
46462d3216dSReshma Pattan /*
46562d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
46662d3216dSReshma Pattan  */
46762d3216dSReshma Pattan uint8_t latencystats_enabled;
46862d3216dSReshma Pattan 
46962d3216dSReshma Pattan /*
4707be78d02SJosh Soref  * Lcore ID to service latency statistics.
47162d3216dSReshma Pattan  */
47262d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
47362d3216dSReshma Pattan 
47462d3216dSReshma Pattan #endif
47562d3216dSReshma Pattan 
4767b7e5ba7SIntel /*
477af75078fSIntel  * Ethernet device configuration.
478af75078fSIntel  */
4791bb4a528SFerruh Yigit struct rte_eth_rxmode rx_mode;
480af75078fSIntel 
48107e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
482295968d1SFerruh Yigit 	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
48307e5f7bdSShahaf Shuler };
484fd8c20aaSShahaf Shuler 
4852950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
486af75078fSIntel 
487a4fd5eeeSElza Mathew /*
488a4fd5eeeSElza Mathew  * Display zero values by default for xstats
489a4fd5eeeSElza Mathew  */
490a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
491a4fd5eeeSElza Mathew 
492bc700b67SDharmik Thakkar /*
493bc700b67SDharmik Thakkar  * Measure of CPU cycles disabled by default
494bc700b67SDharmik Thakkar  */
495bc700b67SDharmik Thakkar uint8_t record_core_cycles;
496bc700b67SDharmik Thakkar 
4970e4b1963SDharmik Thakkar /*
4980e4b1963SDharmik Thakkar  * Display of RX and TX bursts disabled by default
4990e4b1963SDharmik Thakkar  */
5000e4b1963SDharmik Thakkar uint8_t record_burst_stats;
5010e4b1963SDharmik Thakkar 
502f4d178c1SXueming Li /*
503f4d178c1SXueming Li  * Number of ports per shared Rx queue group, 0 disable.
504f4d178c1SXueming Li  */
505f4d178c1SXueming Li uint32_t rxq_share;
506f4d178c1SXueming Li 
507c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
508c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
5097acf894dSStephen Hurd 
510a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
5117e4441c8SRemy Horton /* Bitrate statistics */
5127e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
513e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
514e25e6c70SRemy Horton uint8_t bitrate_enabled;
515e25e6c70SRemy Horton #endif
5167e4441c8SRemy Horton 
5176970401eSDavid Marchand #ifdef RTE_LIB_GRO
518b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
519b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
5206970401eSDavid Marchand #endif
521b40f8d78SJiayu Hu 
522f9295aa2SXiaoyu Min /*
523f9295aa2SXiaoyu Min  * hexadecimal bitmask of RX mq mode can be enabled.
524f9295aa2SXiaoyu Min  */
525295968d1SFerruh Yigit enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
526f9295aa2SXiaoyu Min 
527b7b78a08SAjit Khaparde /*
528b7b78a08SAjit Khaparde  * Used to set forced link speed
529b7b78a08SAjit Khaparde  */
530b7b78a08SAjit Khaparde uint32_t eth_link_speed;
531b7b78a08SAjit Khaparde 
532a550baf2SMin Hu (Connor) /*
533a550baf2SMin Hu (Connor)  * ID of the current process in multi-process, used to
534a550baf2SMin Hu (Connor)  * configure the queues to be polled.
535a550baf2SMin Hu (Connor)  */
536a550baf2SMin Hu (Connor) int proc_id;
537a550baf2SMin Hu (Connor) 
538a550baf2SMin Hu (Connor) /*
539a550baf2SMin Hu (Connor)  * Number of processes in multi-process, used to
540a550baf2SMin Hu (Connor)  * configure the queues to be polled.
541a550baf2SMin Hu (Connor)  */
542a550baf2SMin Hu (Connor) unsigned int num_procs = 1;
543a550baf2SMin Hu (Connor) 
544f6d8a6d3SIvan Malov static void
545f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(uint16_t port_id)
546f6d8a6d3SIvan Malov {
547f6d8a6d3SIvan Malov 	uint64_t rx_meta_features = 0;
548f6d8a6d3SIvan Malov 	int ret;
549f6d8a6d3SIvan Malov 
550f6d8a6d3SIvan Malov 	if (!is_proc_primary())
551f6d8a6d3SIvan Malov 		return;
552f6d8a6d3SIvan Malov 
553f6d8a6d3SIvan Malov 	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
554f6d8a6d3SIvan Malov 	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
555f6d8a6d3SIvan Malov 	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
556f6d8a6d3SIvan Malov 
557f6d8a6d3SIvan Malov 	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
558f6d8a6d3SIvan Malov 	if (ret == 0) {
559f6d8a6d3SIvan Malov 		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
560f6d8a6d3SIvan Malov 			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
561f6d8a6d3SIvan Malov 				    port_id);
562f6d8a6d3SIvan Malov 		}
563f6d8a6d3SIvan Malov 
564f6d8a6d3SIvan Malov 		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
565f6d8a6d3SIvan Malov 			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
566f6d8a6d3SIvan Malov 				    port_id);
567f6d8a6d3SIvan Malov 		}
568f6d8a6d3SIvan Malov 
569f6d8a6d3SIvan Malov 		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
570f6d8a6d3SIvan Malov 			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
571f6d8a6d3SIvan Malov 				    port_id);
572f6d8a6d3SIvan Malov 		}
573f6d8a6d3SIvan Malov 	} else if (ret != -ENOTSUP) {
574f6d8a6d3SIvan Malov 		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
575f6d8a6d3SIvan Malov 			 port_id, rte_strerror(-ret));
576f6d8a6d3SIvan Malov 	}
577f6d8a6d3SIvan Malov }
578f6d8a6d3SIvan Malov 
579a550baf2SMin Hu (Connor) static int
580a550baf2SMin Hu (Connor) eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
581a550baf2SMin Hu (Connor) 		      const struct rte_eth_conf *dev_conf)
582a550baf2SMin Hu (Connor) {
583a550baf2SMin Hu (Connor) 	if (is_proc_primary())
584a550baf2SMin Hu (Connor) 		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
585a550baf2SMin Hu (Connor) 					dev_conf);
586a550baf2SMin Hu (Connor) 	return 0;
587a550baf2SMin Hu (Connor) }
588a550baf2SMin Hu (Connor) 
589a550baf2SMin Hu (Connor) static int
590e46372d7SHuisong Li change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
591e46372d7SHuisong Li {
592e46372d7SHuisong Li #ifdef RTE_NET_BOND
593e46372d7SHuisong Li 
594e46372d7SHuisong Li 	portid_t slave_pids[RTE_MAX_ETHPORTS];
595e46372d7SHuisong Li 	struct rte_port *port;
596e46372d7SHuisong Li 	int num_slaves;
597e46372d7SHuisong Li 	portid_t slave_pid;
598e46372d7SHuisong Li 	int i;
599e46372d7SHuisong Li 
600e46372d7SHuisong Li 	num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
601e46372d7SHuisong Li 						RTE_MAX_ETHPORTS);
602e46372d7SHuisong Li 	if (num_slaves < 0) {
603e46372d7SHuisong Li 		fprintf(stderr, "Failed to get slave list for port = %u\n",
604e46372d7SHuisong Li 			bond_pid);
605e46372d7SHuisong Li 		return num_slaves;
606e46372d7SHuisong Li 	}
607e46372d7SHuisong Li 
608e46372d7SHuisong Li 	for (i = 0; i < num_slaves; i++) {
609e46372d7SHuisong Li 		slave_pid = slave_pids[i];
610e46372d7SHuisong Li 		port = &ports[slave_pid];
611e46372d7SHuisong Li 		port->port_status =
612e46372d7SHuisong Li 			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
613e46372d7SHuisong Li 	}
614e46372d7SHuisong Li #else
615e46372d7SHuisong Li 	RTE_SET_USED(bond_pid);
616e46372d7SHuisong Li 	RTE_SET_USED(is_stop);
617e46372d7SHuisong Li #endif
618e46372d7SHuisong Li 	return 0;
619e46372d7SHuisong Li }
620e46372d7SHuisong Li 
621e46372d7SHuisong Li static int
622a550baf2SMin Hu (Connor) eth_dev_start_mp(uint16_t port_id)
623a550baf2SMin Hu (Connor) {
624e46372d7SHuisong Li 	int ret;
625e46372d7SHuisong Li 
626e46372d7SHuisong Li 	if (is_proc_primary()) {
627e46372d7SHuisong Li 		ret = rte_eth_dev_start(port_id);
628e46372d7SHuisong Li 		if (ret != 0)
629e46372d7SHuisong Li 			return ret;
630e46372d7SHuisong Li 
631e46372d7SHuisong Li 		struct rte_port *port = &ports[port_id];
632e46372d7SHuisong Li 
633e46372d7SHuisong Li 		/*
634e46372d7SHuisong Li 		 * Starting a bonded port also starts all slaves under the bonded
635e46372d7SHuisong Li 		 * device. So if this port is bond device, we need to modify the
636e46372d7SHuisong Li 		 * port status of these slaves.
637e46372d7SHuisong Li 		 */
638e46372d7SHuisong Li 		if (port->bond_flag == 1)
639e46372d7SHuisong Li 			return change_bonding_slave_port_status(port_id, false);
640e46372d7SHuisong Li 	}
641a550baf2SMin Hu (Connor) 
642a550baf2SMin Hu (Connor) 	return 0;
643a550baf2SMin Hu (Connor) }
644a550baf2SMin Hu (Connor) 
645a550baf2SMin Hu (Connor) static int
646a550baf2SMin Hu (Connor) eth_dev_stop_mp(uint16_t port_id)
647a550baf2SMin Hu (Connor) {
648e46372d7SHuisong Li 	int ret;
649e46372d7SHuisong Li 
650e46372d7SHuisong Li 	if (is_proc_primary()) {
651e46372d7SHuisong Li 		ret = rte_eth_dev_stop(port_id);
652e46372d7SHuisong Li 		if (ret != 0)
653e46372d7SHuisong Li 			return ret;
654e46372d7SHuisong Li 
655e46372d7SHuisong Li 		struct rte_port *port = &ports[port_id];
656e46372d7SHuisong Li 
657e46372d7SHuisong Li 		/*
658e46372d7SHuisong Li 		 * Stopping a bonded port also stops all slaves under the bonded
659e46372d7SHuisong Li 		 * device. So if this port is bond device, we need to modify the
660e46372d7SHuisong Li 		 * port status of these slaves.
661e46372d7SHuisong Li 		 */
662e46372d7SHuisong Li 		if (port->bond_flag == 1)
663e46372d7SHuisong Li 			return change_bonding_slave_port_status(port_id, true);
664e46372d7SHuisong Li 	}
665a550baf2SMin Hu (Connor) 
666a550baf2SMin Hu (Connor) 	return 0;
667a550baf2SMin Hu (Connor) }
668a550baf2SMin Hu (Connor) 
669a550baf2SMin Hu (Connor) static void
670a550baf2SMin Hu (Connor) mempool_free_mp(struct rte_mempool *mp)
671a550baf2SMin Hu (Connor) {
672a550baf2SMin Hu (Connor) 	if (is_proc_primary())
673a550baf2SMin Hu (Connor) 		rte_mempool_free(mp);
674a550baf2SMin Hu (Connor) }
675a550baf2SMin Hu (Connor) 
676a550baf2SMin Hu (Connor) static int
677a550baf2SMin Hu (Connor) eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
678a550baf2SMin Hu (Connor) {
679a550baf2SMin Hu (Connor) 	if (is_proc_primary())
680a550baf2SMin Hu (Connor) 		return rte_eth_dev_set_mtu(port_id, mtu);
681a550baf2SMin Hu (Connor) 
682a550baf2SMin Hu (Connor) 	return 0;
683a550baf2SMin Hu (Connor) }
684a550baf2SMin Hu (Connor) 
685ed30d9b6SIntel /* Forward function declarations */
686c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
687edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
688f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
68976ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
690d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
691cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
692fb73e096SJeff Guo 				enum rte_dev_event_type type,
693fb73e096SJeff Guo 				void *param);
69463b72657SIvan Ilchenko static void fill_xstats_display_info(void);
695ce8d5614SIntel 
696ce8d5614SIntel /*
697ce8d5614SIntel  * Check if all the ports are started.
698ce8d5614SIntel  * If yes, return positive value. If not, return zero.
699ce8d5614SIntel  */
700ce8d5614SIntel static int all_ports_started(void);
701ed30d9b6SIntel 
7026970401eSDavid Marchand #ifdef RTE_LIB_GSO
70352f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
70435b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
7056970401eSDavid Marchand #endif
70652f38a20SJiayu Hu 
707b57b66a9SOri Kam /* Holds the registered mbuf dynamic flags names. */
708b57b66a9SOri Kam char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
709b57b66a9SOri Kam 
71063b72657SIvan Ilchenko 
711af75078fSIntel /*
71298a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
713c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
714c9cafcc8SShahaf Shuler  */
715c9cafcc8SShahaf Shuler int
716c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
717c9cafcc8SShahaf Shuler {
718c9cafcc8SShahaf Shuler 	unsigned int i;
719c9cafcc8SShahaf Shuler 
720c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
721c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
722c9cafcc8SShahaf Shuler 			return 0;
723c9cafcc8SShahaf Shuler 	}
724c9cafcc8SShahaf Shuler 	return 1;
725c9cafcc8SShahaf Shuler }
726c9cafcc8SShahaf Shuler 
727c9cafcc8SShahaf Shuler /*
728af75078fSIntel  * Setup default configuration.
729af75078fSIntel  */
730af75078fSIntel static void
731af75078fSIntel set_default_fwd_lcores_config(void)
732af75078fSIntel {
733af75078fSIntel 	unsigned int i;
734af75078fSIntel 	unsigned int nb_lc;
7357acf894dSStephen Hurd 	unsigned int sock_num;
736af75078fSIntel 
737af75078fSIntel 	nb_lc = 0;
738af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
739dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
740dbfb8ec7SPhil Yang 			continue;
741c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
742c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
743c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
744c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
745c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
746c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
747c9cafcc8SShahaf Shuler 			}
748c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
7497acf894dSStephen Hurd 		}
750cb056611SStephen Hemminger 		if (i == rte_get_main_lcore())
751f54fe5eeSStephen Hurd 			continue;
752f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
753af75078fSIntel 	}
754af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
755af75078fSIntel 	nb_cfg_lcores = nb_lcores;
756af75078fSIntel 	nb_fwd_lcores = 1;
757af75078fSIntel }
758af75078fSIntel 
759af75078fSIntel static void
760af75078fSIntel set_def_peer_eth_addrs(void)
761af75078fSIntel {
762af75078fSIntel 	portid_t i;
763af75078fSIntel 
764af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
76535b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
766af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
767af75078fSIntel 	}
768af75078fSIntel }
769af75078fSIntel 
770af75078fSIntel static void
771af75078fSIntel set_default_fwd_ports_config(void)
772af75078fSIntel {
773af75078fSIntel 	portid_t pt_id;
77465a7360cSMatan Azrad 	int i = 0;
775af75078fSIntel 
776effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
77765a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
778af75078fSIntel 
779effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
780effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
781effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
782effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
783effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
784effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
785effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
786effdb8bbSPhil Yang 			}
787effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
788effdb8bbSPhil Yang 		}
789effdb8bbSPhil Yang 	}
790effdb8bbSPhil Yang 
791af75078fSIntel 	nb_cfg_ports = nb_ports;
792af75078fSIntel 	nb_fwd_ports = nb_ports;
793af75078fSIntel }
794af75078fSIntel 
795af75078fSIntel void
796af75078fSIntel set_def_fwd_config(void)
797af75078fSIntel {
798af75078fSIntel 	set_default_fwd_lcores_config();
799af75078fSIntel 	set_def_peer_eth_addrs();
800af75078fSIntel 	set_default_fwd_ports_config();
801af75078fSIntel }
802af75078fSIntel 
803761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
804c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
805c7f5dba7SAnatoly Burakov static int
806c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
807c7f5dba7SAnatoly Burakov {
808c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
809c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
810c7f5dba7SAnatoly Burakov 
811c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
812c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
813c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
814c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
815c7f5dba7SAnatoly Burakov 	 */
816c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
817c7f5dba7SAnatoly Burakov 
818c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
819c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
820c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
821c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
822c7f5dba7SAnatoly Burakov 		return -1;
823c7f5dba7SAnatoly Burakov 	}
824c7f5dba7SAnatoly Burakov 
825c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
826c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
827c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
828c7f5dba7SAnatoly Burakov 
829c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
830c7f5dba7SAnatoly Burakov 
831c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
832c7f5dba7SAnatoly Burakov 
833c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
834c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
835c7f5dba7SAnatoly Burakov 		return -1;
836c7f5dba7SAnatoly Burakov 	}
837c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
838c7f5dba7SAnatoly Burakov 
839c7f5dba7SAnatoly Burakov 	return 0;
840c7f5dba7SAnatoly Burakov }
841c7f5dba7SAnatoly Burakov 
842c7f5dba7SAnatoly Burakov static int
843c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
844c7f5dba7SAnatoly Burakov {
845c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
846c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
847c7f5dba7SAnatoly Burakov 	 */
8489d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
849c7f5dba7SAnatoly Burakov 
850c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
851c7f5dba7SAnatoly Burakov }
852c7f5dba7SAnatoly Burakov 
853c7f5dba7SAnatoly Burakov static void *
854c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
855c7f5dba7SAnatoly Burakov {
856c7f5dba7SAnatoly Burakov 	void *addr;
857c7f5dba7SAnatoly Burakov 	int flags;
858c7f5dba7SAnatoly Burakov 
859c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
860c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
861c7f5dba7SAnatoly Burakov 	if (huge)
862c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
863c7f5dba7SAnatoly Burakov 
864c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
865c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
866c7f5dba7SAnatoly Burakov 		return NULL;
867c7f5dba7SAnatoly Burakov 
868c7f5dba7SAnatoly Burakov 	return addr;
869c7f5dba7SAnatoly Burakov }
870c7f5dba7SAnatoly Burakov 
871c7f5dba7SAnatoly Burakov struct extmem_param {
872c7f5dba7SAnatoly Burakov 	void *addr;
873c7f5dba7SAnatoly Burakov 	size_t len;
874c7f5dba7SAnatoly Burakov 	size_t pgsz;
875c7f5dba7SAnatoly Burakov 	rte_iova_t *iova_table;
876c7f5dba7SAnatoly Burakov 	unsigned int iova_table_len;
877c7f5dba7SAnatoly Burakov };
878c7f5dba7SAnatoly Burakov 
879c7f5dba7SAnatoly Burakov static int
880c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
881c7f5dba7SAnatoly Burakov 		bool huge)
882c7f5dba7SAnatoly Burakov {
883c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
884c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
885c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
886c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
887c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
888c7f5dba7SAnatoly Burakov 	void *addr;
889c7f5dba7SAnatoly Burakov 	int ret;
890c7f5dba7SAnatoly Burakov 
891c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
892c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
893c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
894c7f5dba7SAnatoly Burakov 			continue;
895c7f5dba7SAnatoly Burakov 
896c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
897c7f5dba7SAnatoly Burakov 
898c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
899c7f5dba7SAnatoly Burakov 		if (!huge)
900c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
901c7f5dba7SAnatoly Burakov 
902c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
903c7f5dba7SAnatoly Burakov 		if (ret < 0) {
904c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
905c7f5dba7SAnatoly Burakov 			return -1;
906c7f5dba7SAnatoly Burakov 		}
907c7f5dba7SAnatoly Burakov 
908c7f5dba7SAnatoly Burakov 		/* allocate our memory */
909c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
910c7f5dba7SAnatoly Burakov 
911c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
912c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
913c7f5dba7SAnatoly Burakov 		 * try another one.
914c7f5dba7SAnatoly Burakov 		 */
915c7f5dba7SAnatoly Burakov 		if (addr == NULL)
916c7f5dba7SAnatoly Burakov 			continue;
917c7f5dba7SAnatoly Burakov 
918c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
919c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
920c7f5dba7SAnatoly Burakov 
921c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
922c7f5dba7SAnatoly Burakov 
923c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
924c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
925c7f5dba7SAnatoly Burakov 			goto fail;
926c7f5dba7SAnatoly Burakov 		}
927c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
928c7f5dba7SAnatoly Burakov 		if (!huge)
929c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
930c7f5dba7SAnatoly Burakov 
931c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
932c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
933c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
934c7f5dba7SAnatoly Burakov 			size_t offset;
935c7f5dba7SAnatoly Burakov 			void *cur;
936c7f5dba7SAnatoly Burakov 
937c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
938c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
939c7f5dba7SAnatoly Burakov 
940c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
941c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
942c7f5dba7SAnatoly Burakov 
943c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
944c7f5dba7SAnatoly Burakov 
945c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
946c7f5dba7SAnatoly Burakov 		}
947c7f5dba7SAnatoly Burakov 
948c7f5dba7SAnatoly Burakov 		break;
949c7f5dba7SAnatoly Burakov 	}
950c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
951c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
952c7f5dba7SAnatoly Burakov 		return -1;
953c7f5dba7SAnatoly Burakov 
954c7f5dba7SAnatoly Burakov 	param->addr = addr;
955c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
956c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
957c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
958c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
959c7f5dba7SAnatoly Burakov 
960c7f5dba7SAnatoly Burakov 	return 0;
961c7f5dba7SAnatoly Burakov fail:
962c7f5dba7SAnatoly Burakov 	free(iovas);
963c7f5dba7SAnatoly Burakov 	if (addr)
964c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
965c7f5dba7SAnatoly Burakov 
966c7f5dba7SAnatoly Burakov 	return -1;
967c7f5dba7SAnatoly Burakov }
968c7f5dba7SAnatoly Burakov 
969c7f5dba7SAnatoly Burakov static int
970c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
971c7f5dba7SAnatoly Burakov {
972c7f5dba7SAnatoly Burakov 	struct extmem_param param;
973c7f5dba7SAnatoly Burakov 	int socket_id, ret;
974c7f5dba7SAnatoly Burakov 
975c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
976c7f5dba7SAnatoly Burakov 
977c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
978c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
979c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
980c7f5dba7SAnatoly Burakov 		/* create our heap */
981c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
982c7f5dba7SAnatoly Burakov 		if (ret < 0) {
983c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
984c7f5dba7SAnatoly Burakov 			return -1;
985c7f5dba7SAnatoly Burakov 		}
986c7f5dba7SAnatoly Burakov 	}
987c7f5dba7SAnatoly Burakov 
988c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
989c7f5dba7SAnatoly Burakov 	if (ret < 0) {
990c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
991c7f5dba7SAnatoly Burakov 		return -1;
992c7f5dba7SAnatoly Burakov 	}
993c7f5dba7SAnatoly Burakov 
994c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
995c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
996c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
997c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
998c7f5dba7SAnatoly Burakov 
999c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
1000c7f5dba7SAnatoly Burakov 
1001c7f5dba7SAnatoly Burakov 	/* not needed any more */
1002c7f5dba7SAnatoly Burakov 	free(param.iova_table);
1003c7f5dba7SAnatoly Burakov 
1004c7f5dba7SAnatoly Burakov 	if (ret < 0) {
1005c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
1006c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
1007c7f5dba7SAnatoly Burakov 		return -1;
1008c7f5dba7SAnatoly Burakov 	}
1009c7f5dba7SAnatoly Burakov 
1010c7f5dba7SAnatoly Burakov 	/* success */
1011c7f5dba7SAnatoly Burakov 
1012c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
1013c7f5dba7SAnatoly Burakov 			param.len >> 20);
1014c7f5dba7SAnatoly Burakov 
1015c7f5dba7SAnatoly Burakov 	return 0;
1016c7f5dba7SAnatoly Burakov }
10173a0968c8SShahaf Shuler static void
10183a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
10193a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
10203a0968c8SShahaf Shuler {
10213a0968c8SShahaf Shuler 	uint16_t pid = 0;
10223a0968c8SShahaf Shuler 	int ret;
10233a0968c8SShahaf Shuler 
10243a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
10250a0821bcSPaulis Gributs 		struct rte_eth_dev_info dev_info;
10263a0968c8SShahaf Shuler 
10270a0821bcSPaulis Gributs 		ret = eth_dev_info_get_print_err(pid, &dev_info);
10280a0821bcSPaulis Gributs 		if (ret != 0) {
10290a0821bcSPaulis Gributs 			TESTPMD_LOG(DEBUG,
10300a0821bcSPaulis Gributs 				    "unable to get device info for port %d on addr 0x%p,"
10310a0821bcSPaulis Gributs 				    "mempool unmapping will not be performed\n",
10320a0821bcSPaulis Gributs 				    pid, memhdr->addr);
10330a0821bcSPaulis Gributs 			continue;
10340a0821bcSPaulis Gributs 		}
10350a0821bcSPaulis Gributs 
10360a0821bcSPaulis Gributs 		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
10373a0968c8SShahaf Shuler 		if (ret) {
10383a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
10393a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
10403a0968c8SShahaf Shuler 				    "for device %s\n",
1041ec5ecd7eSDavid Marchand 				    memhdr->addr, rte_dev_name(dev_info.device));
10423a0968c8SShahaf Shuler 		}
10433a0968c8SShahaf Shuler 	}
10443a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
10453a0968c8SShahaf Shuler 	if (ret) {
10463a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
10473a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
10483a0968c8SShahaf Shuler 	}
10493a0968c8SShahaf Shuler }
10503a0968c8SShahaf Shuler 
10513a0968c8SShahaf Shuler static void
10523a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
10533a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
10543a0968c8SShahaf Shuler {
10553a0968c8SShahaf Shuler 	uint16_t pid = 0;
10563a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
10573a0968c8SShahaf Shuler 	int ret;
10583a0968c8SShahaf Shuler 
10593a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
10603a0968c8SShahaf Shuler 				  page_size);
10613a0968c8SShahaf Shuler 	if (ret) {
10623a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
10633a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
10643a0968c8SShahaf Shuler 		return;
10653a0968c8SShahaf Shuler 	}
10663a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
10670a0821bcSPaulis Gributs 		struct rte_eth_dev_info dev_info;
10683a0968c8SShahaf Shuler 
10690a0821bcSPaulis Gributs 		ret = eth_dev_info_get_print_err(pid, &dev_info);
10700a0821bcSPaulis Gributs 		if (ret != 0) {
10710a0821bcSPaulis Gributs 			TESTPMD_LOG(DEBUG,
10720a0821bcSPaulis Gributs 				    "unable to get device info for port %d on addr 0x%p,"
10730a0821bcSPaulis Gributs 				    "mempool mapping will not be performed\n",
10740a0821bcSPaulis Gributs 				    pid, memhdr->addr);
10750a0821bcSPaulis Gributs 			continue;
10760a0821bcSPaulis Gributs 		}
10770a0821bcSPaulis Gributs 		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
10783a0968c8SShahaf Shuler 		if (ret) {
10793a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
10803a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
10813a0968c8SShahaf Shuler 				    "for device %s\n",
1082ec5ecd7eSDavid Marchand 				    memhdr->addr, rte_dev_name(dev_info.device));
10833a0968c8SShahaf Shuler 		}
10843a0968c8SShahaf Shuler 	}
10853a0968c8SShahaf Shuler }
1086761f7ae1SJie Zhou #endif
1087c7f5dba7SAnatoly Burakov 
108872512e18SViacheslav Ovsiienko static unsigned int
108972512e18SViacheslav Ovsiienko setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
109072512e18SViacheslav Ovsiienko 	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
109172512e18SViacheslav Ovsiienko {
109272512e18SViacheslav Ovsiienko 	struct rte_pktmbuf_extmem *xmem;
109372512e18SViacheslav Ovsiienko 	unsigned int ext_num, zone_num, elt_num;
109472512e18SViacheslav Ovsiienko 	uint16_t elt_size;
109572512e18SViacheslav Ovsiienko 
109672512e18SViacheslav Ovsiienko 	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
109772512e18SViacheslav Ovsiienko 	elt_num = EXTBUF_ZONE_SIZE / elt_size;
109872512e18SViacheslav Ovsiienko 	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
109972512e18SViacheslav Ovsiienko 
110072512e18SViacheslav Ovsiienko 	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
110172512e18SViacheslav Ovsiienko 	if (xmem == NULL) {
110272512e18SViacheslav Ovsiienko 		TESTPMD_LOG(ERR, "Cannot allocate memory for "
110372512e18SViacheslav Ovsiienko 				 "external buffer descriptors\n");
110472512e18SViacheslav Ovsiienko 		*ext_mem = NULL;
110572512e18SViacheslav Ovsiienko 		return 0;
110672512e18SViacheslav Ovsiienko 	}
110772512e18SViacheslav Ovsiienko 	for (ext_num = 0; ext_num < zone_num; ext_num++) {
110872512e18SViacheslav Ovsiienko 		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
110972512e18SViacheslav Ovsiienko 		const struct rte_memzone *mz;
111072512e18SViacheslav Ovsiienko 		char mz_name[RTE_MEMZONE_NAMESIZE];
111172512e18SViacheslav Ovsiienko 		int ret;
111272512e18SViacheslav Ovsiienko 
111372512e18SViacheslav Ovsiienko 		ret = snprintf(mz_name, sizeof(mz_name),
111472512e18SViacheslav Ovsiienko 			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
111572512e18SViacheslav Ovsiienko 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
111672512e18SViacheslav Ovsiienko 			errno = ENAMETOOLONG;
111772512e18SViacheslav Ovsiienko 			ext_num = 0;
111872512e18SViacheslav Ovsiienko 			break;
111972512e18SViacheslav Ovsiienko 		}
112013b19642SDmitry Kozlyuk 		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
112172512e18SViacheslav Ovsiienko 					 socket_id,
112272512e18SViacheslav Ovsiienko 					 RTE_MEMZONE_IOVA_CONTIG |
112372512e18SViacheslav Ovsiienko 					 RTE_MEMZONE_1GB |
112413b19642SDmitry Kozlyuk 					 RTE_MEMZONE_SIZE_HINT_ONLY);
112572512e18SViacheslav Ovsiienko 		if (mz == NULL) {
112672512e18SViacheslav Ovsiienko 			/*
112772512e18SViacheslav Ovsiienko 			 * The caller exits on external buffer creation
112872512e18SViacheslav Ovsiienko 			 * error, so there is no need to free memzones.
112972512e18SViacheslav Ovsiienko 			 */
113072512e18SViacheslav Ovsiienko 			errno = ENOMEM;
113172512e18SViacheslav Ovsiienko 			ext_num = 0;
113272512e18SViacheslav Ovsiienko 			break;
113372512e18SViacheslav Ovsiienko 		}
113472512e18SViacheslav Ovsiienko 		xseg->buf_ptr = mz->addr;
113572512e18SViacheslav Ovsiienko 		xseg->buf_iova = mz->iova;
113672512e18SViacheslav Ovsiienko 		xseg->buf_len = EXTBUF_ZONE_SIZE;
113772512e18SViacheslav Ovsiienko 		xseg->elt_size = elt_size;
113872512e18SViacheslav Ovsiienko 	}
113972512e18SViacheslav Ovsiienko 	if (ext_num == 0 && xmem != NULL) {
114072512e18SViacheslav Ovsiienko 		free(xmem);
114172512e18SViacheslav Ovsiienko 		xmem = NULL;
114272512e18SViacheslav Ovsiienko 	}
114372512e18SViacheslav Ovsiienko 	*ext_mem = xmem;
114472512e18SViacheslav Ovsiienko 	return ext_num;
114572512e18SViacheslav Ovsiienko }
114672512e18SViacheslav Ovsiienko 
1147af75078fSIntel /*
1148af75078fSIntel  * Configuration initialisation done once at init time.
1149af75078fSIntel  */
1150401b744dSShahaf Shuler static struct rte_mempool *
1151af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
115226cbb419SViacheslav Ovsiienko 		 unsigned int socket_id, uint16_t size_idx)
1153af75078fSIntel {
1154af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
1155bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
1156761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1157af75078fSIntel 	uint32_t mb_size;
1158af75078fSIntel 
1159dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1160761f7ae1SJie Zhou #endif
116126cbb419SViacheslav Ovsiienko 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1162a550baf2SMin Hu (Connor) 	if (!is_proc_primary()) {
1163a550baf2SMin Hu (Connor) 		rte_mp = rte_mempool_lookup(pool_name);
1164a550baf2SMin Hu (Connor) 		if (rte_mp == NULL)
1165a550baf2SMin Hu (Connor) 			rte_exit(EXIT_FAILURE,
1166a550baf2SMin Hu (Connor) 				"Get mbuf pool for socket %u failed: %s\n",
1167a550baf2SMin Hu (Connor) 				socket_id, rte_strerror(rte_errno));
1168a550baf2SMin Hu (Connor) 		return rte_mp;
1169a550baf2SMin Hu (Connor) 	}
1170148f963fSBruce Richardson 
1171285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
1172d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1173d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1174d1eb542eSOlivier Matz 
1175c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
1176c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
1177c7f5dba7SAnatoly Burakov 		{
1178c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
1179c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1180c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
1181c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1182c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1183c7f5dba7SAnatoly Burakov 			break;
1184c7f5dba7SAnatoly Burakov 		}
1185761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1186c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
1187c7f5dba7SAnatoly Burakov 		{
1188b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1189c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
1190148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
119159fcf854SShahaf Shuler 				socket_id, mempool_flags);
119224427bb9SOlivier Matz 			if (rte_mp == NULL)
119324427bb9SOlivier Matz 				goto err;
1194b19a0c75SOlivier Matz 
1195b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
1196b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
1197b19a0c75SOlivier Matz 				rte_mp = NULL;
119824427bb9SOlivier Matz 				goto err;
1199b19a0c75SOlivier Matz 			}
1200b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
1201b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
12023a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1203c7f5dba7SAnatoly Burakov 			break;
1204c7f5dba7SAnatoly Burakov 		}
1205c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
1206c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
1207c7f5dba7SAnatoly Burakov 		{
1208c7f5dba7SAnatoly Burakov 			int heap_socket;
1209c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1210c7f5dba7SAnatoly Burakov 
1211c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1212c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1213c7f5dba7SAnatoly Burakov 
1214c7f5dba7SAnatoly Burakov 			heap_socket =
1215c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1216c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
1217c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1218c7f5dba7SAnatoly Burakov 
12190e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
12200e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
1221ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1222c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
1223c7f5dba7SAnatoly Burakov 					heap_socket);
1224c7f5dba7SAnatoly Burakov 			break;
1225c7f5dba7SAnatoly Burakov 		}
1226761f7ae1SJie Zhou #endif
122772512e18SViacheslav Ovsiienko 	case MP_ALLOC_XBUF:
122872512e18SViacheslav Ovsiienko 		{
122972512e18SViacheslav Ovsiienko 			struct rte_pktmbuf_extmem *ext_mem;
123072512e18SViacheslav Ovsiienko 			unsigned int ext_num;
123172512e18SViacheslav Ovsiienko 
123272512e18SViacheslav Ovsiienko 			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
123372512e18SViacheslav Ovsiienko 					       socket_id, pool_name, &ext_mem);
123472512e18SViacheslav Ovsiienko 			if (ext_num == 0)
123572512e18SViacheslav Ovsiienko 				rte_exit(EXIT_FAILURE,
123672512e18SViacheslav Ovsiienko 					 "Can't create pinned data buffers\n");
123772512e18SViacheslav Ovsiienko 
123872512e18SViacheslav Ovsiienko 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
123972512e18SViacheslav Ovsiienko 					rte_mbuf_best_mempool_ops());
124072512e18SViacheslav Ovsiienko 			rte_mp = rte_pktmbuf_pool_create_extbuf
124172512e18SViacheslav Ovsiienko 					(pool_name, nb_mbuf, mb_mempool_cache,
124272512e18SViacheslav Ovsiienko 					 0, mbuf_seg_size, socket_id,
124372512e18SViacheslav Ovsiienko 					 ext_mem, ext_num);
124472512e18SViacheslav Ovsiienko 			free(ext_mem);
124572512e18SViacheslav Ovsiienko 			break;
124672512e18SViacheslav Ovsiienko 		}
1247c7f5dba7SAnatoly Burakov 	default:
1248c7f5dba7SAnatoly Burakov 		{
1249c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1250c7f5dba7SAnatoly Burakov 		}
1251bece7b6cSChristian Ehrhardt 	}
1252148f963fSBruce Richardson 
1253761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
125424427bb9SOlivier Matz err:
1255761f7ae1SJie Zhou #endif
1256af75078fSIntel 	if (rte_mp == NULL) {
1257d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
1258d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
1259d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
1260148f963fSBruce Richardson 	} else if (verbose_level > 0) {
1261591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
1262af75078fSIntel 	}
1263401b744dSShahaf Shuler 	return rte_mp;
1264af75078fSIntel }
1265af75078fSIntel 
126620a0286fSLiu Xiaofeng /*
126720a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
126820a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
126920a0286fSLiu Xiaofeng  */
127020a0286fSLiu Xiaofeng static int
127120a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
127220a0286fSLiu Xiaofeng {
127320a0286fSLiu Xiaofeng 	static int warning_once = 0;
127420a0286fSLiu Xiaofeng 
1275c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
127620a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
127761a3b0e5SAndrew Rybchenko 			fprintf(stderr,
127861a3b0e5SAndrew Rybchenko 				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
127920a0286fSLiu Xiaofeng 		warning_once = 1;
128020a0286fSLiu Xiaofeng 		return -1;
128120a0286fSLiu Xiaofeng 	}
128220a0286fSLiu Xiaofeng 	return 0;
128320a0286fSLiu Xiaofeng }
128420a0286fSLiu Xiaofeng 
12853f7311baSWei Dai /*
12863f7311baSWei Dai  * Get the allowed maximum number of RX queues.
12873f7311baSWei Dai  * *pid return the port id which has minimal value of
12883f7311baSWei Dai  * max_rx_queues in all ports.
12893f7311baSWei Dai  */
12903f7311baSWei Dai queueid_t
12913f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
12923f7311baSWei Dai {
12939e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
12946f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
12953f7311baSWei Dai 	portid_t pi;
12963f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
12973f7311baSWei Dai 
12983f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
12996f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13006f51deb9SIvan Ilchenko 			continue;
13016f51deb9SIvan Ilchenko 
13026f51deb9SIvan Ilchenko 		max_rxq_valid = true;
13033f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
13043f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
13053f7311baSWei Dai 			*pid = pi;
13063f7311baSWei Dai 		}
13073f7311baSWei Dai 	}
13086f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
13093f7311baSWei Dai }
13103f7311baSWei Dai 
13113f7311baSWei Dai /*
13123f7311baSWei Dai  * Check input rxq is valid or not.
13133f7311baSWei Dai  * If input rxq is not greater than any of maximum number
13143f7311baSWei Dai  * of RX queues of all ports, it is valid.
13153f7311baSWei Dai  * if valid, return 0, else return -1
13163f7311baSWei Dai  */
13173f7311baSWei Dai int
13183f7311baSWei Dai check_nb_rxq(queueid_t rxq)
13193f7311baSWei Dai {
13203f7311baSWei Dai 	queueid_t allowed_max_rxq;
13213f7311baSWei Dai 	portid_t pid = 0;
13223f7311baSWei Dai 
13233f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
13243f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
132561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
132661a3b0e5SAndrew Rybchenko 			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
132761a3b0e5SAndrew Rybchenko 			rxq, allowed_max_rxq, pid);
13283f7311baSWei Dai 		return -1;
13293f7311baSWei Dai 	}
13303f7311baSWei Dai 	return 0;
13313f7311baSWei Dai }
13323f7311baSWei Dai 
133336db4f6cSWei Dai /*
133436db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
133536db4f6cSWei Dai  * *pid return the port id which has minimal value of
133636db4f6cSWei Dai  * max_tx_queues in all ports.
133736db4f6cSWei Dai  */
133836db4f6cSWei Dai queueid_t
133936db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
134036db4f6cSWei Dai {
13419e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
13426f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
134336db4f6cSWei Dai 	portid_t pi;
134436db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
134536db4f6cSWei Dai 
134636db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13476f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13486f51deb9SIvan Ilchenko 			continue;
13496f51deb9SIvan Ilchenko 
13506f51deb9SIvan Ilchenko 		max_txq_valid = true;
135136db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
135236db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
135336db4f6cSWei Dai 			*pid = pi;
135436db4f6cSWei Dai 		}
135536db4f6cSWei Dai 	}
13566f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
135736db4f6cSWei Dai }
135836db4f6cSWei Dai 
135936db4f6cSWei Dai /*
136036db4f6cSWei Dai  * Check input txq is valid or not.
136136db4f6cSWei Dai  * If input txq is not greater than any of maximum number
136236db4f6cSWei Dai  * of TX queues of all ports, it is valid.
136336db4f6cSWei Dai  * if valid, return 0, else return -1
136436db4f6cSWei Dai  */
136536db4f6cSWei Dai int
136636db4f6cSWei Dai check_nb_txq(queueid_t txq)
136736db4f6cSWei Dai {
136836db4f6cSWei Dai 	queueid_t allowed_max_txq;
136936db4f6cSWei Dai 	portid_t pid = 0;
137036db4f6cSWei Dai 
137136db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
137236db4f6cSWei Dai 	if (txq > allowed_max_txq) {
137361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
137461a3b0e5SAndrew Rybchenko 			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
137561a3b0e5SAndrew Rybchenko 			txq, allowed_max_txq, pid);
137636db4f6cSWei Dai 		return -1;
137736db4f6cSWei Dai 	}
137836db4f6cSWei Dai 	return 0;
137936db4f6cSWei Dai }
138036db4f6cSWei Dai 
13811c69df45SOri Kam /*
138299e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
138399e040d3SLijun Ou  * *pid return the port id which has minimal value of
138499e040d3SLijun Ou  * max_rxd in all queues of all ports.
138599e040d3SLijun Ou  */
138699e040d3SLijun Ou static uint16_t
138799e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
138899e040d3SLijun Ou {
138999e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
139099e040d3SLijun Ou 	portid_t pi;
139199e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
139299e040d3SLijun Ou 
139399e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
139499e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
139599e040d3SLijun Ou 			continue;
139699e040d3SLijun Ou 
139799e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
139899e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
139999e040d3SLijun Ou 			*pid = pi;
140099e040d3SLijun Ou 		}
140199e040d3SLijun Ou 	}
140299e040d3SLijun Ou 	return allowed_max_rxd;
140399e040d3SLijun Ou }
140499e040d3SLijun Ou 
140599e040d3SLijun Ou /*
140699e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
140799e040d3SLijun Ou  * *pid return the port id which has minimal value of
140899e040d3SLijun Ou  * min_rxd in all queues of all ports.
140999e040d3SLijun Ou  */
141099e040d3SLijun Ou static uint16_t
141199e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
141299e040d3SLijun Ou {
141399e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
141499e040d3SLijun Ou 	portid_t pi;
141599e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
141699e040d3SLijun Ou 
141799e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
141899e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
141999e040d3SLijun Ou 			continue;
142099e040d3SLijun Ou 
142199e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
142299e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
142399e040d3SLijun Ou 			*pid = pi;
142499e040d3SLijun Ou 		}
142599e040d3SLijun Ou 	}
142699e040d3SLijun Ou 
142799e040d3SLijun Ou 	return allowed_min_rxd;
142899e040d3SLijun Ou }
142999e040d3SLijun Ou 
143099e040d3SLijun Ou /*
143199e040d3SLijun Ou  * Check input rxd is valid or not.
143299e040d3SLijun Ou  * If input rxd is not greater than any of maximum number
143399e040d3SLijun Ou  * of RXDs of every Rx queues and is not less than any of
143499e040d3SLijun Ou  * minimal number of RXDs of every Rx queues, it is valid.
143599e040d3SLijun Ou  * if valid, return 0, else return -1
143699e040d3SLijun Ou  */
143799e040d3SLijun Ou int
143899e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
143999e040d3SLijun Ou {
144099e040d3SLijun Ou 	uint16_t allowed_max_rxd;
144199e040d3SLijun Ou 	uint16_t allowed_min_rxd;
144299e040d3SLijun Ou 	portid_t pid = 0;
144399e040d3SLijun Ou 
144499e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
144599e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
144661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
144761a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
144861a3b0e5SAndrew Rybchenko 			rxd, allowed_max_rxd, pid);
144999e040d3SLijun Ou 		return -1;
145099e040d3SLijun Ou 	}
145199e040d3SLijun Ou 
145299e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
145399e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
145461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
145561a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
145661a3b0e5SAndrew Rybchenko 			rxd, allowed_min_rxd, pid);
145799e040d3SLijun Ou 		return -1;
145899e040d3SLijun Ou 	}
145999e040d3SLijun Ou 
146099e040d3SLijun Ou 	return 0;
146199e040d3SLijun Ou }
146299e040d3SLijun Ou 
146399e040d3SLijun Ou /*
146499e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every rx queues.
146599e040d3SLijun Ou  * *pid return the port id which has minimal value of
146699e040d3SLijun Ou  * max_txd in every tx queue.
146799e040d3SLijun Ou  */
146899e040d3SLijun Ou static uint16_t
146999e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
147099e040d3SLijun Ou {
147199e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
147299e040d3SLijun Ou 	portid_t pi;
147399e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
147499e040d3SLijun Ou 
147599e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
147699e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
147799e040d3SLijun Ou 			continue;
147899e040d3SLijun Ou 
147999e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
148099e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
148199e040d3SLijun Ou 			*pid = pi;
148299e040d3SLijun Ou 		}
148399e040d3SLijun Ou 	}
148499e040d3SLijun Ou 	return allowed_max_txd;
148599e040d3SLijun Ou }
148699e040d3SLijun Ou 
148799e040d3SLijun Ou /*
148899e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queues.
148999e040d3SLijun Ou  * *pid return the port id which has minimal value of
149099e040d3SLijun Ou  * min_txd in every tx queue.
149199e040d3SLijun Ou  */
149299e040d3SLijun Ou static uint16_t
149399e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
149499e040d3SLijun Ou {
149599e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
149699e040d3SLijun Ou 	portid_t pi;
149799e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
149899e040d3SLijun Ou 
149999e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
150099e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
150199e040d3SLijun Ou 			continue;
150299e040d3SLijun Ou 
150399e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
150499e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
150599e040d3SLijun Ou 			*pid = pi;
150699e040d3SLijun Ou 		}
150799e040d3SLijun Ou 	}
150899e040d3SLijun Ou 
150999e040d3SLijun Ou 	return allowed_min_txd;
151099e040d3SLijun Ou }
151199e040d3SLijun Ou 
151299e040d3SLijun Ou /*
151399e040d3SLijun Ou  * Check input txd is valid or not.
151499e040d3SLijun Ou  * If input txd is not greater than any of maximum number
151599e040d3SLijun Ou  * of TXDs of every Rx queues, it is valid.
151699e040d3SLijun Ou  * if valid, return 0, else return -1
151799e040d3SLijun Ou  */
151899e040d3SLijun Ou int
151999e040d3SLijun Ou check_nb_txd(queueid_t txd)
152099e040d3SLijun Ou {
152199e040d3SLijun Ou 	uint16_t allowed_max_txd;
152299e040d3SLijun Ou 	uint16_t allowed_min_txd;
152399e040d3SLijun Ou 	portid_t pid = 0;
152499e040d3SLijun Ou 
152599e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
152699e040d3SLijun Ou 	if (txd > allowed_max_txd) {
152761a3b0e5SAndrew Rybchenko 		fprintf(stderr,
152861a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
152961a3b0e5SAndrew Rybchenko 			txd, allowed_max_txd, pid);
153099e040d3SLijun Ou 		return -1;
153199e040d3SLijun Ou 	}
153299e040d3SLijun Ou 
153399e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
153499e040d3SLijun Ou 	if (txd < allowed_min_txd) {
153561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
153661a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
153761a3b0e5SAndrew Rybchenko 			txd, allowed_min_txd, pid);
153899e040d3SLijun Ou 		return -1;
153999e040d3SLijun Ou 	}
154099e040d3SLijun Ou 	return 0;
154199e040d3SLijun Ou }
154299e040d3SLijun Ou 
154399e040d3SLijun Ou 
154499e040d3SLijun Ou /*
15451c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
15461c69df45SOri Kam  * *pid return the port id which has minimal value of
15471c69df45SOri Kam  * max_hairpin_queues in all ports.
15481c69df45SOri Kam  */
15491c69df45SOri Kam queueid_t
15501c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15511c69df45SOri Kam {
15529e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15531c69df45SOri Kam 	portid_t pi;
15541c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
15551c69df45SOri Kam 
15561c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
15571c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15581c69df45SOri Kam 			*pid = pi;
15591c69df45SOri Kam 			return 0;
15601c69df45SOri Kam 		}
15611c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
15621c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
15631c69df45SOri Kam 			*pid = pi;
15641c69df45SOri Kam 		}
15651c69df45SOri Kam 	}
15661c69df45SOri Kam 	return allowed_max_hairpinq;
15671c69df45SOri Kam }
15681c69df45SOri Kam 
15691c69df45SOri Kam /*
15701c69df45SOri Kam  * Check input hairpin is valid or not.
15711c69df45SOri Kam  * If input hairpin is not greater than any of maximum number
15721c69df45SOri Kam  * of hairpin queues of all ports, it is valid.
15731c69df45SOri Kam  * if valid, return 0, else return -1
15741c69df45SOri Kam  */
15751c69df45SOri Kam int
15761c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
15771c69df45SOri Kam {
15781c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
15791c69df45SOri Kam 	portid_t pid = 0;
15801c69df45SOri Kam 
15811c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
15821c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
158361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
158461a3b0e5SAndrew Rybchenko 			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
15851c69df45SOri Kam 			hairpinq, allowed_max_hairpinq, pid);
15861c69df45SOri Kam 		return -1;
15871c69df45SOri Kam 	}
15881c69df45SOri Kam 	return 0;
15891c69df45SOri Kam }
15901c69df45SOri Kam 
15911bb4a528SFerruh Yigit static int
15921bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info)
15931bb4a528SFerruh Yigit {
15941bb4a528SFerruh Yigit 	uint32_t eth_overhead;
15951bb4a528SFerruh Yigit 
15961bb4a528SFerruh Yigit 	if (dev_info->max_mtu != UINT16_MAX &&
15971bb4a528SFerruh Yigit 	    dev_info->max_rx_pktlen > dev_info->max_mtu)
15981bb4a528SFerruh Yigit 		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
15991bb4a528SFerruh Yigit 	else
16001bb4a528SFerruh Yigit 		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
16011bb4a528SFerruh Yigit 
16021bb4a528SFerruh Yigit 	return eth_overhead;
16031bb4a528SFerruh Yigit }
16041bb4a528SFerruh Yigit 
1605af75078fSIntel static void
1606b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id)
1607b6b8a1ebSViacheslav Ovsiienko {
1608b6b8a1ebSViacheslav Ovsiienko 	struct rte_port *port = &ports[pid];
1609b6b8a1ebSViacheslav Ovsiienko 	int ret;
1610b6b8a1ebSViacheslav Ovsiienko 	int i;
1611b6b8a1ebSViacheslav Ovsiienko 
1612f6d8a6d3SIvan Malov 	eth_rx_metadata_negotiate_mp(pid);
1613f6d8a6d3SIvan Malov 
1614b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.txmode = tx_mode;
1615b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.rxmode = rx_mode;
1616b6b8a1ebSViacheslav Ovsiienko 
1617b6b8a1ebSViacheslav Ovsiienko 	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1618b6b8a1ebSViacheslav Ovsiienko 	if (ret != 0)
1619b6b8a1ebSViacheslav Ovsiienko 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1620b6b8a1ebSViacheslav Ovsiienko 
1621295968d1SFerruh Yigit 	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1622b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.txmode.offloads &=
1623295968d1SFerruh Yigit 			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1624b6b8a1ebSViacheslav Ovsiienko 
1625b6b8a1ebSViacheslav Ovsiienko 	/* Apply Rx offloads configuration */
1626b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
16273c4426dbSDmitry Kozlyuk 		port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
1628b6b8a1ebSViacheslav Ovsiienko 	/* Apply Tx offloads configuration */
1629b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
16303c4426dbSDmitry Kozlyuk 		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
1631b6b8a1ebSViacheslav Ovsiienko 
1632b6b8a1ebSViacheslav Ovsiienko 	if (eth_link_speed)
1633b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.link_speeds = eth_link_speed;
1634b6b8a1ebSViacheslav Ovsiienko 
16351bb4a528SFerruh Yigit 	if (max_rx_pkt_len)
16361bb4a528SFerruh Yigit 		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
16371bb4a528SFerruh Yigit 			get_eth_overhead(&port->dev_info);
16381bb4a528SFerruh Yigit 
1639b6b8a1ebSViacheslav Ovsiienko 	/* set flag to initialize port/queue */
1640b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig = 1;
1641b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig_queues = 1;
1642b6b8a1ebSViacheslav Ovsiienko 	port->socket_id = socket_id;
1643b6b8a1ebSViacheslav Ovsiienko 	port->tx_metadata = 0;
1644b6b8a1ebSViacheslav Ovsiienko 
1645b6b8a1ebSViacheslav Ovsiienko 	/*
1646b6b8a1ebSViacheslav Ovsiienko 	 * Check for maximum number of segments per MTU.
1647b6b8a1ebSViacheslav Ovsiienko 	 * Accordingly update the mbuf data size.
1648b6b8a1ebSViacheslav Ovsiienko 	 */
1649b6b8a1ebSViacheslav Ovsiienko 	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1650b6b8a1ebSViacheslav Ovsiienko 	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
16511bb4a528SFerruh Yigit 		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
16521bb4a528SFerruh Yigit 		uint16_t mtu;
1653b6b8a1ebSViacheslav Ovsiienko 
16541bb4a528SFerruh Yigit 		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
16551bb4a528SFerruh Yigit 			uint16_t data_size = (mtu + eth_overhead) /
16561bb4a528SFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
16571bb4a528SFerruh Yigit 			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
16581bb4a528SFerruh Yigit 
16591bb4a528SFerruh Yigit 			if (buffer_size > mbuf_data_size[0]) {
16601bb4a528SFerruh Yigit 				mbuf_data_size[0] = buffer_size;
1661b6b8a1ebSViacheslav Ovsiienko 				TESTPMD_LOG(WARNING,
1662b6b8a1ebSViacheslav Ovsiienko 					"Configured mbuf size of the first segment %hu\n",
1663b6b8a1ebSViacheslav Ovsiienko 					mbuf_data_size[0]);
1664b6b8a1ebSViacheslav Ovsiienko 			}
1665b6b8a1ebSViacheslav Ovsiienko 		}
1666b6b8a1ebSViacheslav Ovsiienko 	}
16671bb4a528SFerruh Yigit }
1668b6b8a1ebSViacheslav Ovsiienko 
1669b6b8a1ebSViacheslav Ovsiienko static void
1670af75078fSIntel init_config(void)
1671af75078fSIntel {
1672ce8d5614SIntel 	portid_t pid;
1673af75078fSIntel 	struct rte_mempool *mbp;
1674af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1675af75078fSIntel 	lcoreid_t  lc_id;
16766970401eSDavid Marchand #ifdef RTE_LIB_GRO
1677b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
16786970401eSDavid Marchand #endif
16796970401eSDavid Marchand #ifdef RTE_LIB_GSO
168052f38a20SJiayu Hu 	uint32_t gso_types;
16816970401eSDavid Marchand #endif
1682487f9a59SYulong Pei 
1683af75078fSIntel 	/* Configuration of logical cores. */
1684af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1685af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1686fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1687af75078fSIntel 	if (fwd_lcores == NULL) {
1688ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1689ce8d5614SIntel 							"failed\n", nb_lcores);
1690af75078fSIntel 	}
1691af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1692af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1693af75078fSIntel 					       sizeof(struct fwd_lcore),
1694fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1695af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1696ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1697ce8d5614SIntel 								"failed\n");
1698af75078fSIntel 		}
1699af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1700af75078fSIntel 	}
1701af75078fSIntel 
17027d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1703b6b8a1ebSViacheslav Ovsiienko 		uint32_t socket_id;
17046f51deb9SIvan Ilchenko 
1705b6ea6408SIntel 		if (numa_support) {
1706b6b8a1ebSViacheslav Ovsiienko 			socket_id = port_numa[pid];
1707b6b8a1ebSViacheslav Ovsiienko 			if (port_numa[pid] == NUMA_NO_CONFIG) {
1708b6b8a1ebSViacheslav Ovsiienko 				socket_id = rte_eth_dev_socket_id(pid);
170920a0286fSLiu Xiaofeng 
171029841336SPhil Yang 				/*
171129841336SPhil Yang 				 * if socket_id is invalid,
171229841336SPhil Yang 				 * set to the first available socket.
171329841336SPhil Yang 				 */
171420a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
171529841336SPhil Yang 					socket_id = socket_ids[0];
1716b6ea6408SIntel 			}
1717b6b8a1ebSViacheslav Ovsiienko 		} else {
1718b6b8a1ebSViacheslav Ovsiienko 			socket_id = (socket_num == UMA_NO_CONFIG) ?
1719b6b8a1ebSViacheslav Ovsiienko 				    0 : socket_num;
1720b6ea6408SIntel 		}
1721b6b8a1ebSViacheslav Ovsiienko 		/* Apply default TxRx configuration for all ports */
1722b6b8a1ebSViacheslav Ovsiienko 		init_config_port_offloads(pid, socket_id);
1723ce8d5614SIntel 	}
17243ab64341SOlivier Matz 	/*
17253ab64341SOlivier Matz 	 * Create pools of mbuf.
17263ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbuf in
17273ab64341SOlivier Matz 	 * socket 0 memory by default.
17283ab64341SOlivier Matz 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
17293ab64341SOlivier Matz 	 *
17303ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
17313ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
17323ab64341SOlivier Matz 	 */
17333ab64341SOlivier Matz 	if (param_total_num_mbufs)
17343ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
17353ab64341SOlivier Matz 	else {
17364ed89049SDavid Marchand 		nb_mbuf_per_pool = RX_DESC_MAX +
17373ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
17384ed89049SDavid Marchand 			TX_DESC_MAX + MAX_PKT_BURST;
17393ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
17403ab64341SOlivier Matz 	}
17413ab64341SOlivier Matz 
1742b6ea6408SIntel 	if (numa_support) {
174326cbb419SViacheslav Ovsiienko 		uint8_t i, j;
1744ce8d5614SIntel 
1745c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
174626cbb419SViacheslav Ovsiienko 			for (j = 0; j < mbuf_data_size_n; j++)
174726cbb419SViacheslav Ovsiienko 				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
174826cbb419SViacheslav Ovsiienko 					mbuf_pool_create(mbuf_data_size[j],
1749401b744dSShahaf Shuler 							  nb_mbuf_per_pool,
175026cbb419SViacheslav Ovsiienko 							  socket_ids[i], j);
17513ab64341SOlivier Matz 	} else {
175226cbb419SViacheslav Ovsiienko 		uint8_t i;
175326cbb419SViacheslav Ovsiienko 
175426cbb419SViacheslav Ovsiienko 		for (i = 0; i < mbuf_data_size_n; i++)
175526cbb419SViacheslav Ovsiienko 			mempools[i] = mbuf_pool_create
175626cbb419SViacheslav Ovsiienko 					(mbuf_data_size[i],
1757401b744dSShahaf Shuler 					 nb_mbuf_per_pool,
175826cbb419SViacheslav Ovsiienko 					 socket_num == UMA_NO_CONFIG ?
175926cbb419SViacheslav Ovsiienko 					 0 : socket_num, i);
17603ab64341SOlivier Matz 	}
1761b6ea6408SIntel 
1762b6ea6408SIntel 	init_port_config();
17635886ae07SAdrien Mazarguil 
17646970401eSDavid Marchand #ifdef RTE_LIB_GSO
1765295968d1SFerruh Yigit 	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1766295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
17676970401eSDavid Marchand #endif
17685886ae07SAdrien Mazarguil 	/*
17695886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
17705886ae07SAdrien Mazarguil 	 */
17715886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
17728fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
177326cbb419SViacheslav Ovsiienko 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
17748fd8bebcSAdrien Mazarguil 
17755886ae07SAdrien Mazarguil 		if (mbp == NULL)
177626cbb419SViacheslav Ovsiienko 			mbp = mbuf_pool_find(0, 0);
17775886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
17786970401eSDavid Marchand #ifdef RTE_LIB_GSO
177952f38a20SJiayu Hu 		/* initialize GSO context */
178052f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
178152f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
178252f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
178335b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
178435b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
178552f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
17866970401eSDavid Marchand #endif
17875886ae07SAdrien Mazarguil 	}
17885886ae07SAdrien Mazarguil 
17890c0db76fSBernard Iremonger 	fwd_config_setup();
1790b7091f1dSJiayu Hu 
17916970401eSDavid Marchand #ifdef RTE_LIB_GRO
1792b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1793b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1794b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1795b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1796b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1797b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1798b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1799b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1800b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1801b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1802b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1803b7091f1dSJiayu Hu 		}
1804b7091f1dSJiayu Hu 	}
18056970401eSDavid Marchand #endif
1806ce8d5614SIntel }
1807ce8d5614SIntel 
18082950a769SDeclan Doherty 
18092950a769SDeclan Doherty void
1810a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
18112950a769SDeclan Doherty {
18122950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
1813b6b8a1ebSViacheslav Ovsiienko 	init_config_port_offloads(new_port_id, socket_id);
18142950a769SDeclan Doherty 	init_port_config();
18152950a769SDeclan Doherty }
18162950a769SDeclan Doherty 
1817ce8d5614SIntel int
1818ce8d5614SIntel init_fwd_streams(void)
1819ce8d5614SIntel {
1820ce8d5614SIntel 	portid_t pid;
1821ce8d5614SIntel 	struct rte_port *port;
1822ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
18235a8fb55cSReshma Pattan 	queueid_t q;
1824ce8d5614SIntel 
1825ce8d5614SIntel 	/* set socket id according to numa or not */
18267d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1827ce8d5614SIntel 		port = &ports[pid];
1828ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
182961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
183061a3b0e5SAndrew Rybchenko 				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
183161a3b0e5SAndrew Rybchenko 				nb_rxq, port->dev_info.max_rx_queues);
1832ce8d5614SIntel 			return -1;
1833ce8d5614SIntel 		}
1834ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
183561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
183661a3b0e5SAndrew Rybchenko 				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
183761a3b0e5SAndrew Rybchenko 				nb_txq, port->dev_info.max_tx_queues);
1838ce8d5614SIntel 			return -1;
1839ce8d5614SIntel 		}
184020a0286fSLiu Xiaofeng 		if (numa_support) {
184120a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
184220a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
184320a0286fSLiu Xiaofeng 			else {
1844b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
184520a0286fSLiu Xiaofeng 
184629841336SPhil Yang 				/*
184729841336SPhil Yang 				 * if socket_id is invalid,
184829841336SPhil Yang 				 * set to the first available socket.
184929841336SPhil Yang 				 */
185020a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
185129841336SPhil Yang 					port->socket_id = socket_ids[0];
185220a0286fSLiu Xiaofeng 			}
185320a0286fSLiu Xiaofeng 		}
1854b6ea6408SIntel 		else {
1855b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1856af75078fSIntel 				port->socket_id = 0;
1857b6ea6408SIntel 			else
1858b6ea6408SIntel 				port->socket_id = socket_num;
1859b6ea6408SIntel 		}
1860af75078fSIntel 	}
1861af75078fSIntel 
18625a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
18635a8fb55cSReshma Pattan 	if (q == 0) {
186461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
186561a3b0e5SAndrew Rybchenko 			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
18665a8fb55cSReshma Pattan 		return -1;
18675a8fb55cSReshma Pattan 	}
18685a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1869ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1870ce8d5614SIntel 		return 0;
1871ce8d5614SIntel 	/* clear the old */
1872ce8d5614SIntel 	if (fwd_streams != NULL) {
1873ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1874ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1875ce8d5614SIntel 				continue;
1876ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1877ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1878af75078fSIntel 		}
1879ce8d5614SIntel 		rte_free(fwd_streams);
1880ce8d5614SIntel 		fwd_streams = NULL;
1881ce8d5614SIntel 	}
1882ce8d5614SIntel 
1883ce8d5614SIntel 	/* init new */
1884ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
18851f84c469SMatan Azrad 	if (nb_fwd_streams) {
1886ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
18871f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
18881f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1889ce8d5614SIntel 		if (fwd_streams == NULL)
18901f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
18911f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
18921f84c469SMatan Azrad 				 nb_fwd_streams);
1893ce8d5614SIntel 
1894af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
18951f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
18961f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
18971f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1898ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
18991f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
19001f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
19011f84c469SMatan Azrad 		}
1902af75078fSIntel 	}
1903ce8d5614SIntel 
1904ce8d5614SIntel 	return 0;
1905af75078fSIntel }
1906af75078fSIntel 
1907af75078fSIntel static void
1908af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1909af75078fSIntel {
19107569b8c1SHonnappa Nagarahalli 	uint64_t total_burst, sburst;
191185de481aSHonnappa Nagarahalli 	uint64_t nb_burst;
19127569b8c1SHonnappa Nagarahalli 	uint64_t burst_stats[4];
19137569b8c1SHonnappa Nagarahalli 	uint16_t pktnb_stats[4];
1914af75078fSIntel 	uint16_t nb_pkt;
19157569b8c1SHonnappa Nagarahalli 	int burst_percent[4], sburstp;
19167569b8c1SHonnappa Nagarahalli 	int i;
1917af75078fSIntel 
1918af75078fSIntel 	/*
1919af75078fSIntel 	 * First compute the total number of packet bursts and the
1920af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1921af75078fSIntel 	 */
19227569b8c1SHonnappa Nagarahalli 	memset(&burst_stats, 0x0, sizeof(burst_stats));
19237569b8c1SHonnappa Nagarahalli 	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
19247569b8c1SHonnappa Nagarahalli 
19257569b8c1SHonnappa Nagarahalli 	/* Show stats for 0 burst size always */
19267569b8c1SHonnappa Nagarahalli 	total_burst = pbs->pkt_burst_spread[0];
19277569b8c1SHonnappa Nagarahalli 	burst_stats[0] = pbs->pkt_burst_spread[0];
19287569b8c1SHonnappa Nagarahalli 	pktnb_stats[0] = 0;
19297569b8c1SHonnappa Nagarahalli 
19307569b8c1SHonnappa Nagarahalli 	/* Find the next 2 burst sizes with highest occurrences. */
19316a8b64fdSEli Britstein 	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1932af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
19337569b8c1SHonnappa Nagarahalli 
1934af75078fSIntel 		if (nb_burst == 0)
1935af75078fSIntel 			continue;
19367569b8c1SHonnappa Nagarahalli 
1937af75078fSIntel 		total_burst += nb_burst;
19387569b8c1SHonnappa Nagarahalli 
19397569b8c1SHonnappa Nagarahalli 		if (nb_burst > burst_stats[1]) {
19407569b8c1SHonnappa Nagarahalli 			burst_stats[2] = burst_stats[1];
19417569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = pktnb_stats[1];
1942fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1943fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
19447569b8c1SHonnappa Nagarahalli 		} else if (nb_burst > burst_stats[2]) {
19457569b8c1SHonnappa Nagarahalli 			burst_stats[2] = nb_burst;
19467569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = nb_pkt;
1947af75078fSIntel 		}
1948af75078fSIntel 	}
1949af75078fSIntel 	if (total_burst == 0)
1950af75078fSIntel 		return;
19517569b8c1SHonnappa Nagarahalli 
19527569b8c1SHonnappa Nagarahalli 	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
19537569b8c1SHonnappa Nagarahalli 	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
19547569b8c1SHonnappa Nagarahalli 		if (i == 3) {
19557569b8c1SHonnappa Nagarahalli 			printf("%d%% of other]\n", 100 - sburstp);
1956af75078fSIntel 			return;
1957af75078fSIntel 		}
19587569b8c1SHonnappa Nagarahalli 
19597569b8c1SHonnappa Nagarahalli 		sburst += burst_stats[i];
19607569b8c1SHonnappa Nagarahalli 		if (sburst == total_burst) {
19617569b8c1SHonnappa Nagarahalli 			printf("%d%% of %d pkts]\n",
19627569b8c1SHonnappa Nagarahalli 				100 - sburstp, (int) pktnb_stats[i]);
1963af75078fSIntel 			return;
1964af75078fSIntel 		}
19657569b8c1SHonnappa Nagarahalli 
19667569b8c1SHonnappa Nagarahalli 		burst_percent[i] =
19677569b8c1SHonnappa Nagarahalli 			(double)burst_stats[i] / total_burst * 100;
19687569b8c1SHonnappa Nagarahalli 		printf("%d%% of %d pkts + ",
19697569b8c1SHonnappa Nagarahalli 			burst_percent[i], (int) pktnb_stats[i]);
19707569b8c1SHonnappa Nagarahalli 		sburstp += burst_percent[i];
1971af75078fSIntel 	}
1972af75078fSIntel }
1973af75078fSIntel 
1974af75078fSIntel static void
1975af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1976af75078fSIntel {
1977af75078fSIntel 	struct fwd_stream *fs;
1978af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1979af75078fSIntel 
1980af75078fSIntel 	fs = fwd_streams[stream_id];
1981af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1982af75078fSIntel 	    (fs->fwd_dropped == 0))
1983af75078fSIntel 		return;
1984af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1985af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1986af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1987af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1988c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1989c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1990af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1991af75078fSIntel 
1992af75078fSIntel 	/* if checksum mode */
1993af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1994c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1995c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1996c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
199758d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
199858d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
1999d139cf23SLance Richardson 		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
2000d139cf23SLance Richardson 			fs->rx_bad_outer_ip_csum);
200194d65546SDavid Marchand 	} else {
200294d65546SDavid Marchand 		printf("\n");
2003af75078fSIntel 	}
2004af75078fSIntel 
20050e4b1963SDharmik Thakkar 	if (record_burst_stats) {
2006af75078fSIntel 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
2007af75078fSIntel 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
20080e4b1963SDharmik Thakkar 	}
2009af75078fSIntel }
2010af75078fSIntel 
201153324971SDavid Marchand void
201253324971SDavid Marchand fwd_stats_display(void)
201353324971SDavid Marchand {
201453324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
201553324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
201653324971SDavid Marchand 	struct {
201753324971SDavid Marchand 		struct fwd_stream *rx_stream;
201853324971SDavid Marchand 		struct fwd_stream *tx_stream;
201953324971SDavid Marchand 		uint64_t tx_dropped;
202053324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
202153324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
202253324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
2023d139cf23SLance Richardson 		uint64_t rx_bad_outer_ip_csum;
202453324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
202553324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
202653324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
202753324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
202853324971SDavid Marchand 	struct rte_eth_stats stats;
202953324971SDavid Marchand 	uint64_t fwd_cycles = 0;
203053324971SDavid Marchand 	uint64_t total_recv = 0;
203153324971SDavid Marchand 	uint64_t total_xmit = 0;
203253324971SDavid Marchand 	struct rte_port *port;
203353324971SDavid Marchand 	streamid_t sm_id;
203453324971SDavid Marchand 	portid_t pt_id;
2035baef6bbfSMin Hu (Connor) 	int ret;
203653324971SDavid Marchand 	int i;
203753324971SDavid Marchand 
203853324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
203953324971SDavid Marchand 
204053324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
204153324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
204253324971SDavid Marchand 
204353324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
204453324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
204553324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
204653324971SDavid Marchand 		} else {
204753324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
204853324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
204953324971SDavid Marchand 		}
205053324971SDavid Marchand 
205153324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
205253324971SDavid Marchand 
205353324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
205453324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
205553324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
205653324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
2057d139cf23SLance Richardson 		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2058d139cf23SLance Richardson 				fs->rx_bad_outer_ip_csum;
205953324971SDavid Marchand 
2060bc700b67SDharmik Thakkar 		if (record_core_cycles)
206199a4974aSRobin Jarry 			fwd_cycles += fs->busy_cycles;
206253324971SDavid Marchand 	}
206353324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2064c3fd1e60SFerruh Yigit 		uint64_t tx_dropped = 0;
2065c3fd1e60SFerruh Yigit 
206653324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
206753324971SDavid Marchand 		port = &ports[pt_id];
206853324971SDavid Marchand 
2069baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &stats);
2070baef6bbfSMin Hu (Connor) 		if (ret != 0) {
2071baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2072baef6bbfSMin Hu (Connor) 				"%s: Error: failed to get stats (port %u): %d",
2073baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
2074baef6bbfSMin Hu (Connor) 			continue;
2075baef6bbfSMin Hu (Connor) 		}
207653324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
207753324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
207853324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
207953324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
208053324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
208153324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
208253324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
208353324971SDavid Marchand 
208453324971SDavid Marchand 		total_recv += stats.ipackets;
208553324971SDavid Marchand 		total_xmit += stats.opackets;
208653324971SDavid Marchand 		total_rx_dropped += stats.imissed;
2087c3fd1e60SFerruh Yigit 		tx_dropped += ports_stats[pt_id].tx_dropped;
2088c3fd1e60SFerruh Yigit 		tx_dropped += stats.oerrors;
2089c3fd1e60SFerruh Yigit 		total_tx_dropped += tx_dropped;
209053324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
209153324971SDavid Marchand 
209253324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
209353324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
209453324971SDavid Marchand 
209508dcd187SHuisong Li 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
209608dcd187SHuisong Li 		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
209753324971SDavid Marchand 		       stats.ipackets + stats.imissed);
209853324971SDavid Marchand 
2099d139cf23SLance Richardson 		if (cur_fwd_eng == &csum_fwd_engine) {
210053324971SDavid Marchand 			printf("  Bad-ipcsum: %-14"PRIu64
210153324971SDavid Marchand 			       " Bad-l4csum: %-14"PRIu64
210253324971SDavid Marchand 			       "Bad-outer-l4csum: %-14"PRIu64"\n",
210353324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_ip_csum,
210453324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_l4_csum,
210553324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_outer_l4_csum);
2106d139cf23SLance Richardson 			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
2107d139cf23SLance Richardson 			       ports_stats[pt_id].rx_bad_outer_ip_csum);
2108d139cf23SLance Richardson 		}
210953324971SDavid Marchand 		if (stats.ierrors + stats.rx_nombuf > 0) {
211008dcd187SHuisong Li 			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
211108dcd187SHuisong Li 			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
211253324971SDavid Marchand 		}
211353324971SDavid Marchand 
211408dcd187SHuisong Li 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
211553324971SDavid Marchand 		       "TX-total: %-"PRIu64"\n",
2116c3fd1e60SFerruh Yigit 		       stats.opackets, tx_dropped,
2117c3fd1e60SFerruh Yigit 		       stats.opackets + tx_dropped);
211853324971SDavid Marchand 
21190e4b1963SDharmik Thakkar 		if (record_burst_stats) {
212053324971SDavid Marchand 			if (ports_stats[pt_id].rx_stream)
212153324971SDavid Marchand 				pkt_burst_stats_display("RX",
212253324971SDavid Marchand 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
212353324971SDavid Marchand 			if (ports_stats[pt_id].tx_stream)
212453324971SDavid Marchand 				pkt_burst_stats_display("TX",
212553324971SDavid Marchand 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
21260e4b1963SDharmik Thakkar 		}
212753324971SDavid Marchand 
212853324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
212953324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
213053324971SDavid Marchand 	}
213153324971SDavid Marchand 
213253324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
213353324971SDavid Marchand 	       "%s\n",
213453324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
213553324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
213653324971SDavid Marchand 	       "%-"PRIu64"\n"
213753324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
213853324971SDavid Marchand 	       "%-"PRIu64"\n",
213953324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
214053324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
214153324971SDavid Marchand 	if (total_rx_nombuf > 0)
214253324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
214353324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
214453324971SDavid Marchand 	       "%s\n",
214553324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
2146bc700b67SDharmik Thakkar 	if (record_core_cycles) {
21474c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6
21483a164e00SPhil Yang 		if (total_recv > 0 || total_xmit > 0) {
21493a164e00SPhil Yang 			uint64_t total_pkts = 0;
21503a164e00SPhil Yang 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
21513a164e00SPhil Yang 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
21523a164e00SPhil Yang 				total_pkts = total_xmit;
21533a164e00SPhil Yang 			else
21543a164e00SPhil Yang 				total_pkts = total_recv;
21553a164e00SPhil Yang 
215699a4974aSRobin Jarry 			printf("\n  CPU cycles/packet=%.2F (busy cycles="
21573a164e00SPhil Yang 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
21584c0497b1SDharmik Thakkar 			       " MHz Clock\n",
21593a164e00SPhil Yang 			       (double) fwd_cycles / total_pkts,
21603a164e00SPhil Yang 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
21614c0497b1SDharmik Thakkar 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
21623a164e00SPhil Yang 		}
2163bc700b67SDharmik Thakkar 	}
216453324971SDavid Marchand }
216553324971SDavid Marchand 
216653324971SDavid Marchand void
216753324971SDavid Marchand fwd_stats_reset(void)
216853324971SDavid Marchand {
216953324971SDavid Marchand 	streamid_t sm_id;
217053324971SDavid Marchand 	portid_t pt_id;
2171baef6bbfSMin Hu (Connor) 	int ret;
217253324971SDavid Marchand 	int i;
217353324971SDavid Marchand 
217453324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
217553324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
2176baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2177baef6bbfSMin Hu (Connor) 		if (ret != 0)
2178baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2179baef6bbfSMin Hu (Connor) 				"%s: Error: failed to clear stats (port %u):%d",
2180baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
218153324971SDavid Marchand 	}
218253324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
218353324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
218453324971SDavid Marchand 
218553324971SDavid Marchand 		fs->rx_packets = 0;
218653324971SDavid Marchand 		fs->tx_packets = 0;
218753324971SDavid Marchand 		fs->fwd_dropped = 0;
218853324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
218953324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
219053324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
2191d139cf23SLance Richardson 		fs->rx_bad_outer_ip_csum = 0;
219253324971SDavid Marchand 
219353324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
219453324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
219599a4974aSRobin Jarry 		fs->busy_cycles = 0;
219653324971SDavid Marchand 	}
219753324971SDavid Marchand }
219853324971SDavid Marchand 
2199af75078fSIntel static void
22007741e4cfSIntel flush_fwd_rx_queues(void)
2201af75078fSIntel {
2202af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2203af75078fSIntel 	portid_t  rxp;
22047741e4cfSIntel 	portid_t port_id;
2205af75078fSIntel 	queueid_t rxq;
2206af75078fSIntel 	uint16_t  nb_rx;
2207af75078fSIntel 	uint8_t   j;
2208f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2209594302c7SJames Poole 	uint64_t timer_period;
2210f487715fSReshma Pattan 
2211a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
2212a550baf2SMin Hu (Connor) 		printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");
2213a550baf2SMin Hu (Connor) 		return;
2214a550baf2SMin Hu (Connor) 	}
2215a550baf2SMin Hu (Connor) 
2216f487715fSReshma Pattan 	/* convert to number of cycles */
2217594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2218af75078fSIntel 
2219af75078fSIntel 	for (j = 0; j < 2; j++) {
22207741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2221af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
22227741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
22233c4426dbSDmitry Kozlyuk 
22243c4426dbSDmitry Kozlyuk 				/* Polling stopped queues is prohibited. */
22253c4426dbSDmitry Kozlyuk 				if (ports[port_id].rxq[rxq].state ==
22263c4426dbSDmitry Kozlyuk 				    RTE_ETH_QUEUE_STATE_STOPPED)
22273c4426dbSDmitry Kozlyuk 					continue;
22283c4426dbSDmitry Kozlyuk 
2229f487715fSReshma Pattan 				/**
2230f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
2231f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
2232f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
2233f487715fSReshma Pattan 				* after 1sec timer expiry.
2234f487715fSReshma Pattan 				*/
2235f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2236af75078fSIntel 				do {
22377741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2238013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2239d00fee5dSDavid Marchand 					rte_pktmbuf_free_bulk(pkts_burst, nb_rx);
2240f487715fSReshma Pattan 
2241f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2242f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2243f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2244f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2245f487715fSReshma Pattan 					(timer_tsc < timer_period));
2246f487715fSReshma Pattan 				timer_tsc = 0;
2247af75078fSIntel 			}
2248af75078fSIntel 		}
2249af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2250af75078fSIntel 	}
2251af75078fSIntel }
2252af75078fSIntel 
2253af75078fSIntel static void
2254af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2255af75078fSIntel {
2256af75078fSIntel 	struct fwd_stream **fsm;
225799a4974aSRobin Jarry 	uint64_t prev_tsc;
2258af75078fSIntel 	streamid_t nb_fs;
2259af75078fSIntel 	streamid_t sm_id;
2260a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
22617e4441c8SRemy Horton 	uint64_t tics_per_1sec;
22627e4441c8SRemy Horton 	uint64_t tics_datum;
22637e4441c8SRemy Horton 	uint64_t tics_current;
22644918a357SXiaoyun Li 	uint16_t i, cnt_ports;
2265af75078fSIntel 
22664918a357SXiaoyun Li 	cnt_ports = nb_ports;
22677e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
22687e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
22697e4441c8SRemy Horton #endif
2270af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
2271af75078fSIntel 	nb_fs = fc->stream_nb;
227299a4974aSRobin Jarry 	prev_tsc = rte_rdtsc();
2273af75078fSIntel 	do {
227406c20561SDavid Marchand 		for (sm_id = 0; sm_id < nb_fs; sm_id++) {
227506c20561SDavid Marchand 			struct fwd_stream *fs = fsm[sm_id];
227606c20561SDavid Marchand 			uint64_t start_fs_tsc = 0;
227706c20561SDavid Marchand 			bool busy;
227806c20561SDavid Marchand 
227906c20561SDavid Marchand 			if (fs->disabled)
228006c20561SDavid Marchand 				continue;
228106c20561SDavid Marchand 			if (record_core_cycles)
228206c20561SDavid Marchand 				start_fs_tsc = rte_rdtsc();
228306c20561SDavid Marchand 			busy = (*pkt_fwd)(fs);
228406c20561SDavid Marchand 			if (record_core_cycles && busy)
228506c20561SDavid Marchand 				fs->busy_cycles += rte_rdtsc() - start_fs_tsc;
228606c20561SDavid Marchand 		}
2287a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
2288e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
2289e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
22907e4441c8SRemy Horton 			tics_current = rte_rdtsc();
22917e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
22927e4441c8SRemy Horton 				/* Periodic bitrate calculation */
22934918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
2294e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
22954918a357SXiaoyun Li 						ports_ids[i]);
22967e4441c8SRemy Horton 				tics_datum = tics_current;
22977e4441c8SRemy Horton 			}
2298e25e6c70SRemy Horton 		}
22997e4441c8SRemy Horton #endif
2300a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
230165eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
230265eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
230362d3216dSReshma Pattan 			rte_latencystats_update();
230462d3216dSReshma Pattan #endif
230599a4974aSRobin Jarry 		if (record_core_cycles) {
230699a4974aSRobin Jarry 			uint64_t tsc = rte_rdtsc();
230762d3216dSReshma Pattan 
230899a4974aSRobin Jarry 			fc->total_cycles += tsc - prev_tsc;
230999a4974aSRobin Jarry 			prev_tsc = tsc;
231099a4974aSRobin Jarry 		}
2311af75078fSIntel 	} while (! fc->stopped);
2312af75078fSIntel }
2313af75078fSIntel 
2314af75078fSIntel static int
231599a4974aSRobin Jarry lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
231699a4974aSRobin Jarry {
231799a4974aSRobin Jarry 	struct fwd_stream **fsm;
231899a4974aSRobin Jarry 	struct fwd_lcore *fc;
231999a4974aSRobin Jarry 	streamid_t nb_fs;
232099a4974aSRobin Jarry 	streamid_t sm_id;
232199a4974aSRobin Jarry 
232299a4974aSRobin Jarry 	fc = lcore_to_fwd_lcore(lcore_id);
232399a4974aSRobin Jarry 	if (fc == NULL)
232499a4974aSRobin Jarry 		return -1;
232599a4974aSRobin Jarry 
232699a4974aSRobin Jarry 	fsm = &fwd_streams[fc->stream_idx];
232799a4974aSRobin Jarry 	nb_fs = fc->stream_nb;
232899a4974aSRobin Jarry 	usage->busy_cycles = 0;
232999a4974aSRobin Jarry 	usage->total_cycles = fc->total_cycles;
233099a4974aSRobin Jarry 
233199a4974aSRobin Jarry 	for (sm_id = 0; sm_id < nb_fs; sm_id++) {
233299a4974aSRobin Jarry 		if (!fsm[sm_id]->disabled)
233399a4974aSRobin Jarry 			usage->busy_cycles += fsm[sm_id]->busy_cycles;
233499a4974aSRobin Jarry 	}
233599a4974aSRobin Jarry 
233699a4974aSRobin Jarry 	return 0;
233799a4974aSRobin Jarry }
233899a4974aSRobin Jarry 
233999a4974aSRobin Jarry static int
2340af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2341af75078fSIntel {
2342af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2343af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2344af75078fSIntel 	return 0;
2345af75078fSIntel }
2346af75078fSIntel 
2347af75078fSIntel /*
2348af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2349af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2350af75078fSIntel  */
2351af75078fSIntel static int
2352af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2353af75078fSIntel {
2354af75078fSIntel 	struct fwd_lcore *fwd_lc;
2355af75078fSIntel 	struct fwd_lcore tmp_lcore;
2356af75078fSIntel 
2357af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2358af75078fSIntel 	tmp_lcore = *fwd_lc;
2359af75078fSIntel 	tmp_lcore.stopped = 1;
2360af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2361af75078fSIntel 	return 0;
2362af75078fSIntel }
2363af75078fSIntel 
2364af75078fSIntel /*
2365af75078fSIntel  * Launch packet forwarding:
2366af75078fSIntel  *     - Setup per-port forwarding context.
2367af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2368af75078fSIntel  */
2369af75078fSIntel static void
2370af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2371af75078fSIntel {
2372af75078fSIntel 	unsigned int i;
2373af75078fSIntel 	unsigned int lc_id;
2374af75078fSIntel 	int diag;
2375af75078fSIntel 
2376af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2377af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2378af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2379af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2380af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2381af75078fSIntel 						     fwd_lcores[i], lc_id);
2382af75078fSIntel 			if (diag != 0)
238361a3b0e5SAndrew Rybchenko 				fprintf(stderr,
238461a3b0e5SAndrew Rybchenko 					"launch lcore %u failed - diag=%d\n",
2385af75078fSIntel 					lc_id, diag);
2386af75078fSIntel 		}
2387af75078fSIntel 	}
2388af75078fSIntel }
2389af75078fSIntel 
2390180ba023SDavid Marchand void
2391180ba023SDavid Marchand common_fwd_stream_init(struct fwd_stream *fs)
2392180ba023SDavid Marchand {
2393180ba023SDavid Marchand 	bool rx_stopped, tx_stopped;
2394180ba023SDavid Marchand 
2395180ba023SDavid Marchand 	rx_stopped = (ports[fs->rx_port].rxq[fs->rx_queue].state == RTE_ETH_QUEUE_STATE_STOPPED);
2396180ba023SDavid Marchand 	tx_stopped = (ports[fs->tx_port].txq[fs->tx_queue].state == RTE_ETH_QUEUE_STATE_STOPPED);
2397180ba023SDavid Marchand 	fs->disabled = rx_stopped || tx_stopped;
2398180ba023SDavid Marchand }
2399180ba023SDavid Marchand 
2400*5028f207SShiyang He static void
2401*5028f207SShiyang He update_rx_queue_state(uint16_t port_id, uint16_t queue_id)
2402*5028f207SShiyang He {
2403*5028f207SShiyang He 	struct rte_eth_rxq_info rx_qinfo;
2404*5028f207SShiyang He 	int32_t rc;
2405*5028f207SShiyang He 
2406*5028f207SShiyang He 	rc = rte_eth_rx_queue_info_get(port_id,
2407*5028f207SShiyang He 			queue_id, &rx_qinfo);
2408*5028f207SShiyang He 	if (rc == 0) {
2409*5028f207SShiyang He 		ports[port_id].rxq[queue_id].state =
2410*5028f207SShiyang He 			rx_qinfo.queue_state;
2411*5028f207SShiyang He 	} else if (rc == -ENOTSUP) {
2412*5028f207SShiyang He 		/*
2413*5028f207SShiyang He 		 * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED
2414*5028f207SShiyang He 		 * to ensure that the PMDs do not implement
2415*5028f207SShiyang He 		 * rte_eth_rx_queue_info_get can forward.
2416*5028f207SShiyang He 		 */
2417*5028f207SShiyang He 		ports[port_id].rxq[queue_id].state =
2418*5028f207SShiyang He 			RTE_ETH_QUEUE_STATE_STARTED;
2419*5028f207SShiyang He 	} else {
2420*5028f207SShiyang He 		TESTPMD_LOG(WARNING,
2421*5028f207SShiyang He 			"Failed to get rx queue info\n");
2422*5028f207SShiyang He 	}
2423*5028f207SShiyang He }
2424*5028f207SShiyang He 
2425*5028f207SShiyang He static void
2426*5028f207SShiyang He update_tx_queue_state(uint16_t port_id, uint16_t queue_id)
2427*5028f207SShiyang He {
2428*5028f207SShiyang He 	struct rte_eth_txq_info tx_qinfo;
2429*5028f207SShiyang He 	int32_t rc;
2430*5028f207SShiyang He 
2431*5028f207SShiyang He 	rc = rte_eth_tx_queue_info_get(port_id,
2432*5028f207SShiyang He 			queue_id, &tx_qinfo);
2433*5028f207SShiyang He 	if (rc == 0) {
2434*5028f207SShiyang He 		ports[port_id].txq[queue_id].state =
2435*5028f207SShiyang He 			tx_qinfo.queue_state;
2436*5028f207SShiyang He 	} else if (rc == -ENOTSUP) {
2437*5028f207SShiyang He 		/*
2438*5028f207SShiyang He 		 * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED
2439*5028f207SShiyang He 		 * to ensure that the PMDs do not implement
2440*5028f207SShiyang He 		 * rte_eth_tx_queue_info_get can forward.
2441*5028f207SShiyang He 		 */
2442*5028f207SShiyang He 		ports[port_id].txq[queue_id].state =
2443*5028f207SShiyang He 			RTE_ETH_QUEUE_STATE_STARTED;
2444*5028f207SShiyang He 	} else {
2445*5028f207SShiyang He 		TESTPMD_LOG(WARNING,
2446*5028f207SShiyang He 			"Failed to get tx queue info\n");
2447*5028f207SShiyang He 	}
2448*5028f207SShiyang He }
2449*5028f207SShiyang He 
2450*5028f207SShiyang He static void
2451*5028f207SShiyang He update_queue_state(void)
2452*5028f207SShiyang He {
2453*5028f207SShiyang He 	portid_t pi;
2454*5028f207SShiyang He 	queueid_t qi;
2455*5028f207SShiyang He 
2456*5028f207SShiyang He 	RTE_ETH_FOREACH_DEV(pi) {
2457*5028f207SShiyang He 		for (qi = 0; qi < nb_rxq; qi++)
2458*5028f207SShiyang He 			update_rx_queue_state(pi, qi);
2459*5028f207SShiyang He 		for (qi = 0; qi < nb_txq; qi++)
2460*5028f207SShiyang He 			update_tx_queue_state(pi, qi);
2461*5028f207SShiyang He 	}
2462*5028f207SShiyang He }
2463*5028f207SShiyang He 
2464af75078fSIntel /*
2465af75078fSIntel  * Launch packet forwarding configuration.
2466af75078fSIntel  */
2467af75078fSIntel void
2468af75078fSIntel start_packet_forwarding(int with_tx_first)
2469af75078fSIntel {
2470af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2471af75078fSIntel 	port_fwd_end_t  port_fwd_end;
24723c4426dbSDmitry Kozlyuk 	stream_init_t stream_init = cur_fwd_eng->stream_init;
2473af75078fSIntel 	unsigned int i;
2474af75078fSIntel 
24755a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
24765a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
24775a8fb55cSReshma Pattan 
24785a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
24795a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
24805a8fb55cSReshma Pattan 
24815a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
24825a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
24835a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
24845a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
24855a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
24865a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
24875a8fb55cSReshma Pattan 
2488ce8d5614SIntel 	if (all_ports_started() == 0) {
248961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Not all ports were started\n");
2490ce8d5614SIntel 		return;
2491ce8d5614SIntel 	}
2492af75078fSIntel 	if (test_done == 0) {
249361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding already started\n");
2494af75078fSIntel 		return;
2495af75078fSIntel 	}
24967741e4cfSIntel 
249747a767b2SMatan Azrad 	fwd_config_setup();
249847a767b2SMatan Azrad 
249965744833SXueming Li 	pkt_fwd_config_display(&cur_fwd_config);
250065744833SXueming Li 	if (!pkt_fwd_shared_rxq_check())
250165744833SXueming Li 		return;
250265744833SXueming Li 
2503*5028f207SShiyang He 	if (stream_init != NULL) {
2504*5028f207SShiyang He 		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
2505*5028f207SShiyang He 			update_queue_state();
25063c4426dbSDmitry Kozlyuk 		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
25073c4426dbSDmitry Kozlyuk 			stream_init(fwd_streams[i]);
2508*5028f207SShiyang He 	}
25093c4426dbSDmitry Kozlyuk 
2510a78040c9SAlvin Zhang 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2511a78040c9SAlvin Zhang 	if (port_fwd_begin != NULL) {
2512a78040c9SAlvin Zhang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2513a78040c9SAlvin Zhang 			if (port_fwd_begin(fwd_ports_ids[i])) {
2514a78040c9SAlvin Zhang 				fprintf(stderr,
2515a78040c9SAlvin Zhang 					"Packet forwarding is not ready\n");
2516a78040c9SAlvin Zhang 				return;
2517a78040c9SAlvin Zhang 			}
2518a78040c9SAlvin Zhang 		}
2519a78040c9SAlvin Zhang 	}
2520a78040c9SAlvin Zhang 
2521a78040c9SAlvin Zhang 	if (with_tx_first) {
2522a78040c9SAlvin Zhang 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2523a78040c9SAlvin Zhang 		if (port_fwd_begin != NULL) {
2524a78040c9SAlvin Zhang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2525a78040c9SAlvin Zhang 				if (port_fwd_begin(fwd_ports_ids[i])) {
2526a78040c9SAlvin Zhang 					fprintf(stderr,
2527a78040c9SAlvin Zhang 						"Packet forwarding is not ready\n");
2528a78040c9SAlvin Zhang 					return;
2529a78040c9SAlvin Zhang 				}
2530a78040c9SAlvin Zhang 			}
2531a78040c9SAlvin Zhang 		}
2532a78040c9SAlvin Zhang 	}
2533a78040c9SAlvin Zhang 
2534a78040c9SAlvin Zhang 	test_done = 0;
2535a78040c9SAlvin Zhang 
25367741e4cfSIntel 	if(!no_flush_rx)
25377741e4cfSIntel 		flush_fwd_rx_queues();
25387741e4cfSIntel 
2539af75078fSIntel 	rxtx_config_display();
2540af75078fSIntel 
254153324971SDavid Marchand 	fwd_stats_reset();
2542af75078fSIntel 	if (with_tx_first) {
2543acbf77a6SZhihong Wang 		while (with_tx_first--) {
2544acbf77a6SZhihong Wang 			launch_packet_forwarding(
2545acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2546af75078fSIntel 			rte_eal_mp_wait_lcore();
2547acbf77a6SZhihong Wang 		}
2548af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2549af75078fSIntel 		if (port_fwd_end != NULL) {
2550af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2551af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2552af75078fSIntel 		}
2553af75078fSIntel 	}
2554af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2555af75078fSIntel }
2556af75078fSIntel 
2557af75078fSIntel void
2558af75078fSIntel stop_packet_forwarding(void)
2559af75078fSIntel {
2560af75078fSIntel 	port_fwd_end_t port_fwd_end;
2561af75078fSIntel 	lcoreid_t lc_id;
256253324971SDavid Marchand 	portid_t pt_id;
256353324971SDavid Marchand 	int i;
2564af75078fSIntel 
2565af75078fSIntel 	if (test_done) {
256661a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding not started\n");
2567af75078fSIntel 		return;
2568af75078fSIntel 	}
2569af75078fSIntel 	printf("Telling cores to stop...");
2570af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2571af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2572af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2573af75078fSIntel 	rte_eal_mp_wait_lcore();
2574af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2575af75078fSIntel 	if (port_fwd_end != NULL) {
2576af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2577af75078fSIntel 			pt_id = fwd_ports_ids[i];
2578af75078fSIntel 			(*port_fwd_end)(pt_id);
2579af75078fSIntel 		}
2580af75078fSIntel 	}
2581c185d42cSDavid Marchand 
258253324971SDavid Marchand 	fwd_stats_display();
258358d475b7SJerin Jacob 
2584af75078fSIntel 	printf("\nDone.\n");
2585af75078fSIntel 	test_done = 1;
2586af75078fSIntel }
2587af75078fSIntel 
2588cfae07fdSOuyang Changchun void
2589cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2590cfae07fdSOuyang Changchun {
2591492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
259261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link up fail.\n");
2593cfae07fdSOuyang Changchun }
2594cfae07fdSOuyang Changchun 
2595cfae07fdSOuyang Changchun void
2596cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2597cfae07fdSOuyang Changchun {
2598492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
259961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link down fail.\n");
2600cfae07fdSOuyang Changchun }
2601cfae07fdSOuyang Changchun 
2602ce8d5614SIntel static int
2603ce8d5614SIntel all_ports_started(void)
2604ce8d5614SIntel {
2605ce8d5614SIntel 	portid_t pi;
2606ce8d5614SIntel 	struct rte_port *port;
2607ce8d5614SIntel 
26087d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2609ce8d5614SIntel 		port = &ports[pi];
2610ce8d5614SIntel 		/* Check if there is a port which is not started */
261141b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
261241b05095SBernard Iremonger 			(port->slave_flag == 0))
2613ce8d5614SIntel 			return 0;
2614ce8d5614SIntel 	}
2615ce8d5614SIntel 
2616ce8d5614SIntel 	/* No port is not started */
2617ce8d5614SIntel 	return 1;
2618ce8d5614SIntel }
2619ce8d5614SIntel 
2620148f963fSBruce Richardson int
26216018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
26226018eb8cSShahaf Shuler {
26236018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
26246018eb8cSShahaf Shuler 
26256018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
26266018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
26276018eb8cSShahaf Shuler 		return 0;
26286018eb8cSShahaf Shuler 	return 1;
26296018eb8cSShahaf Shuler }
26306018eb8cSShahaf Shuler 
26316018eb8cSShahaf Shuler int
2632edab33b1STetsuya Mukawa all_ports_stopped(void)
2633edab33b1STetsuya Mukawa {
2634edab33b1STetsuya Mukawa 	portid_t pi;
2635edab33b1STetsuya Mukawa 
26367d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
26376018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2638edab33b1STetsuya Mukawa 			return 0;
2639edab33b1STetsuya Mukawa 	}
2640edab33b1STetsuya Mukawa 
2641edab33b1STetsuya Mukawa 	return 1;
2642edab33b1STetsuya Mukawa }
2643edab33b1STetsuya Mukawa 
2644edab33b1STetsuya Mukawa int
2645edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2646edab33b1STetsuya Mukawa {
2647edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2648edab33b1STetsuya Mukawa 		return 0;
2649edab33b1STetsuya Mukawa 
2650edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2651edab33b1STetsuya Mukawa 		return 0;
2652edab33b1STetsuya Mukawa 
2653edab33b1STetsuya Mukawa 	return 1;
2654edab33b1STetsuya Mukawa }
2655edab33b1STetsuya Mukawa 
265623095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_FORCE_MEMORY RTE_BIT32(8)
265723095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_FORCE_MEMORY RTE_BIT32(9)
265823095155SDariusz Sosnowski 
265923095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_LOCKED_MEMORY RTE_BIT32(12)
266023095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_RTE_MEMORY RTE_BIT32(13)
266123095155SDariusz Sosnowski 
266223095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_LOCKED_MEMORY RTE_BIT32(16)
266323095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_RTE_MEMORY RTE_BIT32(17)
266423095155SDariusz Sosnowski 
266523095155SDariusz Sosnowski 
26661c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
26671c69df45SOri Kam static int
266801817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
26691c69df45SOri Kam {
26701c69df45SOri Kam 	queueid_t qi;
26711c69df45SOri Kam 	struct rte_eth_hairpin_conf hairpin_conf = {
26721c69df45SOri Kam 		.peer_count = 1,
26731c69df45SOri Kam 	};
26741c69df45SOri Kam 	int i;
26751c69df45SOri Kam 	int diag;
26761c69df45SOri Kam 	struct rte_port *port = &ports[pi];
267701817b10SBing Zhao 	uint16_t peer_rx_port = pi;
267801817b10SBing Zhao 	uint16_t peer_tx_port = pi;
267901817b10SBing Zhao 	uint32_t manual = 1;
268001817b10SBing Zhao 	uint32_t tx_exp = hairpin_mode & 0x10;
268123095155SDariusz Sosnowski 	uint32_t rx_force_memory = hairpin_mode & HAIRPIN_MODE_RX_FORCE_MEMORY;
268223095155SDariusz Sosnowski 	uint32_t rx_locked_memory = hairpin_mode & HAIRPIN_MODE_RX_LOCKED_MEMORY;
268323095155SDariusz Sosnowski 	uint32_t rx_rte_memory = hairpin_mode & HAIRPIN_MODE_RX_RTE_MEMORY;
268423095155SDariusz Sosnowski 	uint32_t tx_force_memory = hairpin_mode & HAIRPIN_MODE_TX_FORCE_MEMORY;
268523095155SDariusz Sosnowski 	uint32_t tx_locked_memory = hairpin_mode & HAIRPIN_MODE_TX_LOCKED_MEMORY;
268623095155SDariusz Sosnowski 	uint32_t tx_rte_memory = hairpin_mode & HAIRPIN_MODE_TX_RTE_MEMORY;
268701817b10SBing Zhao 
268801817b10SBing Zhao 	if (!(hairpin_mode & 0xf)) {
268901817b10SBing Zhao 		peer_rx_port = pi;
269001817b10SBing Zhao 		peer_tx_port = pi;
269101817b10SBing Zhao 		manual = 0;
269201817b10SBing Zhao 	} else if (hairpin_mode & 0x1) {
269301817b10SBing Zhao 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
269401817b10SBing Zhao 						       RTE_ETH_DEV_NO_OWNER);
269501817b10SBing Zhao 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
269601817b10SBing Zhao 			peer_tx_port = rte_eth_find_next_owned_by(0,
269701817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
269801817b10SBing Zhao 		if (p_pi != RTE_MAX_ETHPORTS) {
269901817b10SBing Zhao 			peer_rx_port = p_pi;
270001817b10SBing Zhao 		} else {
270101817b10SBing Zhao 			uint16_t next_pi;
270201817b10SBing Zhao 
270301817b10SBing Zhao 			/* Last port will be the peer RX port of the first. */
270401817b10SBing Zhao 			RTE_ETH_FOREACH_DEV(next_pi)
270501817b10SBing Zhao 				peer_rx_port = next_pi;
270601817b10SBing Zhao 		}
270701817b10SBing Zhao 		manual = 1;
270801817b10SBing Zhao 	} else if (hairpin_mode & 0x2) {
270901817b10SBing Zhao 		if (cnt_pi & 0x1) {
271001817b10SBing Zhao 			peer_rx_port = p_pi;
271101817b10SBing Zhao 		} else {
271201817b10SBing Zhao 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
271301817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
271401817b10SBing Zhao 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
271501817b10SBing Zhao 				peer_rx_port = pi;
271601817b10SBing Zhao 		}
271701817b10SBing Zhao 		peer_tx_port = peer_rx_port;
271801817b10SBing Zhao 		manual = 1;
271901817b10SBing Zhao 	}
27201c69df45SOri Kam 
27211c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
272201817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_rx_port;
27231c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
272401817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
272501817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
272623095155SDariusz Sosnowski 		hairpin_conf.force_memory = !!tx_force_memory;
272723095155SDariusz Sosnowski 		hairpin_conf.use_locked_device_memory = !!tx_locked_memory;
272823095155SDariusz Sosnowski 		hairpin_conf.use_rte_memory = !!tx_rte_memory;
27291c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
27301c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
27311c69df45SOri Kam 		i++;
27321c69df45SOri Kam 		if (diag == 0)
27331c69df45SOri Kam 			continue;
27341c69df45SOri Kam 
27351c69df45SOri Kam 		/* Fail to setup rx queue, return */
2736eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2737eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2738eac341d3SJoyce Kong 		else
273961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
274061a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
274161a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
274261a3b0e5SAndrew Rybchenko 			pi);
27431c69df45SOri Kam 		/* try to reconfigure queues next time */
27441c69df45SOri Kam 		port->need_reconfig_queues = 1;
27451c69df45SOri Kam 		return -1;
27461c69df45SOri Kam 	}
27471c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
274801817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_tx_port;
27491c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
275001817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
275101817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
275223095155SDariusz Sosnowski 		hairpin_conf.force_memory = !!rx_force_memory;
275323095155SDariusz Sosnowski 		hairpin_conf.use_locked_device_memory = !!rx_locked_memory;
275423095155SDariusz Sosnowski 		hairpin_conf.use_rte_memory = !!rx_rte_memory;
27551c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
27561c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
27571c69df45SOri Kam 		i++;
27581c69df45SOri Kam 		if (diag == 0)
27591c69df45SOri Kam 			continue;
27601c69df45SOri Kam 
27611c69df45SOri Kam 		/* Fail to setup rx queue, return */
2762eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2763eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2764eac341d3SJoyce Kong 		else
276561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
276661a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
276761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
276861a3b0e5SAndrew Rybchenko 			pi);
27691c69df45SOri Kam 		/* try to reconfigure queues next time */
27701c69df45SOri Kam 		port->need_reconfig_queues = 1;
27711c69df45SOri Kam 		return -1;
27721c69df45SOri Kam 	}
27731c69df45SOri Kam 	return 0;
27741c69df45SOri Kam }
27751c69df45SOri Kam 
27762befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */
27772befc67fSViacheslav Ovsiienko int
27782befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
27792befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
27802befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
27812befc67fSViacheslav Ovsiienko {
27822befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
27834f04edcdSHanumanth Pothula 	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
27844f04edcdSHanumanth Pothula 	struct rte_mempool *mpx;
27852befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
278654a0f4d7SYuan Wang 	uint32_t prev_hdrs = 0;
27872befc67fSViacheslav Ovsiienko 	int ret;
27882befc67fSViacheslav Ovsiienko 
27894f04edcdSHanumanth Pothula 
2790a4bf5421SHanumanth Pothula 	if ((rx_pkt_nb_segs > 1) &&
2791a4bf5421SHanumanth Pothula 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
27924f04edcdSHanumanth Pothula 		/* multi-segment configuration */
27932befc67fSViacheslav Ovsiienko 		for (i = 0; i < rx_pkt_nb_segs; i++) {
27942befc67fSViacheslav Ovsiienko 			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
27952befc67fSViacheslav Ovsiienko 			/*
27962befc67fSViacheslav Ovsiienko 			 * Use last valid pool for the segments with number
27972befc67fSViacheslav Ovsiienko 			 * exceeding the pool index.
27982befc67fSViacheslav Ovsiienko 			 */
27991108c33eSRaja Zidane 			mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
28002befc67fSViacheslav Ovsiienko 			mpx = mbuf_pool_find(socket_id, mp_n);
28012befc67fSViacheslav Ovsiienko 			/* Handle zero as mbuf data buffer size. */
28022befc67fSViacheslav Ovsiienko 			rx_seg->offset = i < rx_pkt_nb_offs ?
28032befc67fSViacheslav Ovsiienko 					   rx_pkt_seg_offsets[i] : 0;
28042befc67fSViacheslav Ovsiienko 			rx_seg->mp = mpx ? mpx : mp;
280552e2e7edSYuan Wang 			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
280654a0f4d7SYuan Wang 				rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs;
280754a0f4d7SYuan Wang 				prev_hdrs |= rx_seg->proto_hdr;
280852e2e7edSYuan Wang 			} else {
280952e2e7edSYuan Wang 				rx_seg->length = rx_pkt_seg_lengths[i] ?
281052e2e7edSYuan Wang 						rx_pkt_seg_lengths[i] :
281152e2e7edSYuan Wang 						mbuf_data_size[mp_n];
281252e2e7edSYuan Wang 			}
28132befc67fSViacheslav Ovsiienko 		}
28142befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = rx_pkt_nb_segs;
28152befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = rx_useg;
2816a4bf5421SHanumanth Pothula 		rx_conf->rx_mempools = NULL;
2817a4bf5421SHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2818a4bf5421SHanumanth Pothula 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2819a4bf5421SHanumanth Pothula 				    socket_id, rx_conf, NULL);
2820a4bf5421SHanumanth Pothula 		rx_conf->rx_seg = NULL;
2821a4bf5421SHanumanth Pothula 		rx_conf->rx_nseg = 0;
2822a4bf5421SHanumanth Pothula 	} else if (multi_rx_mempool == 1) {
28234f04edcdSHanumanth Pothula 		/* multi-pool configuration */
2824a4bf5421SHanumanth Pothula 		struct rte_eth_dev_info dev_info;
2825a4bf5421SHanumanth Pothula 
2826a4bf5421SHanumanth Pothula 		if (mbuf_data_size_n <= 1) {
2827a4bf5421SHanumanth Pothula 			fprintf(stderr, "Invalid number of mempools %u\n",
2828a4bf5421SHanumanth Pothula 				mbuf_data_size_n);
2829a4bf5421SHanumanth Pothula 			return -EINVAL;
2830a4bf5421SHanumanth Pothula 		}
2831a4bf5421SHanumanth Pothula 		ret = rte_eth_dev_info_get(port_id, &dev_info);
2832a4bf5421SHanumanth Pothula 		if (ret != 0)
2833a4bf5421SHanumanth Pothula 			return ret;
2834a4bf5421SHanumanth Pothula 		if (dev_info.max_rx_mempools == 0) {
2835a4bf5421SHanumanth Pothula 			fprintf(stderr,
2836a4bf5421SHanumanth Pothula 				"Port %u doesn't support requested multi-rx-mempool configuration.\n",
2837a4bf5421SHanumanth Pothula 				port_id);
2838a4bf5421SHanumanth Pothula 			return -ENOTSUP;
2839a4bf5421SHanumanth Pothula 		}
28404f04edcdSHanumanth Pothula 		for (i = 0; i < mbuf_data_size_n; i++) {
28414f04edcdSHanumanth Pothula 			mpx = mbuf_pool_find(socket_id, i);
28424f04edcdSHanumanth Pothula 			rx_mempool[i] = mpx ? mpx : mp;
28434f04edcdSHanumanth Pothula 		}
28444f04edcdSHanumanth Pothula 		rx_conf->rx_mempools = rx_mempool;
28454f04edcdSHanumanth Pothula 		rx_conf->rx_nmempool = mbuf_data_size_n;
2846a4bf5421SHanumanth Pothula 		rx_conf->rx_seg = NULL;
2847a4bf5421SHanumanth Pothula 		rx_conf->rx_nseg = 0;
28482befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
28492befc67fSViacheslav Ovsiienko 				    socket_id, rx_conf, NULL);
2850a4bf5421SHanumanth Pothula 		rx_conf->rx_mempools = NULL;
2851a4bf5421SHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2852a4bf5421SHanumanth Pothula 	} else {
2853a4bf5421SHanumanth Pothula 		/* Single pool/segment configuration */
28542befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
28552befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
28564f04edcdSHanumanth Pothula 		rx_conf->rx_mempools = NULL;
28574f04edcdSHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2858a4bf5421SHanumanth Pothula 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2859a4bf5421SHanumanth Pothula 				    socket_id, rx_conf, mp);
2860a4bf5421SHanumanth Pothula 	}
2861a4bf5421SHanumanth Pothula 
28623c4426dbSDmitry Kozlyuk 	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
28633c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
28643c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
28652befc67fSViacheslav Ovsiienko 	return ret;
28662befc67fSViacheslav Ovsiienko }
28672befc67fSViacheslav Ovsiienko 
286863b72657SIvan Ilchenko static int
286963b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi)
287063b72657SIvan Ilchenko {
287163b72657SIvan Ilchenko 	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
287263b72657SIvan Ilchenko 	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
287363b72657SIvan Ilchenko 	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
287463b72657SIvan Ilchenko 
287563b72657SIvan Ilchenko 	if (xstats_display_num == 0)
287663b72657SIvan Ilchenko 		return 0;
287763b72657SIvan Ilchenko 
287863b72657SIvan Ilchenko 	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
287963b72657SIvan Ilchenko 	if (*ids_supp == NULL)
288063b72657SIvan Ilchenko 		goto fail_ids_supp;
288163b72657SIvan Ilchenko 
288263b72657SIvan Ilchenko 	*prev_values = calloc(xstats_display_num,
288363b72657SIvan Ilchenko 			      sizeof(**prev_values));
288463b72657SIvan Ilchenko 	if (*prev_values == NULL)
288563b72657SIvan Ilchenko 		goto fail_prev_values;
288663b72657SIvan Ilchenko 
288763b72657SIvan Ilchenko 	*curr_values = calloc(xstats_display_num,
288863b72657SIvan Ilchenko 			      sizeof(**curr_values));
288963b72657SIvan Ilchenko 	if (*curr_values == NULL)
289063b72657SIvan Ilchenko 		goto fail_curr_values;
289163b72657SIvan Ilchenko 
289263b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = true;
289363b72657SIvan Ilchenko 
289463b72657SIvan Ilchenko 	return 0;
289563b72657SIvan Ilchenko 
289663b72657SIvan Ilchenko fail_curr_values:
289763b72657SIvan Ilchenko 	free(*prev_values);
289863b72657SIvan Ilchenko fail_prev_values:
289963b72657SIvan Ilchenko 	free(*ids_supp);
290063b72657SIvan Ilchenko fail_ids_supp:
290163b72657SIvan Ilchenko 	return -ENOMEM;
290263b72657SIvan Ilchenko }
290363b72657SIvan Ilchenko 
290463b72657SIvan Ilchenko static void
290563b72657SIvan Ilchenko free_xstats_display_info(portid_t pi)
290663b72657SIvan Ilchenko {
290763b72657SIvan Ilchenko 	if (!ports[pi].xstats_info.allocated)
290863b72657SIvan Ilchenko 		return;
290963b72657SIvan Ilchenko 	free(ports[pi].xstats_info.ids_supp);
291063b72657SIvan Ilchenko 	free(ports[pi].xstats_info.prev_values);
291163b72657SIvan Ilchenko 	free(ports[pi].xstats_info.curr_values);
291263b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = false;
291363b72657SIvan Ilchenko }
291463b72657SIvan Ilchenko 
291563b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. */
291663b72657SIvan Ilchenko static void
291763b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
291863b72657SIvan Ilchenko {
291963b72657SIvan Ilchenko 	unsigned int stat, stat_supp;
292063b72657SIvan Ilchenko 	const char *xstat_name;
292163b72657SIvan Ilchenko 	struct rte_port *port;
292263b72657SIvan Ilchenko 	uint64_t *ids_supp;
292363b72657SIvan Ilchenko 	int rc;
292463b72657SIvan Ilchenko 
292563b72657SIvan Ilchenko 	if (xstats_display_num == 0)
292663b72657SIvan Ilchenko 		return;
292763b72657SIvan Ilchenko 
292863b72657SIvan Ilchenko 	if (pi == (portid_t)RTE_PORT_ALL) {
292963b72657SIvan Ilchenko 		fill_xstats_display_info();
293063b72657SIvan Ilchenko 		return;
293163b72657SIvan Ilchenko 	}
293263b72657SIvan Ilchenko 
293363b72657SIvan Ilchenko 	port = &ports[pi];
293463b72657SIvan Ilchenko 	if (port->port_status != RTE_PORT_STARTED)
293563b72657SIvan Ilchenko 		return;
293663b72657SIvan Ilchenko 
293763b72657SIvan Ilchenko 	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
293863b72657SIvan Ilchenko 		rte_exit(EXIT_FAILURE,
293963b72657SIvan Ilchenko 			 "Failed to allocate xstats display memory\n");
294063b72657SIvan Ilchenko 
294163b72657SIvan Ilchenko 	ids_supp = port->xstats_info.ids_supp;
294263b72657SIvan Ilchenko 	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
294363b72657SIvan Ilchenko 		xstat_name = xstats_display[stat].name;
294463b72657SIvan Ilchenko 		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
294563b72657SIvan Ilchenko 						   ids_supp + stat_supp);
294663b72657SIvan Ilchenko 		if (rc != 0) {
294763b72657SIvan Ilchenko 			fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n",
294863b72657SIvan Ilchenko 				xstat_name, pi, stat);
294963b72657SIvan Ilchenko 			continue;
295063b72657SIvan Ilchenko 		}
295163b72657SIvan Ilchenko 		stat_supp++;
295263b72657SIvan Ilchenko 	}
295363b72657SIvan Ilchenko 
295463b72657SIvan Ilchenko 	port->xstats_info.ids_supp_sz = stat_supp;
295563b72657SIvan Ilchenko }
295663b72657SIvan Ilchenko 
295763b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */
295863b72657SIvan Ilchenko static void
295963b72657SIvan Ilchenko fill_xstats_display_info(void)
296063b72657SIvan Ilchenko {
296163b72657SIvan Ilchenko 	portid_t pi;
296263b72657SIvan Ilchenko 
296363b72657SIvan Ilchenko 	if (xstats_display_num == 0)
296463b72657SIvan Ilchenko 		return;
296563b72657SIvan Ilchenko 
296663b72657SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(pi)
296763b72657SIvan Ilchenko 		fill_xstats_display_info_for_port(pi);
296863b72657SIvan Ilchenko }
296963b72657SIvan Ilchenko 
29707c06f1abSHuisong Li /*
29717c06f1abSHuisong Li  * Some capabilities (like, rx_offload_capa and tx_offload_capa) of bonding
29727c06f1abSHuisong Li  * device in dev_info is zero when no slave is added. And its capability
29737c06f1abSHuisong Li  * will be updated when add a new slave device. So adding a slave device need
29747c06f1abSHuisong Li  * to update the port configurations of bonding device.
29757c06f1abSHuisong Li  */
29767c06f1abSHuisong Li static void
29777c06f1abSHuisong Li update_bonding_port_dev_conf(portid_t bond_pid)
29787c06f1abSHuisong Li {
29797c06f1abSHuisong Li #ifdef RTE_NET_BOND
29807c06f1abSHuisong Li 	struct rte_port *port = &ports[bond_pid];
29817c06f1abSHuisong Li 	uint16_t i;
29827c06f1abSHuisong Li 	int ret;
29837c06f1abSHuisong Li 
29847c06f1abSHuisong Li 	ret = eth_dev_info_get_print_err(bond_pid, &port->dev_info);
29857c06f1abSHuisong Li 	if (ret != 0) {
29867c06f1abSHuisong Li 		fprintf(stderr, "Failed to get dev info for port = %u\n",
29877c06f1abSHuisong Li 			bond_pid);
29887c06f1abSHuisong Li 		return;
29897c06f1abSHuisong Li 	}
29907c06f1abSHuisong Li 
29917c06f1abSHuisong Li 	if (port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
29927c06f1abSHuisong Li 		port->dev_conf.txmode.offloads |=
29937c06f1abSHuisong Li 				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
29947c06f1abSHuisong Li 	/* Apply Tx offloads configuration */
29957c06f1abSHuisong Li 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
29967c06f1abSHuisong Li 		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
29977c06f1abSHuisong Li 
29987c06f1abSHuisong Li 	port->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
29997c06f1abSHuisong Li 				port->dev_info.flow_type_rss_offloads;
30007c06f1abSHuisong Li #else
30017c06f1abSHuisong Li 	RTE_SET_USED(bond_pid);
30027c06f1abSHuisong Li #endif
30037c06f1abSHuisong Li }
30047c06f1abSHuisong Li 
3005edab33b1STetsuya Mukawa int
3006ce8d5614SIntel start_port(portid_t pid)
3007ce8d5614SIntel {
3008cdede073SFerruh Yigit 	int diag;
3009ce8d5614SIntel 	portid_t pi;
301001817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
301101817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
301201817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
301301817b10SBing Zhao 	uint16_t cnt_pi = 0;
301401817b10SBing Zhao 	uint16_t cfg_pi = 0;
301501817b10SBing Zhao 	int peer_pi;
3016ce8d5614SIntel 	queueid_t qi;
3017ce8d5614SIntel 	struct rte_port *port;
30181c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
3019cdede073SFerruh Yigit 	bool at_least_one_port_exist = false;
3020cdede073SFerruh Yigit 	bool all_ports_already_started = true;
3021cdede073SFerruh Yigit 	bool at_least_one_port_successfully_started = false;
3022ce8d5614SIntel 
30234468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
30244468635fSMichael Qiu 		return 0;
30254468635fSMichael Qiu 
30267d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
3027edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3028ce8d5614SIntel 			continue;
3029ce8d5614SIntel 
3030d8c079a5SMin Hu (Connor) 		if (port_is_bonding_slave(pi)) {
3031d8c079a5SMin Hu (Connor) 			fprintf(stderr,
3032d8c079a5SMin Hu (Connor) 				"Please remove port %d from bonded device.\n",
3033d8c079a5SMin Hu (Connor) 				pi);
3034d8c079a5SMin Hu (Connor) 			continue;
3035d8c079a5SMin Hu (Connor) 		}
3036d8c079a5SMin Hu (Connor) 
3037cdede073SFerruh Yigit 		at_least_one_port_exist = true;
3038cdede073SFerruh Yigit 
3039ce8d5614SIntel 		port = &ports[pi];
3040cdede073SFerruh Yigit 		if (port->port_status == RTE_PORT_STOPPED) {
3041eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
3042cdede073SFerruh Yigit 			all_ports_already_started = false;
3043cdede073SFerruh Yigit 		} else {
304461a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is now not stopped\n", pi);
3045ce8d5614SIntel 			continue;
3046ce8d5614SIntel 		}
3047ce8d5614SIntel 
3048ce8d5614SIntel 		if (port->need_reconfig > 0) {
3049655eae01SJie Wang 			struct rte_eth_conf dev_conf;
3050655eae01SJie Wang 			int k;
3051655eae01SJie Wang 
3052ce8d5614SIntel 			port->need_reconfig = 0;
3053ce8d5614SIntel 
30547ee3e944SVasily Philipov 			if (flow_isolate_all) {
30557ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
30567ee3e944SVasily Philipov 				if (ret) {
305761a3b0e5SAndrew Rybchenko 					fprintf(stderr,
305861a3b0e5SAndrew Rybchenko 						"Failed to apply isolated mode on port %d\n",
305961a3b0e5SAndrew Rybchenko 						pi);
30607ee3e944SVasily Philipov 					return -1;
30617ee3e944SVasily Philipov 				}
30627ee3e944SVasily Philipov 			}
3063b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
30645706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
306520a0286fSLiu Xiaofeng 					port->socket_id);
30661c69df45SOri Kam 			if (nb_hairpinq > 0 &&
30671c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
306861a3b0e5SAndrew Rybchenko 				fprintf(stderr,
306961a3b0e5SAndrew Rybchenko 					"Port %d doesn't support hairpin queues\n",
307061a3b0e5SAndrew Rybchenko 					pi);
30711c69df45SOri Kam 				return -1;
30721c69df45SOri Kam 			}
30731bb4a528SFerruh Yigit 
30747c06f1abSHuisong Li 			if (port->bond_flag == 1 && port->update_conf == 1) {
30757c06f1abSHuisong Li 				update_bonding_port_dev_conf(pi);
30767c06f1abSHuisong Li 				port->update_conf = 0;
30777c06f1abSHuisong Li 			}
30787c06f1abSHuisong Li 
3079ce8d5614SIntel 			/* configure port */
3080a550baf2SMin Hu (Connor) 			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
30811c69df45SOri Kam 						     nb_txq + nb_hairpinq,
3082ce8d5614SIntel 						     &(port->dev_conf));
3083ce8d5614SIntel 			if (diag != 0) {
3084eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3085eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3086eac341d3SJoyce Kong 				else
308761a3b0e5SAndrew Rybchenko 					fprintf(stderr,
308861a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
308961a3b0e5SAndrew Rybchenko 						pi);
309061a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Fail to configure port %d\n",
309161a3b0e5SAndrew Rybchenko 					pi);
3092ce8d5614SIntel 				/* try to reconfigure port next time */
3093ce8d5614SIntel 				port->need_reconfig = 1;
3094148f963fSBruce Richardson 				return -1;
3095ce8d5614SIntel 			}
3096655eae01SJie Wang 			/* get device configuration*/
3097655eae01SJie Wang 			if (0 !=
3098655eae01SJie Wang 				eth_dev_conf_get_print_err(pi, &dev_conf)) {
3099655eae01SJie Wang 				fprintf(stderr,
3100655eae01SJie Wang 					"port %d can not get device configuration\n",
3101655eae01SJie Wang 					pi);
3102655eae01SJie Wang 				return -1;
3103655eae01SJie Wang 			}
3104655eae01SJie Wang 			/* Apply Rx offloads configuration */
3105655eae01SJie Wang 			if (dev_conf.rxmode.offloads !=
3106655eae01SJie Wang 			    port->dev_conf.rxmode.offloads) {
3107655eae01SJie Wang 				port->dev_conf.rxmode.offloads |=
3108655eae01SJie Wang 					dev_conf.rxmode.offloads;
3109655eae01SJie Wang 				for (k = 0;
3110655eae01SJie Wang 				     k < port->dev_info.max_rx_queues;
3111655eae01SJie Wang 				     k++)
31123c4426dbSDmitry Kozlyuk 					port->rxq[k].conf.offloads |=
3113655eae01SJie Wang 						dev_conf.rxmode.offloads;
3114655eae01SJie Wang 			}
3115655eae01SJie Wang 			/* Apply Tx offloads configuration */
3116655eae01SJie Wang 			if (dev_conf.txmode.offloads !=
3117655eae01SJie Wang 			    port->dev_conf.txmode.offloads) {
3118655eae01SJie Wang 				port->dev_conf.txmode.offloads |=
3119655eae01SJie Wang 					dev_conf.txmode.offloads;
3120655eae01SJie Wang 				for (k = 0;
3121655eae01SJie Wang 				     k < port->dev_info.max_tx_queues;
3122655eae01SJie Wang 				     k++)
31233c4426dbSDmitry Kozlyuk 					port->txq[k].conf.offloads |=
3124655eae01SJie Wang 						dev_conf.txmode.offloads;
3125655eae01SJie Wang 			}
3126ce8d5614SIntel 		}
3127a550baf2SMin Hu (Connor) 		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
3128ce8d5614SIntel 			port->need_reconfig_queues = 0;
3129ce8d5614SIntel 			/* setup tx queues */
3130ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
31313c4426dbSDmitry Kozlyuk 				struct rte_eth_txconf *conf =
31323c4426dbSDmitry Kozlyuk 							&port->txq[qi].conf;
31333c4426dbSDmitry Kozlyuk 
3134b6ea6408SIntel 				if ((numa_support) &&
3135b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
3136b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
3137d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
3138d44f8a48SQi Zhang 						txring_numa[pi],
31393c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
3140b6ea6408SIntel 				else
3141b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
3142d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
3143d44f8a48SQi Zhang 						port->socket_id,
31443c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
3145b6ea6408SIntel 
31463c4426dbSDmitry Kozlyuk 				if (diag == 0) {
31473c4426dbSDmitry Kozlyuk 					port->txq[qi].state =
31483c4426dbSDmitry Kozlyuk 						conf->tx_deferred_start ?
31493c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
31503c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
3151ce8d5614SIntel 					continue;
31523c4426dbSDmitry Kozlyuk 				}
3153ce8d5614SIntel 
3154ce8d5614SIntel 				/* Fail to setup tx queue, return */
3155eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3156eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3157eac341d3SJoyce Kong 				else
315861a3b0e5SAndrew Rybchenko 					fprintf(stderr,
315961a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
316061a3b0e5SAndrew Rybchenko 						pi);
316161a3b0e5SAndrew Rybchenko 				fprintf(stderr,
316261a3b0e5SAndrew Rybchenko 					"Fail to configure port %d tx queues\n",
3163d44f8a48SQi Zhang 					pi);
3164ce8d5614SIntel 				/* try to reconfigure queues next time */
3165ce8d5614SIntel 				port->need_reconfig_queues = 1;
3166148f963fSBruce Richardson 				return -1;
3167ce8d5614SIntel 			}
3168ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
3169d44f8a48SQi Zhang 				/* setup rx queues */
3170b6ea6408SIntel 				if ((numa_support) &&
3171b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
3172b6ea6408SIntel 					struct rte_mempool * mp =
317326cbb419SViacheslav Ovsiienko 						mbuf_pool_find
317426cbb419SViacheslav Ovsiienko 							(rxring_numa[pi], 0);
3175b6ea6408SIntel 					if (mp == NULL) {
317661a3b0e5SAndrew Rybchenko 						fprintf(stderr,
317761a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
3178b6ea6408SIntel 							rxring_numa[pi]);
3179148f963fSBruce Richardson 						return -1;
3180b6ea6408SIntel 					}
3181b6ea6408SIntel 
31822befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
3183d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
3184d44f8a48SQi Zhang 					     rxring_numa[pi],
31853c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
3186d44f8a48SQi Zhang 					     mp);
31871e1d6bddSBernard Iremonger 				} else {
31881e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
318926cbb419SViacheslav Ovsiienko 						mbuf_pool_find
319026cbb419SViacheslav Ovsiienko 							(port->socket_id, 0);
31911e1d6bddSBernard Iremonger 					if (mp == NULL) {
319261a3b0e5SAndrew Rybchenko 						fprintf(stderr,
319361a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
31941e1d6bddSBernard Iremonger 							port->socket_id);
31951e1d6bddSBernard Iremonger 						return -1;
3196b6ea6408SIntel 					}
31972befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
3198d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
3199d44f8a48SQi Zhang 					     port->socket_id,
32003c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
3201d44f8a48SQi Zhang 					     mp);
32021e1d6bddSBernard Iremonger 				}
3203ce8d5614SIntel 				if (diag == 0)
3204ce8d5614SIntel 					continue;
3205ce8d5614SIntel 
3206ce8d5614SIntel 				/* Fail to setup rx queue, return */
3207eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3208eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3209eac341d3SJoyce Kong 				else
321061a3b0e5SAndrew Rybchenko 					fprintf(stderr,
321161a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
321261a3b0e5SAndrew Rybchenko 						pi);
321361a3b0e5SAndrew Rybchenko 				fprintf(stderr,
321461a3b0e5SAndrew Rybchenko 					"Fail to configure port %d rx queues\n",
3215d44f8a48SQi Zhang 					pi);
3216ce8d5614SIntel 				/* try to reconfigure queues next time */
3217ce8d5614SIntel 				port->need_reconfig_queues = 1;
3218148f963fSBruce Richardson 				return -1;
3219ce8d5614SIntel 			}
32201c69df45SOri Kam 			/* setup hairpin queues */
322101817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
32221c69df45SOri Kam 				return -1;
3223ce8d5614SIntel 		}
3224b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
3225b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
3226b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3227b0a9354aSPavan Nikhilesh 					NULL, 0);
3228b0a9354aSPavan Nikhilesh 			if (diag < 0)
322961a3b0e5SAndrew Rybchenko 				fprintf(stderr,
3230b0a9354aSPavan Nikhilesh 					"Port %d: Failed to disable Ptype parsing\n",
3231b0a9354aSPavan Nikhilesh 					pi);
3232b0a9354aSPavan Nikhilesh 		}
3233b0a9354aSPavan Nikhilesh 
323401817b10SBing Zhao 		p_pi = pi;
323501817b10SBing Zhao 		cnt_pi++;
323601817b10SBing Zhao 
3237ce8d5614SIntel 		/* start port */
3238a550baf2SMin Hu (Connor) 		diag = eth_dev_start_mp(pi);
323952f2c6f2SAndrew Rybchenko 		if (diag < 0) {
324061a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Fail to start port %d: %s\n",
324161a3b0e5SAndrew Rybchenko 				pi, rte_strerror(-diag));
3242ce8d5614SIntel 
3243ce8d5614SIntel 			/* Fail to setup rx queue, return */
3244eac341d3SJoyce Kong 			if (port->port_status == RTE_PORT_HANDLING)
3245eac341d3SJoyce Kong 				port->port_status = RTE_PORT_STOPPED;
3246eac341d3SJoyce Kong 			else
324761a3b0e5SAndrew Rybchenko 				fprintf(stderr,
324861a3b0e5SAndrew Rybchenko 					"Port %d can not be set back to stopped\n",
324961a3b0e5SAndrew Rybchenko 					pi);
3250ce8d5614SIntel 			continue;
3251ce8d5614SIntel 		}
3252ce8d5614SIntel 
3253eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3254eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STARTED;
3255eac341d3SJoyce Kong 		else
325661a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into started\n",
325761a3b0e5SAndrew Rybchenko 				pi);
3258ce8d5614SIntel 
32595ffc4a2aSYuying Zhang 		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3260c2c4f87bSAman Deep Singh 			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3261a7db3afcSAman Deep Singh 					RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3262d8c89163SZijie Pan 
3263cdede073SFerruh Yigit 		at_least_one_port_successfully_started = true;
326401817b10SBing Zhao 
326501817b10SBing Zhao 		pl[cfg_pi++] = pi;
3266ce8d5614SIntel 	}
3267ce8d5614SIntel 
3268*5028f207SShiyang He 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3269*5028f207SShiyang He 		update_queue_state();
3270*5028f207SShiyang He 
3271cdede073SFerruh Yigit 	if (at_least_one_port_successfully_started && !no_link_check)
3272edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3273cdede073SFerruh Yigit 	else if (at_least_one_port_exist & all_ports_already_started)
327461a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Please stop the ports first\n");
3275ce8d5614SIntel 
327601817b10SBing Zhao 	if (hairpin_mode & 0xf) {
327701817b10SBing Zhao 		uint16_t i;
327801817b10SBing Zhao 		int j;
327901817b10SBing Zhao 
328001817b10SBing Zhao 		/* bind all started hairpin ports */
328101817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
328201817b10SBing Zhao 			pi = pl[i];
328301817b10SBing Zhao 			/* bind current Tx to all peer Rx */
328401817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
328501817b10SBing Zhao 							RTE_MAX_ETHPORTS, 1);
328601817b10SBing Zhao 			if (peer_pi < 0)
328701817b10SBing Zhao 				return peer_pi;
328801817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
328901817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
329001817b10SBing Zhao 					continue;
329101817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
329201817b10SBing Zhao 				if (diag < 0) {
329361a3b0e5SAndrew Rybchenko 					fprintf(stderr,
329461a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
329501817b10SBing Zhao 						pi, peer_pl[j],
329601817b10SBing Zhao 						rte_strerror(-diag));
329701817b10SBing Zhao 					return -1;
329801817b10SBing Zhao 				}
329901817b10SBing Zhao 			}
330001817b10SBing Zhao 			/* bind all peer Tx to current Rx */
330101817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
330201817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
330301817b10SBing Zhao 			if (peer_pi < 0)
330401817b10SBing Zhao 				return peer_pi;
330501817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
330601817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
330701817b10SBing Zhao 					continue;
330801817b10SBing Zhao 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
330901817b10SBing Zhao 				if (diag < 0) {
331061a3b0e5SAndrew Rybchenko 					fprintf(stderr,
331161a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
331201817b10SBing Zhao 						peer_pl[j], pi,
331301817b10SBing Zhao 						rte_strerror(-diag));
331401817b10SBing Zhao 					return -1;
331501817b10SBing Zhao 				}
331601817b10SBing Zhao 			}
331701817b10SBing Zhao 		}
331801817b10SBing Zhao 	}
331901817b10SBing Zhao 
332063b72657SIvan Ilchenko 	fill_xstats_display_info_for_port(pid);
332163b72657SIvan Ilchenko 
3322ce8d5614SIntel 	printf("Done\n");
3323148f963fSBruce Richardson 	return 0;
3324ce8d5614SIntel }
3325ce8d5614SIntel 
3326ce8d5614SIntel void
3327ce8d5614SIntel stop_port(portid_t pid)
3328ce8d5614SIntel {
3329ce8d5614SIntel 	portid_t pi;
3330ce8d5614SIntel 	struct rte_port *port;
3331ce8d5614SIntel 	int need_check_link_status = 0;
333201817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
333301817b10SBing Zhao 	int peer_pi;
333447a4e1fbSDariusz Sosnowski 	int ret;
3335ce8d5614SIntel 
33364468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
33374468635fSMichael Qiu 		return;
33384468635fSMichael Qiu 
3339ce8d5614SIntel 	printf("Stopping ports...\n");
3340ce8d5614SIntel 
33417d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
33424468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3343ce8d5614SIntel 			continue;
3344ce8d5614SIntel 
3345a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
334661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
334761a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
334861a3b0e5SAndrew Rybchenko 				pi);
3349a8ef3e3aSBernard Iremonger 			continue;
3350a8ef3e3aSBernard Iremonger 		}
3351a8ef3e3aSBernard Iremonger 
33520e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
335361a3b0e5SAndrew Rybchenko 			fprintf(stderr,
335461a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
335561a3b0e5SAndrew Rybchenko 				pi);
33560e545d30SBernard Iremonger 			continue;
33570e545d30SBernard Iremonger 		}
33580e545d30SBernard Iremonger 
3359ce8d5614SIntel 		port = &ports[pi];
3360eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_STARTED)
3361eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
3362eac341d3SJoyce Kong 		else
3363ce8d5614SIntel 			continue;
3364ce8d5614SIntel 
336501817b10SBing Zhao 		if (hairpin_mode & 0xf) {
336601817b10SBing Zhao 			int j;
336701817b10SBing Zhao 
336801817b10SBing Zhao 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
336901817b10SBing Zhao 			/* unbind all peer Tx from current Rx */
337001817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
337101817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
337201817b10SBing Zhao 			if (peer_pi < 0)
337301817b10SBing Zhao 				continue;
337401817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
337501817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
337601817b10SBing Zhao 					continue;
337701817b10SBing Zhao 				rte_eth_hairpin_unbind(peer_pl[j], pi);
337801817b10SBing Zhao 			}
337901817b10SBing Zhao 		}
338001817b10SBing Zhao 
3381543df472SChengwen Feng 		if (port->flow_list && !no_flow_flush)
33820f93edbfSGregory Etelson 			port_flow_flush(pi);
33830f93edbfSGregory Etelson 
338447a4e1fbSDariusz Sosnowski 		ret = eth_dev_stop_mp(pi);
338547a4e1fbSDariusz Sosnowski 		if (ret != 0) {
3386e62c5a12SIvan Ilchenko 			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3387e62c5a12SIvan Ilchenko 				pi);
338847a4e1fbSDariusz Sosnowski 			/* Allow to retry stopping the port. */
338947a4e1fbSDariusz Sosnowski 			port->port_status = RTE_PORT_STARTED;
339047a4e1fbSDariusz Sosnowski 			continue;
339147a4e1fbSDariusz Sosnowski 		}
3392ce8d5614SIntel 
3393eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3394eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
3395eac341d3SJoyce Kong 		else
339661a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into stopped\n",
339761a3b0e5SAndrew Rybchenko 				pi);
3398ce8d5614SIntel 		need_check_link_status = 1;
3399ce8d5614SIntel 	}
3400bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
3401edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3402ce8d5614SIntel 
3403ce8d5614SIntel 	printf("Done\n");
3404ce8d5614SIntel }
3405ce8d5614SIntel 
3406ce6959bfSWisam Jaddo static void
34074f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
3408ce6959bfSWisam Jaddo {
34094f1de450SThomas Monjalon 	portid_t i;
34104f1de450SThomas Monjalon 	portid_t new_total = 0;
3411ce6959bfSWisam Jaddo 
34124f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
34134f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
34144f1de450SThomas Monjalon 			array[new_total] = array[i];
34154f1de450SThomas Monjalon 			new_total++;
3416ce6959bfSWisam Jaddo 		}
34174f1de450SThomas Monjalon 	*total = new_total;
34184f1de450SThomas Monjalon }
34194f1de450SThomas Monjalon 
34204f1de450SThomas Monjalon static void
34214f1de450SThomas Monjalon remove_invalid_ports(void)
34224f1de450SThomas Monjalon {
34234f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
34244f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
34254f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
3426ce6959bfSWisam Jaddo }
3427ce6959bfSWisam Jaddo 
34283889a322SHuisong Li static void
34294b27989dSDmitry Kozlyuk flush_port_owned_resources(portid_t pi)
34304b27989dSDmitry Kozlyuk {
34314b27989dSDmitry Kozlyuk 	mcast_addr_pool_destroy(pi);
34324b27989dSDmitry Kozlyuk 	port_flow_flush(pi);
34336d736e05SSuanming Mou 	port_flow_template_table_flush(pi);
34346d736e05SSuanming Mou 	port_flow_pattern_template_flush(pi);
34356d736e05SSuanming Mou 	port_flow_actions_template_flush(pi);
3436653c0812SRongwei Liu 	port_flex_item_flush(pi);
34374b27989dSDmitry Kozlyuk 	port_action_handle_flush(pi);
34384b27989dSDmitry Kozlyuk }
34394b27989dSDmitry Kozlyuk 
34404b27989dSDmitry Kozlyuk static void
34413889a322SHuisong Li clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves)
34423889a322SHuisong Li {
34433889a322SHuisong Li 	struct rte_port *port;
34443889a322SHuisong Li 	portid_t slave_pid;
34453889a322SHuisong Li 	uint16_t i;
34463889a322SHuisong Li 
34473889a322SHuisong Li 	for (i = 0; i < num_slaves; i++) {
34483889a322SHuisong Li 		slave_pid = slave_pids[i];
34493889a322SHuisong Li 		if (port_is_started(slave_pid) == 1) {
34503889a322SHuisong Li 			if (rte_eth_dev_stop(slave_pid) != 0)
34513889a322SHuisong Li 				fprintf(stderr, "rte_eth_dev_stop failed for port %u\n",
34523889a322SHuisong Li 					slave_pid);
34533889a322SHuisong Li 
34543889a322SHuisong Li 			port = &ports[slave_pid];
34553889a322SHuisong Li 			port->port_status = RTE_PORT_STOPPED;
34563889a322SHuisong Li 		}
34573889a322SHuisong Li 
34583889a322SHuisong Li 		clear_port_slave_flag(slave_pid);
34593889a322SHuisong Li 
34603889a322SHuisong Li 		/* Close slave device when testpmd quit or is killed. */
34613889a322SHuisong Li 		if (cl_quit == 1 || f_quit == 1)
34623889a322SHuisong Li 			rte_eth_dev_close(slave_pid);
34633889a322SHuisong Li 	}
34643889a322SHuisong Li }
34653889a322SHuisong Li 
3466ce8d5614SIntel void
3467ce8d5614SIntel close_port(portid_t pid)
3468ce8d5614SIntel {
3469ce8d5614SIntel 	portid_t pi;
3470ce8d5614SIntel 	struct rte_port *port;
34713889a322SHuisong Li 	portid_t slave_pids[RTE_MAX_ETHPORTS];
34723889a322SHuisong Li 	int num_slaves = 0;
3473ce8d5614SIntel 
34744468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
34754468635fSMichael Qiu 		return;
34764468635fSMichael Qiu 
3477ce8d5614SIntel 	printf("Closing ports...\n");
3478ce8d5614SIntel 
34797d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
34804468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3481ce8d5614SIntel 			continue;
3482ce8d5614SIntel 
3483a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
348461a3b0e5SAndrew Rybchenko 			fprintf(stderr,
348561a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
348661a3b0e5SAndrew Rybchenko 				pi);
3487a8ef3e3aSBernard Iremonger 			continue;
3488a8ef3e3aSBernard Iremonger 		}
3489a8ef3e3aSBernard Iremonger 
34900e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
349161a3b0e5SAndrew Rybchenko 			fprintf(stderr,
349261a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
349361a3b0e5SAndrew Rybchenko 				pi);
34940e545d30SBernard Iremonger 			continue;
34950e545d30SBernard Iremonger 		}
34960e545d30SBernard Iremonger 
3497ce8d5614SIntel 		port = &ports[pi];
3498eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_CLOSED) {
349961a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is already closed\n", pi);
3500d4e8ad64SMichael Qiu 			continue;
3501d4e8ad64SMichael Qiu 		}
3502d4e8ad64SMichael Qiu 
3503a550baf2SMin Hu (Connor) 		if (is_proc_primary()) {
35044b27989dSDmitry Kozlyuk 			flush_port_owned_resources(pi);
35053889a322SHuisong Li #ifdef RTE_NET_BOND
35063889a322SHuisong Li 			if (port->bond_flag == 1)
35073889a322SHuisong Li 				num_slaves = rte_eth_bond_slaves_get(pi,
35083889a322SHuisong Li 						slave_pids, RTE_MAX_ETHPORTS);
35093889a322SHuisong Li #endif
3510ce8d5614SIntel 			rte_eth_dev_close(pi);
35113889a322SHuisong Li 			/*
35123889a322SHuisong Li 			 * If this port is bonded device, all slaves under the
35133889a322SHuisong Li 			 * device need to be removed or closed.
35143889a322SHuisong Li 			 */
35153889a322SHuisong Li 			if (port->bond_flag == 1 && num_slaves > 0)
35163889a322SHuisong Li 				clear_bonding_slave_device(slave_pids,
35173889a322SHuisong Li 							num_slaves);
3518ce8d5614SIntel 		}
351963b72657SIvan Ilchenko 
352063b72657SIvan Ilchenko 		free_xstats_display_info(pi);
3521a550baf2SMin Hu (Connor) 	}
3522ce8d5614SIntel 
352385c6571cSThomas Monjalon 	remove_invalid_ports();
3524ce8d5614SIntel 	printf("Done\n");
3525ce8d5614SIntel }
3526ce8d5614SIntel 
3527edab33b1STetsuya Mukawa void
352897f1e196SWei Dai reset_port(portid_t pid)
352997f1e196SWei Dai {
353097f1e196SWei Dai 	int diag;
353197f1e196SWei Dai 	portid_t pi;
353297f1e196SWei Dai 	struct rte_port *port;
353397f1e196SWei Dai 
353497f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
353597f1e196SWei Dai 		return;
353697f1e196SWei Dai 
35371cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
35381cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
353961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
354061a3b0e5SAndrew Rybchenko 			"Can not reset port(s), please stop port(s) first.\n");
35411cde1b9aSShougang Wang 		return;
35421cde1b9aSShougang Wang 	}
35431cde1b9aSShougang Wang 
354497f1e196SWei Dai 	printf("Resetting ports...\n");
354597f1e196SWei Dai 
354697f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
354797f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
354897f1e196SWei Dai 			continue;
354997f1e196SWei Dai 
355097f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
355161a3b0e5SAndrew Rybchenko 			fprintf(stderr,
355261a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
355361a3b0e5SAndrew Rybchenko 				pi);
355497f1e196SWei Dai 			continue;
355597f1e196SWei Dai 		}
355697f1e196SWei Dai 
355797f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
355861a3b0e5SAndrew Rybchenko 			fprintf(stderr,
355961a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
356097f1e196SWei Dai 				pi);
356197f1e196SWei Dai 			continue;
356297f1e196SWei Dai 		}
356397f1e196SWei Dai 
3564e9351eaaSQiming Yang 		if (is_proc_primary()) {
356597f1e196SWei Dai 			diag = rte_eth_dev_reset(pi);
356697f1e196SWei Dai 			if (diag == 0) {
356797f1e196SWei Dai 				port = &ports[pi];
356897f1e196SWei Dai 				port->need_reconfig = 1;
356997f1e196SWei Dai 				port->need_reconfig_queues = 1;
357097f1e196SWei Dai 			} else {
357161a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Failed to reset port %d. diag=%d\n",
357261a3b0e5SAndrew Rybchenko 					pi, diag);
357397f1e196SWei Dai 			}
357497f1e196SWei Dai 		}
3575e9351eaaSQiming Yang 	}
357697f1e196SWei Dai 
357797f1e196SWei Dai 	printf("Done\n");
357897f1e196SWei Dai }
357997f1e196SWei Dai 
358097f1e196SWei Dai void
3581edab33b1STetsuya Mukawa attach_port(char *identifier)
3582ce8d5614SIntel {
35834f1ed78eSThomas Monjalon 	portid_t pi;
3584c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
3585ce8d5614SIntel 
3586edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
3587edab33b1STetsuya Mukawa 
3588edab33b1STetsuya Mukawa 	if (identifier == NULL) {
358961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Invalid parameters are specified\n");
3590edab33b1STetsuya Mukawa 		return;
3591ce8d5614SIntel 	}
3592ce8d5614SIntel 
359375b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
3594c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3595edab33b1STetsuya Mukawa 		return;
3596c9cce428SThomas Monjalon 	}
3597c9cce428SThomas Monjalon 
35984f1ed78eSThomas Monjalon 	/* first attach mode: event */
35994f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
36004f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
36014f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
36024f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
36034f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
36044f1ed78eSThomas Monjalon 				setup_attached_port(pi);
36054f1ed78eSThomas Monjalon 		return;
36064f1ed78eSThomas Monjalon 	}
36074f1ed78eSThomas Monjalon 
36084f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
360986fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
36104f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
361186fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
361286fa5de1SThomas Monjalon 			continue; /* port was already attached before */
3613c9cce428SThomas Monjalon 		setup_attached_port(pi);
3614c9cce428SThomas Monjalon 	}
361586fa5de1SThomas Monjalon }
3616c9cce428SThomas Monjalon 
3617c9cce428SThomas Monjalon static void
3618c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
3619c9cce428SThomas Monjalon {
3620c9cce428SThomas Monjalon 	unsigned int socket_id;
362134fc1051SIvan Ilchenko 	int ret;
3622edab33b1STetsuya Mukawa 
3623931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
362429841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
3625931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
362629841336SPhil Yang 		socket_id = socket_ids[0];
3627931126baSBernard Iremonger 	reconfig(pi, socket_id);
362834fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
362934fc1051SIvan Ilchenko 	if (ret != 0)
363061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
363161a3b0e5SAndrew Rybchenko 			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
363234fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
3633edab33b1STetsuya Mukawa 
36344f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
36354f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
36364f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
36374f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
3638edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
3639edab33b1STetsuya Mukawa 
3640edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
3641edab33b1STetsuya Mukawa 	printf("Done\n");
3642edab33b1STetsuya Mukawa }
3643edab33b1STetsuya Mukawa 
36440654d4a8SThomas Monjalon static void
36450654d4a8SThomas Monjalon detach_device(struct rte_device *dev)
36465f4ec54fSChen Jing D(Mark) {
3647f8e5baa2SThomas Monjalon 	portid_t sibling;
3648f8e5baa2SThomas Monjalon 
3649f8e5baa2SThomas Monjalon 	if (dev == NULL) {
365061a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Device already removed\n");
3651f8e5baa2SThomas Monjalon 		return;
3652f8e5baa2SThomas Monjalon 	}
3653f8e5baa2SThomas Monjalon 
36540654d4a8SThomas Monjalon 	printf("Removing a device...\n");
3655938a184aSAdrien Mazarguil 
36562a449871SThomas Monjalon 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
36572a449871SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
36582a449871SThomas Monjalon 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
365961a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
366061a3b0e5SAndrew Rybchenko 					sibling);
36612a449871SThomas Monjalon 				return;
36622a449871SThomas Monjalon 			}
36634b27989dSDmitry Kozlyuk 			flush_port_owned_resources(sibling);
36642a449871SThomas Monjalon 		}
36652a449871SThomas Monjalon 	}
36662a449871SThomas Monjalon 
366775b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
3668ec5ecd7eSDavid Marchand 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", rte_dev_name(dev));
3669edab33b1STetsuya Mukawa 		return;
36703070419eSGaetan Rivet 	}
36714f1de450SThomas Monjalon 	remove_invalid_ports();
367203ce2c53SMatan Azrad 
36730654d4a8SThomas Monjalon 	printf("Device is detached\n");
3674f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
3675edab33b1STetsuya Mukawa 	printf("Done\n");
3676edab33b1STetsuya Mukawa 	return;
36775f4ec54fSChen Jing D(Mark) }
36785f4ec54fSChen Jing D(Mark) 
3679af75078fSIntel void
36800654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
36810654d4a8SThomas Monjalon {
36820a0821bcSPaulis Gributs 	int ret;
36830a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
36840a0821bcSPaulis Gributs 
36850654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
36860654d4a8SThomas Monjalon 		return;
36870654d4a8SThomas Monjalon 
36880654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
36890654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
369061a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port not stopped\n");
36910654d4a8SThomas Monjalon 			return;
36920654d4a8SThomas Monjalon 		}
369361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Port was not closed\n");
36940654d4a8SThomas Monjalon 	}
36950654d4a8SThomas Monjalon 
36960a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
36970a0821bcSPaulis Gributs 	if (ret != 0) {
36980a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
36990a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
37000a0821bcSPaulis Gributs 			port_id);
37010a0821bcSPaulis Gributs 		return;
37020a0821bcSPaulis Gributs 	}
37030a0821bcSPaulis Gributs 	detach_device(dev_info.device);
37040654d4a8SThomas Monjalon }
37050654d4a8SThomas Monjalon 
37060654d4a8SThomas Monjalon void
37075edee5f6SThomas Monjalon detach_devargs(char *identifier)
370855e51c96SNithin Dabilpuram {
370955e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
371055e51c96SNithin Dabilpuram 	struct rte_devargs da;
371155e51c96SNithin Dabilpuram 	portid_t port_id;
371255e51c96SNithin Dabilpuram 
371355e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
371455e51c96SNithin Dabilpuram 
371555e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
371655e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
371761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "cannot parse identifier\n");
371855e51c96SNithin Dabilpuram 		return;
371955e51c96SNithin Dabilpuram 	}
372055e51c96SNithin Dabilpuram 
372155e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
372255e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
372355e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
372461a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
372561a3b0e5SAndrew Rybchenko 					port_id);
3726149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
372764051bb1SXueming Li 				rte_devargs_reset(&da);
372855e51c96SNithin Dabilpuram 				return;
372955e51c96SNithin Dabilpuram 			}
37304b27989dSDmitry Kozlyuk 			flush_port_owned_resources(port_id);
373155e51c96SNithin Dabilpuram 		}
373255e51c96SNithin Dabilpuram 	}
373355e51c96SNithin Dabilpuram 
3734148c51a3SDavid Marchand 	if (rte_eal_hotplug_remove(rte_bus_name(da.bus), da.name) != 0) {
373555e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3736148c51a3SDavid Marchand 			    da.name, rte_bus_name(da.bus));
373764051bb1SXueming Li 		rte_devargs_reset(&da);
373855e51c96SNithin Dabilpuram 		return;
373955e51c96SNithin Dabilpuram 	}
374055e51c96SNithin Dabilpuram 
374155e51c96SNithin Dabilpuram 	remove_invalid_ports();
374255e51c96SNithin Dabilpuram 
374355e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
374455e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
374555e51c96SNithin Dabilpuram 	printf("Done\n");
374664051bb1SXueming Li 	rte_devargs_reset(&da);
374755e51c96SNithin Dabilpuram }
374855e51c96SNithin Dabilpuram 
374955e51c96SNithin Dabilpuram void
3750af75078fSIntel pmd_test_exit(void)
3751af75078fSIntel {
3752af75078fSIntel 	portid_t pt_id;
375326cbb419SViacheslav Ovsiienko 	unsigned int i;
3754fb73e096SJeff Guo 	int ret;
3755af75078fSIntel 
37568210ec25SPablo de Lara 	if (test_done == 0)
37578210ec25SPablo de Lara 		stop_packet_forwarding();
37588210ec25SPablo de Lara 
3759761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
376026cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
37613a0968c8SShahaf Shuler 		if (mempools[i]) {
37623a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
37633a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
37643a0968c8SShahaf Shuler 						     NULL);
37653a0968c8SShahaf Shuler 		}
37663a0968c8SShahaf Shuler 	}
3767761f7ae1SJie Zhou #endif
3768d3a274ceSZhihong Wang 	if (ports != NULL) {
3769d3a274ceSZhihong Wang 		no_link_check = 1;
37707d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
377108fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
3772af75078fSIntel 			fflush(stdout);
3773d3a274ceSZhihong Wang 			stop_port(pt_id);
377408fd782bSCristian Dumitrescu 		}
377508fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
377608fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
377708fd782bSCristian Dumitrescu 			fflush(stdout);
3778d3a274ceSZhihong Wang 			close_port(pt_id);
3779af75078fSIntel 		}
3780d3a274ceSZhihong Wang 	}
3781fb73e096SJeff Guo 
3782fb73e096SJeff Guo 	if (hot_plug) {
3783fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
37842049c511SJeff Guo 		if (ret) {
3785fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
3786fb73e096SJeff Guo 				"fail to stop device event monitor.");
37872049c511SJeff Guo 			return;
37882049c511SJeff Guo 		}
3789fb73e096SJeff Guo 
37902049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
3791cc1bf307SJeff Guo 			dev_event_callback, NULL);
37922049c511SJeff Guo 		if (ret < 0) {
3793fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
37942049c511SJeff Guo 				"fail to unregister device event callback.\n");
37952049c511SJeff Guo 			return;
37962049c511SJeff Guo 		}
37972049c511SJeff Guo 
37982049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
37992049c511SJeff Guo 		if (ret) {
38002049c511SJeff Guo 			RTE_LOG(ERR, EAL,
38012049c511SJeff Guo 				"fail to disable hotplug handling.\n");
38022049c511SJeff Guo 			return;
38032049c511SJeff Guo 		}
3804fb73e096SJeff Guo 	}
380526cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3806401b744dSShahaf Shuler 		if (mempools[i])
3807a550baf2SMin Hu (Connor) 			mempool_free_mp(mempools[i]);
3808401b744dSShahaf Shuler 	}
380963b72657SIvan Ilchenko 	free(xstats_display);
3810fb73e096SJeff Guo 
3811d3a274ceSZhihong Wang 	printf("\nBye...\n");
3812af75078fSIntel }
3813af75078fSIntel 
3814af75078fSIntel typedef void (*cmd_func_t)(void);
3815af75078fSIntel struct pmd_test_command {
3816af75078fSIntel 	const char *cmd_name;
3817af75078fSIntel 	cmd_func_t cmd_func;
3818af75078fSIntel };
3819af75078fSIntel 
3820ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
3821af75078fSIntel static void
3822edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
3823af75078fSIntel {
3824ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
3825ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3826f8244c63SZhiyong Yang 	portid_t portid;
3827f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
3828ce8d5614SIntel 	struct rte_eth_link link;
3829e661a08bSIgor Romanov 	int ret;
3830ba5509a6SIvan Dyukov 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3831ce8d5614SIntel 
3832ce8d5614SIntel 	printf("Checking link statuses...\n");
3833ce8d5614SIntel 	fflush(stdout);
3834ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
3835ce8d5614SIntel 		all_ports_up = 1;
38367d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
3837ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
3838ce8d5614SIntel 				continue;
3839ce8d5614SIntel 			memset(&link, 0, sizeof(link));
3840e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
3841e661a08bSIgor Romanov 			if (ret < 0) {
3842e661a08bSIgor Romanov 				all_ports_up = 0;
3843e661a08bSIgor Romanov 				if (print_flag == 1)
384461a3b0e5SAndrew Rybchenko 					fprintf(stderr,
384561a3b0e5SAndrew Rybchenko 						"Port %u link get failed: %s\n",
3846e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
3847e661a08bSIgor Romanov 				continue;
3848e661a08bSIgor Romanov 			}
3849ce8d5614SIntel 			/* print link status if flag set */
3850ce8d5614SIntel 			if (print_flag == 1) {
3851ba5509a6SIvan Dyukov 				rte_eth_link_to_str(link_status,
3852ba5509a6SIvan Dyukov 					sizeof(link_status), &link);
3853ba5509a6SIvan Dyukov 				printf("Port %d %s\n", portid, link_status);
3854ce8d5614SIntel 				continue;
3855ce8d5614SIntel 			}
3856ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
3857295968d1SFerruh Yigit 			if (link.link_status == RTE_ETH_LINK_DOWN) {
3858ce8d5614SIntel 				all_ports_up = 0;
3859ce8d5614SIntel 				break;
3860ce8d5614SIntel 			}
3861ce8d5614SIntel 		}
3862ce8d5614SIntel 		/* after finally printing all link status, get out */
3863ce8d5614SIntel 		if (print_flag == 1)
3864ce8d5614SIntel 			break;
3865ce8d5614SIntel 
3866ce8d5614SIntel 		if (all_ports_up == 0) {
3867ce8d5614SIntel 			fflush(stdout);
3868ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
3869ce8d5614SIntel 		}
3870ce8d5614SIntel 
3871ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
3872ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3873ce8d5614SIntel 			print_flag = 1;
3874ce8d5614SIntel 		}
38758ea656f8SGaetan Rivet 
38768ea656f8SGaetan Rivet 		if (lsc_interrupt)
38778ea656f8SGaetan Rivet 			break;
3878ce8d5614SIntel 	}
3879af75078fSIntel }
3880af75078fSIntel 
3881284c908cSGaetan Rivet static void
3882cc1bf307SJeff Guo rmv_port_callback(void *arg)
3883284c908cSGaetan Rivet {
38843b97888aSMatan Azrad 	int need_to_start = 0;
38850da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
388628caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
38870a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
38880a0821bcSPaulis Gributs 	int ret;
3889284c908cSGaetan Rivet 
3890284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
3891284c908cSGaetan Rivet 
38923b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
38933b97888aSMatan Azrad 		need_to_start = 1;
38943b97888aSMatan Azrad 		stop_packet_forwarding();
38953b97888aSMatan Azrad 	}
38960da2a62bSMatan Azrad 	no_link_check = 1;
3897284c908cSGaetan Rivet 	stop_port(port_id);
38980da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
38990654d4a8SThomas Monjalon 
39000a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
39010a0821bcSPaulis Gributs 	if (ret != 0)
39020a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
39030a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
39040a0821bcSPaulis Gributs 			port_id);
3905e1d38504SPaulis Gributs 	else {
3906e1d38504SPaulis Gributs 		struct rte_device *device = dev_info.device;
3907e1d38504SPaulis Gributs 		close_port(port_id);
3908e1d38504SPaulis Gributs 		detach_device(device); /* might be already removed or have more ports */
3909e1d38504SPaulis Gributs 	}
39103b97888aSMatan Azrad 	if (need_to_start)
39113b97888aSMatan Azrad 		start_packet_forwarding(0);
3912284c908cSGaetan Rivet }
3913284c908cSGaetan Rivet 
391476ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3915d6af1a13SBernard Iremonger static int
3916f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3917d6af1a13SBernard Iremonger 		  void *ret_param)
391876ad4a2dSGaetan Rivet {
391976ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3920d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
392176ad4a2dSGaetan Rivet 
392276ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
392361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
392461a3b0e5SAndrew Rybchenko 			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
392576ad4a2dSGaetan Rivet 			port_id, __func__, type);
392676ad4a2dSGaetan Rivet 		fflush(stderr);
39273af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3928f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
392997b5d8b5SThomas Monjalon 			eth_event_desc[type]);
393076ad4a2dSGaetan Rivet 		fflush(stdout);
393176ad4a2dSGaetan Rivet 	}
3932284c908cSGaetan Rivet 
3933284c908cSGaetan Rivet 	switch (type) {
39344f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
39354f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
39364f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
39374f1ed78eSThomas Monjalon 		break;
3938284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
39394f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
39404f1ed78eSThomas Monjalon 			break;
3941284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3942cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
394361a3b0e5SAndrew Rybchenko 			fprintf(stderr,
394461a3b0e5SAndrew Rybchenko 				"Could not set up deferred device removal\n");
3945284c908cSGaetan Rivet 		break;
394685c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
394785c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
394885c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
394985c6571cSThomas Monjalon 		break;
3950bc70e559SSpike Du 	case RTE_ETH_EVENT_RX_AVAIL_THRESH: {
3951bc70e559SSpike Du 		uint16_t rxq_id;
3952bc70e559SSpike Du 		int ret;
3953bc70e559SSpike Du 
3954bc70e559SSpike Du 		/* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */
3955bc70e559SSpike Du 		for (rxq_id = 0; ; rxq_id++) {
3956bc70e559SSpike Du 			ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id,
3957bc70e559SSpike Du 							    NULL);
3958bc70e559SSpike Du 			if (ret <= 0)
3959bc70e559SSpike Du 				break;
3960bc70e559SSpike Du 			printf("Received avail_thresh event, port: %u, rxq_id: %u\n",
3961bc70e559SSpike Du 			       port_id, rxq_id);
3962f41a5092SSpike Du 
3963f41a5092SSpike Du #ifdef RTE_NET_MLX5
3964f41a5092SSpike Du 			mlx5_test_avail_thresh_event_handler(port_id, rxq_id);
3965f41a5092SSpike Du #endif
3966bc70e559SSpike Du 		}
3967bc70e559SSpike Du 		break;
3968bc70e559SSpike Du 	}
3969284c908cSGaetan Rivet 	default:
3970284c908cSGaetan Rivet 		break;
3971284c908cSGaetan Rivet 	}
3972d6af1a13SBernard Iremonger 	return 0;
397376ad4a2dSGaetan Rivet }
397476ad4a2dSGaetan Rivet 
397597b5d8b5SThomas Monjalon static int
397697b5d8b5SThomas Monjalon register_eth_event_callback(void)
397797b5d8b5SThomas Monjalon {
397897b5d8b5SThomas Monjalon 	int ret;
397997b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
398097b5d8b5SThomas Monjalon 
398197b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
398297b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
398397b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
398497b5d8b5SThomas Monjalon 				event,
398597b5d8b5SThomas Monjalon 				eth_event_callback,
398697b5d8b5SThomas Monjalon 				NULL);
398797b5d8b5SThomas Monjalon 		if (ret != 0) {
398897b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
398997b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
399097b5d8b5SThomas Monjalon 			return -1;
399197b5d8b5SThomas Monjalon 		}
399297b5d8b5SThomas Monjalon 	}
399397b5d8b5SThomas Monjalon 
399497b5d8b5SThomas Monjalon 	return 0;
399597b5d8b5SThomas Monjalon }
399697b5d8b5SThomas Monjalon 
3997fb73e096SJeff Guo /* This function is used by the interrupt thread */
3998fb73e096SJeff Guo static void
3999cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
4000fb73e096SJeff Guo 			     __rte_unused void *arg)
4001fb73e096SJeff Guo {
40022049c511SJeff Guo 	uint16_t port_id;
40032049c511SJeff Guo 	int ret;
40042049c511SJeff Guo 
4005fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
4006fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
4007fb73e096SJeff Guo 			__func__, type);
4008fb73e096SJeff Guo 		fflush(stderr);
4009fb73e096SJeff Guo 	}
4010fb73e096SJeff Guo 
4011fb73e096SJeff Guo 	switch (type) {
4012fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
4013cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
4014fb73e096SJeff Guo 			device_name);
40152049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
40162049c511SJeff Guo 		if (ret) {
40172049c511SJeff Guo 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
40182049c511SJeff Guo 				device_name);
40192049c511SJeff Guo 			return;
40202049c511SJeff Guo 		}
4021cc1bf307SJeff Guo 		/*
4022cc1bf307SJeff Guo 		 * Because the user's callback is invoked in eal interrupt
4023cc1bf307SJeff Guo 		 * callback, the interrupt callback need to be finished before
4024cc1bf307SJeff Guo 		 * it can be unregistered when detaching device. So finish
4025cc1bf307SJeff Guo 		 * callback soon and use a deferred removal to detach device
4026cc1bf307SJeff Guo 		 * is need. It is a workaround, once the device detaching be
4027cc1bf307SJeff Guo 		 * moved into the eal in the future, the deferred removal could
4028cc1bf307SJeff Guo 		 * be deleted.
4029cc1bf307SJeff Guo 		 */
4030cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
4031cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
4032cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
4033cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
4034fb73e096SJeff Guo 		break;
4035fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
4036fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
4037fb73e096SJeff Guo 			device_name);
4038fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
4039fb73e096SJeff Guo 		 * begin to attach port.
4040fb73e096SJeff Guo 		 */
4041fb73e096SJeff Guo 		break;
4042fb73e096SJeff Guo 	default:
4043fb73e096SJeff Guo 		break;
4044fb73e096SJeff Guo 	}
4045fb73e096SJeff Guo }
4046fb73e096SJeff Guo 
4047f2c5125aSPablo de Lara static void
4048f4d178c1SXueming Li rxtx_port_config(portid_t pid)
4049f2c5125aSPablo de Lara {
4050d44f8a48SQi Zhang 	uint16_t qid;
40515e91aeefSWei Zhao 	uint64_t offloads;
4052f4d178c1SXueming Li 	struct rte_port *port = &ports[pid];
4053f2c5125aSPablo de Lara 
4054d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
40553c4426dbSDmitry Kozlyuk 		offloads = port->rxq[qid].conf.offloads;
40563c4426dbSDmitry Kozlyuk 		port->rxq[qid].conf = port->dev_info.default_rxconf;
4057f4d178c1SXueming Li 
4058f4d178c1SXueming Li 		if (rxq_share > 0 &&
4059f4d178c1SXueming Li 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
4060f4d178c1SXueming Li 			/* Non-zero share group to enable RxQ share. */
40613c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
40623c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
4063f4d178c1SXueming Li 		}
4064f4d178c1SXueming Li 
4065575e0fd1SWei Zhao 		if (offloads != 0)
40663c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.offloads = offloads;
4067d44f8a48SQi Zhang 
4068d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
4069f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
40703c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
4071f2c5125aSPablo de Lara 
4072f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
40733c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
4074f2c5125aSPablo de Lara 
4075f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
40763c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
4077f2c5125aSPablo de Lara 
4078f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
40793c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
4080f2c5125aSPablo de Lara 
4081f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
40823c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_drop_en = rx_drop_en;
4083f2c5125aSPablo de Lara 
4084d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
4085d44f8a48SQi Zhang 	}
4086d44f8a48SQi Zhang 
4087d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
40883c4426dbSDmitry Kozlyuk 		offloads = port->txq[qid].conf.offloads;
40893c4426dbSDmitry Kozlyuk 		port->txq[qid].conf = port->dev_info.default_txconf;
4090575e0fd1SWei Zhao 		if (offloads != 0)
40913c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.offloads = offloads;
4092d44f8a48SQi Zhang 
4093d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
4094f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
40953c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
4096f2c5125aSPablo de Lara 
4097f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
40983c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
4099f2c5125aSPablo de Lara 
4100f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
41013c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
4102f2c5125aSPablo de Lara 
4103f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
41043c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
4105f2c5125aSPablo de Lara 
4106f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
41073c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
4108d44f8a48SQi Zhang 
4109d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
4110d44f8a48SQi Zhang 	}
4111f2c5125aSPablo de Lara }
4112f2c5125aSPablo de Lara 
41130c4abd36SSteve Yang /*
4114b563c142SFerruh Yigit  * Helper function to set MTU from frame size
41150c4abd36SSteve Yang  *
41160c4abd36SSteve Yang  * port->dev_info should be set before calling this function.
41170c4abd36SSteve Yang  *
41180c4abd36SSteve Yang  * return 0 on success, negative on error
41190c4abd36SSteve Yang  */
41200c4abd36SSteve Yang int
4121b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
41220c4abd36SSteve Yang {
41230c4abd36SSteve Yang 	struct rte_port *port = &ports[portid];
41240c4abd36SSteve Yang 	uint32_t eth_overhead;
41251bb4a528SFerruh Yigit 	uint16_t mtu, new_mtu;
41260c4abd36SSteve Yang 
41271bb4a528SFerruh Yigit 	eth_overhead = get_eth_overhead(&port->dev_info);
41281bb4a528SFerruh Yigit 
41291bb4a528SFerruh Yigit 	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
41301bb4a528SFerruh Yigit 		printf("Failed to get MTU for port %u\n", portid);
41311bb4a528SFerruh Yigit 		return -1;
41321bb4a528SFerruh Yigit 	}
41331bb4a528SFerruh Yigit 
41341bb4a528SFerruh Yigit 	new_mtu = max_rx_pktlen - eth_overhead;
41350c4abd36SSteve Yang 
41361bb4a528SFerruh Yigit 	if (mtu == new_mtu)
41371bb4a528SFerruh Yigit 		return 0;
41381bb4a528SFerruh Yigit 
41391bb4a528SFerruh Yigit 	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
414061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
414161a3b0e5SAndrew Rybchenko 			"Failed to set MTU to %u for port %u\n",
41421bb4a528SFerruh Yigit 			new_mtu, portid);
41431bb4a528SFerruh Yigit 		return -1;
41440c4abd36SSteve Yang 	}
41450c4abd36SSteve Yang 
41461bb4a528SFerruh Yigit 	port->dev_conf.rxmode.mtu = new_mtu;
41471bb4a528SFerruh Yigit 
41480c4abd36SSteve Yang 	return 0;
41490c4abd36SSteve Yang }
41500c4abd36SSteve Yang 
4151013af9b6SIntel void
4152013af9b6SIntel init_port_config(void)
4153013af9b6SIntel {
4154013af9b6SIntel 	portid_t pid;
4155013af9b6SIntel 	struct rte_port *port;
4156655eae01SJie Wang 	int ret, i;
4157013af9b6SIntel 
41587d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
4159013af9b6SIntel 		port = &ports[pid];
41606f51deb9SIvan Ilchenko 
41616f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
41626f51deb9SIvan Ilchenko 		if (ret != 0)
41636f51deb9SIvan Ilchenko 			return;
41646f51deb9SIvan Ilchenko 
41653ce690d3SBruce Richardson 		if (nb_rxq > 1) {
4166013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
416790892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
4168422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
4169af75078fSIntel 		} else {
4170013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
4171013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
4172af75078fSIntel 		}
41733ce690d3SBruce Richardson 
41745f592039SJingjing Wu 		if (port->dcb_flag == 0) {
4175655eae01SJie Wang 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
4176f9295aa2SXiaoyu Min 				port->dev_conf.rxmode.mq_mode =
4177f9295aa2SXiaoyu Min 					(enum rte_eth_rx_mq_mode)
4178295968d1SFerruh Yigit 						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
4179655eae01SJie Wang 			} else {
4180295968d1SFerruh Yigit 				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
4181655eae01SJie Wang 				port->dev_conf.rxmode.offloads &=
4182295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4183655eae01SJie Wang 
4184655eae01SJie Wang 				for (i = 0;
4185655eae01SJie Wang 				     i < port->dev_info.nb_rx_queues;
4186655eae01SJie Wang 				     i++)
41873c4426dbSDmitry Kozlyuk 					port->rxq[i].conf.offloads &=
4188295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4189655eae01SJie Wang 			}
41903ce690d3SBruce Richardson 		}
41913ce690d3SBruce Richardson 
4192f4d178c1SXueming Li 		rxtx_port_config(pid);
4193013af9b6SIntel 
4194a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
4195a5279d25SIgor Romanov 		if (ret != 0)
4196a5279d25SIgor Romanov 			return;
4197013af9b6SIntel 
41980a0821bcSPaulis Gributs 		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
41998ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
42000a0821bcSPaulis Gributs 		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
4201284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
4202013af9b6SIntel 	}
4203013af9b6SIntel }
4204013af9b6SIntel 
420541b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
420641b05095SBernard Iremonger {
420741b05095SBernard Iremonger 	struct rte_port *port;
420841b05095SBernard Iremonger 
420941b05095SBernard Iremonger 	port = &ports[slave_pid];
421041b05095SBernard Iremonger 	port->slave_flag = 1;
421141b05095SBernard Iremonger }
421241b05095SBernard Iremonger 
421341b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
421441b05095SBernard Iremonger {
421541b05095SBernard Iremonger 	struct rte_port *port;
421641b05095SBernard Iremonger 
421741b05095SBernard Iremonger 	port = &ports[slave_pid];
421841b05095SBernard Iremonger 	port->slave_flag = 0;
421941b05095SBernard Iremonger }
422041b05095SBernard Iremonger 
42210e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
42220e545d30SBernard Iremonger {
42230e545d30SBernard Iremonger 	struct rte_port *port;
42240a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
42250a0821bcSPaulis Gributs 	int ret;
42260e545d30SBernard Iremonger 
42270e545d30SBernard Iremonger 	port = &ports[slave_pid];
42280a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
42290a0821bcSPaulis Gributs 	if (ret != 0) {
42300a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
42310a0821bcSPaulis Gributs 			"Failed to get device info for port id %d,"
42320a0821bcSPaulis Gributs 			"cannot determine if the port is a bonded slave",
42330a0821bcSPaulis Gributs 			slave_pid);
42340a0821bcSPaulis Gributs 		return 0;
42350a0821bcSPaulis Gributs 	}
42360a0821bcSPaulis Gributs 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
4237b8b8b344SMatan Azrad 		return 1;
4238b8b8b344SMatan Azrad 	return 0;
42390e545d30SBernard Iremonger }
42400e545d30SBernard Iremonger 
4241013af9b6SIntel const uint16_t vlan_tags[] = {
4242013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
4243013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
4244013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
4245013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
4246013af9b6SIntel };
4247013af9b6SIntel 
4248013af9b6SIntel static  int
4249ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
42501a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
42511a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
42521a572499SJingjing Wu 		 uint8_t pfc_en)
4253013af9b6SIntel {
4254013af9b6SIntel 	uint8_t i;
4255ac7c491cSKonstantin Ananyev 	int32_t rc;
4256ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
4257af75078fSIntel 
4258af75078fSIntel 	/*
4259013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
4260013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
4261af75078fSIntel 	 */
42621a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
42631a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
42641a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
42651a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
42661a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
4267013af9b6SIntel 
4268547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
42691a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
42701a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
42711a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
4272295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
42731a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
4274295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
4275013af9b6SIntel 
42761a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
42771a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
42781a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
42791a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
42801a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
4281af75078fSIntel 		}
4282295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4283f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
4284f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
4285013af9b6SIntel 		}
4286013af9b6SIntel 
4287013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
4288f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4289f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4290295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
4291295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
42921a572499SJingjing Wu 	} else {
42931a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
42941a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
42951a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
42961a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
4297013af9b6SIntel 
42985139bc12STing Xu 		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
42995139bc12STing Xu 
4300ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
4301ac7c491cSKonstantin Ananyev 		if (rc != 0)
4302ac7c491cSKonstantin Ananyev 			return rc;
4303ac7c491cSKonstantin Ananyev 
43041a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
43051a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
43061a572499SJingjing Wu 
4307295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4308bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
4309bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
4310013af9b6SIntel 		}
4311ac7c491cSKonstantin Ananyev 
4312f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4313f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4314295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
4315ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
4316295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
43171a572499SJingjing Wu 	}
43181a572499SJingjing Wu 
43191a572499SJingjing Wu 	if (pfc_en)
43201a572499SJingjing Wu 		eth_conf->dcb_capability_en =
4321295968d1SFerruh Yigit 				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
4322013af9b6SIntel 	else
4323295968d1SFerruh Yigit 		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
4324013af9b6SIntel 
4325013af9b6SIntel 	return 0;
4326013af9b6SIntel }
4327013af9b6SIntel 
4328013af9b6SIntel int
43291a572499SJingjing Wu init_port_dcb_config(portid_t pid,
43301a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
43311a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
43321a572499SJingjing Wu 		     uint8_t pfc_en)
4333013af9b6SIntel {
4334013af9b6SIntel 	struct rte_eth_conf port_conf;
4335013af9b6SIntel 	struct rte_port *rte_port;
4336013af9b6SIntel 	int retval;
4337013af9b6SIntel 	uint16_t i;
4338013af9b6SIntel 
4339a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
4340a550baf2SMin Hu (Connor) 		printf("The multi-process feature doesn't support dcb.\n");
4341a550baf2SMin Hu (Connor) 		return -ENOTSUP;
4342a550baf2SMin Hu (Connor) 	}
43432a977b89SWenzhuo Lu 	rte_port = &ports[pid];
4344013af9b6SIntel 
4345c1ba6c32SHuisong Li 	/* retain the original device configuration. */
4346c1ba6c32SHuisong Li 	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
4347d5354e89SYanglong Wu 
4348013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
4349ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
4350013af9b6SIntel 	if (retval < 0)
4351013af9b6SIntel 		return retval;
4352295968d1SFerruh Yigit 	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4353cbe70fdeSJie Wang 	/* remove RSS HASH offload for DCB in vt mode */
4354cbe70fdeSJie Wang 	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
4355cbe70fdeSJie Wang 		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4356cbe70fdeSJie Wang 		for (i = 0; i < nb_rxq; i++)
43573c4426dbSDmitry Kozlyuk 			rte_port->rxq[i].conf.offloads &=
4358cbe70fdeSJie Wang 				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4359cbe70fdeSJie Wang 	}
4360013af9b6SIntel 
43612f203d44SQi Zhang 	/* re-configure the device . */
43622b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
43632b0e0ebaSChenbo Xia 	if (retval < 0)
43642b0e0ebaSChenbo Xia 		return retval;
43656f51deb9SIvan Ilchenko 
43666f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
43676f51deb9SIvan Ilchenko 	if (retval != 0)
43686f51deb9SIvan Ilchenko 		return retval;
43692a977b89SWenzhuo Lu 
43702a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
43712a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
43722a977b89SWenzhuo Lu 	 */
43732a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
43742a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
437561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
437661a3b0e5SAndrew Rybchenko 			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
437761a3b0e5SAndrew Rybchenko 			pid);
43782a977b89SWenzhuo Lu 		return -1;
43792a977b89SWenzhuo Lu 	}
43802a977b89SWenzhuo Lu 
43812a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
43822a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
43832a977b89SWenzhuo Lu 	 */
43842a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
438586ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
438686ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
438786ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
438886ef65eeSBernard Iremonger 		} else {
43892a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
43902a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
439186ef65eeSBernard Iremonger 		}
43922a977b89SWenzhuo Lu 	} else {
43932a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
43942a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
43952a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
43962a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
43972a977b89SWenzhuo Lu 		} else {
43982a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
43992a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
44002a977b89SWenzhuo Lu 
44012a977b89SWenzhuo Lu 		}
44022a977b89SWenzhuo Lu 	}
44032a977b89SWenzhuo Lu 	rx_free_thresh = 64;
44042a977b89SWenzhuo Lu 
4405013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4406013af9b6SIntel 
4407f4d178c1SXueming Li 	rxtx_port_config(pid);
4408013af9b6SIntel 	/* VLAN filter */
4409295968d1SFerruh Yigit 	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
44101a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
4411013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
4412013af9b6SIntel 
4413a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4414a5279d25SIgor Romanov 	if (retval != 0)
4415a5279d25SIgor Romanov 		return retval;
4416a5279d25SIgor Romanov 
44177741e4cfSIntel 	rte_port->dcb_flag = 1;
44187741e4cfSIntel 
4419a690a070SHuisong Li 	/* Enter DCB configuration status */
4420a690a070SHuisong Li 	dcb_config = 1;
4421a690a070SHuisong Li 
4422013af9b6SIntel 	return 0;
4423af75078fSIntel }
4424af75078fSIntel 
4425ffc468ffSTetsuya Mukawa static void
4426ffc468ffSTetsuya Mukawa init_port(void)
4427ffc468ffSTetsuya Mukawa {
44281b9f2746SGregory Etelson 	int i;
44291b9f2746SGregory Etelson 
4430ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
4431ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
4432ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4433ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
4434ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
4435ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
4436ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
4437ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
4438ffc468ffSTetsuya Mukawa 	}
4439236bc417SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
4440236bc417SGregory Etelson 		ports[i].fwd_mac_swap = 1;
444163b72657SIvan Ilchenko 		ports[i].xstats_info.allocated = false;
44421b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
4443236bc417SGregory Etelson 	}
444429841336SPhil Yang 	/* Initialize ports NUMA structures */
444529841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
444629841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
444729841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4448ffc468ffSTetsuya Mukawa }
4449ffc468ffSTetsuya Mukawa 
4450d3a274ceSZhihong Wang static void
4451d3a274ceSZhihong Wang force_quit(void)
4452d3a274ceSZhihong Wang {
4453d3a274ceSZhihong Wang 	pmd_test_exit();
4454d3a274ceSZhihong Wang 	prompt_exit();
4455d3a274ceSZhihong Wang }
4456d3a274ceSZhihong Wang 
4457d3a274ceSZhihong Wang static void
4458cfea1f30SPablo de Lara print_stats(void)
4459cfea1f30SPablo de Lara {
4460cfea1f30SPablo de Lara 	uint8_t i;
4461cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
4462cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4463cfea1f30SPablo de Lara 
4464cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
4465cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
4466cfea1f30SPablo de Lara 
4467cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
4468cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4469cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
4470683d1e82SIgor Romanov 
4471683d1e82SIgor Romanov 	fflush(stdout);
4472cfea1f30SPablo de Lara }
4473cfea1f30SPablo de Lara 
4474cfea1f30SPablo de Lara static void
4475d3a274ceSZhihong Wang signal_handler(int signum)
4476d3a274ceSZhihong Wang {
4477d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
447861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4479d3a274ceSZhihong Wang 			signum);
4480a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
4481102b7329SReshma Pattan 		/* uninitialize packet capture framework */
4482102b7329SReshma Pattan 		rte_pdump_uninit();
4483102b7329SReshma Pattan #endif
4484a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
44858b36297dSAmit Gupta 		if (latencystats_enabled != 0)
448662d3216dSReshma Pattan 			rte_latencystats_uninit();
448762d3216dSReshma Pattan #endif
4488d3a274ceSZhihong Wang 		force_quit();
4489d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
4490d9a191a0SPhil Yang 		f_quit = 1;
4491d3a274ceSZhihong Wang 		/* exit with the expected status */
4492761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4493d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
4494d3a274ceSZhihong Wang 		kill(getpid(), signum);
4495761f7ae1SJie Zhou #endif
4496d3a274ceSZhihong Wang 	}
4497d3a274ceSZhihong Wang }
4498d3a274ceSZhihong Wang 
4499af75078fSIntel int
4500af75078fSIntel main(int argc, char** argv)
4501af75078fSIntel {
4502af75078fSIntel 	int diag;
4503f8244c63SZhiyong Yang 	portid_t port_id;
45044918a357SXiaoyun Li 	uint16_t count;
4505fb73e096SJeff Guo 	int ret;
4506af75078fSIntel 
4507d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
4508d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
4509d3a274ceSZhihong Wang 
4510285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
4511285fd101SOlivier Matz 	if (testpmd_logtype < 0)
451216267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type");
4513285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4514285fd101SOlivier Matz 
45159201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
45169201806eSStephen Hemminger 	if (diag < 0)
451716267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
451816267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
45199201806eSStephen Hemminger 
4520563fbd08SDavid Marchand 	/* allocate port structures, and init them */
4521563fbd08SDavid Marchand 	init_port();
4522563fbd08SDavid Marchand 
452397b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
452497b5d8b5SThomas Monjalon 	if (ret != 0)
452516267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
452697b5d8b5SThomas Monjalon 
4527a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
45284aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
4529e9436f54STiwei Bie 	rte_pdump_init();
45304aa0d012SAnatoly Burakov #endif
45314aa0d012SAnatoly Burakov 
45324918a357SXiaoyun Li 	count = 0;
45334918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
45344918a357SXiaoyun Li 		ports_ids[count] = port_id;
45354918a357SXiaoyun Li 		count++;
45364918a357SXiaoyun Li 	}
45374918a357SXiaoyun Li 	nb_ports = (portid_t) count;
45384aa0d012SAnatoly Burakov 	if (nb_ports == 0)
45394aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
45404aa0d012SAnatoly Burakov 
45414aa0d012SAnatoly Burakov 	set_def_fwd_config();
45424aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
454316267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
454416267ceeSStephen Hemminger 			 "Check the core mask argument\n");
45454aa0d012SAnatoly Burakov 
4546e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
4547a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4548e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
4549e505d84cSAnatoly Burakov #endif
4550a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
4551e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
4552e505d84cSAnatoly Burakov #endif
4553e505d84cSAnatoly Burakov 
4554fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
45555fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
4556fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
4557fb7b8b32SAnatoly Burakov #else
4558fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
4559fb7b8b32SAnatoly Burakov #endif
4560fb7b8b32SAnatoly Burakov 
4561e505d84cSAnatoly Burakov 	argc -= diag;
4562e505d84cSAnatoly Burakov 	argv += diag;
4563e505d84cSAnatoly Burakov 	if (argc > 1)
4564e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
4565e505d84cSAnatoly Burakov 
4566761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4567e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4568285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
45691c036b16SEelco Chaudron 			strerror(errno));
45701c036b16SEelco Chaudron 	}
4571761f7ae1SJie Zhou #endif
45721c036b16SEelco Chaudron 
457399cabef0SPablo de Lara 	if (tx_first && interactive)
457499cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
457599cabef0SPablo de Lara 				"interactive mode.\n");
45768820cba4SDavid Hunt 
45778820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
457861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
457961a3b0e5SAndrew Rybchenko 			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
45808820cba4SDavid Hunt 		lsc_interrupt = 0;
45818820cba4SDavid Hunt 	}
45828820cba4SDavid Hunt 
45835a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
458461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
458561a3b0e5SAndrew Rybchenko 			"Warning: Either rx or tx queues should be non-zero\n");
45865a8fb55cSReshma Pattan 
45875a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
458861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
458961a3b0e5SAndrew Rybchenko 			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4590af75078fSIntel 			nb_rxq, nb_txq);
4591af75078fSIntel 
4592af75078fSIntel 	init_config();
4593fb73e096SJeff Guo 
4594fb73e096SJeff Guo 	if (hot_plug) {
45952049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
4596fb73e096SJeff Guo 		if (ret) {
45972049c511SJeff Guo 			RTE_LOG(ERR, EAL,
45982049c511SJeff Guo 				"fail to enable hotplug handling.");
4599fb73e096SJeff Guo 			return -1;
4600fb73e096SJeff Guo 		}
4601fb73e096SJeff Guo 
46022049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
46032049c511SJeff Guo 		if (ret) {
46042049c511SJeff Guo 			RTE_LOG(ERR, EAL,
46052049c511SJeff Guo 				"fail to start device event monitoring.");
46062049c511SJeff Guo 			return -1;
46072049c511SJeff Guo 		}
46082049c511SJeff Guo 
46092049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
4610cc1bf307SJeff Guo 			dev_event_callback, NULL);
46112049c511SJeff Guo 		if (ret) {
46122049c511SJeff Guo 			RTE_LOG(ERR, EAL,
46132049c511SJeff Guo 				"fail  to register device event callback\n");
46142049c511SJeff Guo 			return -1;
46152049c511SJeff Guo 		}
4616fb73e096SJeff Guo 	}
4617fb73e096SJeff Guo 
46187e403725SGregory Etelson 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0) {
46197e403725SGregory Etelson 		if (!interactive) {
46207e403725SGregory Etelson 			rte_eal_cleanup();
4621148f963fSBruce Richardson 			rte_exit(EXIT_FAILURE, "Start ports failed\n");
46227e403725SGregory Etelson 		}
46237e403725SGregory Etelson 		fprintf(stderr, "Start ports failed\n");
46247e403725SGregory Etelson 	}
4625af75078fSIntel 
4626ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
462734fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
462834fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
462934fc1051SIvan Ilchenko 		if (ret != 0)
463061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
463161a3b0e5SAndrew Rybchenko 				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
463234fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
463334fc1051SIvan Ilchenko 	}
4634af75078fSIntel 
4635bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS
46367e4441c8SRemy Horton 	/* Init metrics library */
46377e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
4638bb9be9a4SDavid Marchand #endif
46397e4441c8SRemy Horton 
4640a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
464162d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
464262d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
464362d3216dSReshma Pattan 		if (ret)
464461a3b0e5SAndrew Rybchenko 			fprintf(stderr,
464561a3b0e5SAndrew Rybchenko 				"Warning: latencystats init() returned error %d\n",
464661a3b0e5SAndrew Rybchenko 				ret);
464761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Latencystats running on lcore %d\n",
464862d3216dSReshma Pattan 			latencystats_lcore_id);
464962d3216dSReshma Pattan 	}
465062d3216dSReshma Pattan #endif
465162d3216dSReshma Pattan 
46527e4441c8SRemy Horton 	/* Setup bitrate stats */
4653a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4654e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
46557e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
46567e4441c8SRemy Horton 		if (bitrate_data == NULL)
4657e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
4658e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
46597e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
4660e25e6c70SRemy Horton 	}
46617e4441c8SRemy Horton #endif
466299a4974aSRobin Jarry 
466399a4974aSRobin Jarry 	if (record_core_cycles)
466499a4974aSRobin Jarry 		rte_lcore_register_usage_cb(lcore_usage_callback);
466599a4974aSRobin Jarry 
4666a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE
4667592ab76fSDavid Marchand 	if (init_cmdline() != 0)
4668592ab76fSDavid Marchand 		rte_exit(EXIT_FAILURE,
4669592ab76fSDavid Marchand 			"Could not initialise cmdline context.\n");
4670592ab76fSDavid Marchand 
467181ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
467281ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
467381ef862bSAllain Legacy 
4674ca7feb22SCyril Chemparathy 	if (interactive == 1) {
4675ca7feb22SCyril Chemparathy 		if (auto_start) {
4676ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
4677ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
4678ca7feb22SCyril Chemparathy 		}
4679af75078fSIntel 		prompt();
46800de738cfSJiayu Hu 		pmd_test_exit();
4681ca7feb22SCyril Chemparathy 	} else
46820d56cb81SThomas Monjalon #endif
46830d56cb81SThomas Monjalon 	{
4684af75078fSIntel 		char c;
4685af75078fSIntel 		int rc;
4686af75078fSIntel 
4687d9a191a0SPhil Yang 		f_quit = 0;
4688d9a191a0SPhil Yang 
4689af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
469099cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
4691cfea1f30SPablo de Lara 		if (stats_period != 0) {
4692cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
4693cfea1f30SPablo de Lara 			uint64_t timer_period;
4694cfea1f30SPablo de Lara 
4695cfea1f30SPablo de Lara 			/* Convert to number of cycles */
4696cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
4697cfea1f30SPablo de Lara 
4698d9a191a0SPhil Yang 			while (f_quit == 0) {
4699cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
4700cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
4701cfea1f30SPablo de Lara 
4702cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
4703cfea1f30SPablo de Lara 					print_stats();
4704cfea1f30SPablo de Lara 					/* Reset the timer */
4705cfea1f30SPablo de Lara 					diff_time = 0;
4706cfea1f30SPablo de Lara 				}
4707cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
4708cfea1f30SPablo de Lara 				prev_time = cur_time;
4709761f7ae1SJie Zhou 				rte_delay_us_sleep(US_PER_S);
4710cfea1f30SPablo de Lara 			}
4711cfea1f30SPablo de Lara 		}
4712cfea1f30SPablo de Lara 
4713af75078fSIntel 		printf("Press enter to exit\n");
4714af75078fSIntel 		rc = read(0, &c, 1);
4715d3a274ceSZhihong Wang 		pmd_test_exit();
4716af75078fSIntel 		if (rc < 0)
4717af75078fSIntel 			return 1;
4718af75078fSIntel 	}
4719af75078fSIntel 
47205e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
47215e516c89SStephen Hemminger 	if (ret != 0)
47225e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
47235e516c89SStephen Hemminger 			 "EAL cleanup failed: %s\n", strerror(-ret));
47245e516c89SStephen Hemminger 
47255e516c89SStephen Hemminger 	return EXIT_SUCCESS;
4726af75078fSIntel }
4727