/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the main core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< NUMA enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated. Set a flag to exit the stats period loop after
 * SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

2271bb4a528SFerruh Yigit /*
2280f2096d7SViacheslav Ovsiienko  * Configuration of packet segments used to scatter received packets
2290f2096d7SViacheslav Ovsiienko  * if some of split features is configured.
2300f2096d7SViacheslav Ovsiienko  */
2310f2096d7SViacheslav Ovsiienko uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
2320f2096d7SViacheslav Ovsiienko uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
23391c78e09SViacheslav Ovsiienko uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
23491c78e09SViacheslav Ovsiienko uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
2350f2096d7SViacheslav Ovsiienko 
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask Ethernet events.
 * Default: all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles is disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts is disabled by default
 */
uint8_t record_burst_stats;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of the RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;

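/*
 * Negotiate delivery of Rx metadata (flow flag, flow mark, tunnel ID) with
 * the PMD before the port is configured. Only the primary process
 * negotiates; secondary processes skip the negotiation.
 */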
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

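/*
 * Pick the ethdev port acting as "transfer proxy" for flow rules on this
 * port; defaults to the port itself. Only the primary process queries the
 * PMD, and a failure is logged but not fatal.
 */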
static void
flow_pick_transfer_proxy_mp(uint16_t port_id)
{
	struct rte_port *port = &ports[port_id];
	int ret;

	port->flow_transfer_proxy = port_id;

	if (!is_proc_primary())
		return;

	ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
					   NULL);
	if (ret != 0) {
		fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
}

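/*
 * Multi-process wrappers: device configure/start/stop, mempool free and
 * MTU updates are performed by the primary process only; in secondary
 * processes they are no-ops returning success.
 */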
static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					dev_conf);
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_start(port_id);

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_stop(port_id);

	return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
				enum rte_dev_event_type type,
				void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if a socket is already discovered.
 * If yes, return zero. If not, return a positive value.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
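	/* e.g. a 2M page gives log2 = 21, so the flag is (21 << HUGE_SHIFT),
	 * matching Linux's MAP_HUGE_2MB when HUGE_SHIFT is 26.
	 */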
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

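/* Reserve an anonymous private mapping, optionally backed by hugepages. */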
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

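/*
 * Create the EXTMEM_HEAP_NAME malloc heap if it does not exist yet, then
 * allocate an external memory area and add it to that heap. With VFIO the
 * added memory is DMA-mapped automatically by EAL.
 */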
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
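
/*
 * Mempool memory-area callback: DMA-unmap a memory chunk from every probed
 * port and un-register it from EAL.
 */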
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

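/*
 * Mempool memory-area callback: register a memory chunk with EAL and
 * DMA-map it for every probed port.
 */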
static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
}
#endif

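/*
 * Reserve IOVA-contiguous memzones for pinned external mbuf data buffers
 * and fill a descriptor table for rte_pktmbuf_pool_create_extbuf().
 * Returns the number of descriptors filled, or 0 on failure.
 */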
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}

/*
 * Mbuf pool creation, done once per socket and mbuf size at init time.
 */
1103401b744dSShahaf Shuler static struct rte_mempool *
1104af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
110526cbb419SViacheslav Ovsiienko 		 unsigned int socket_id, uint16_t size_idx)
1106af75078fSIntel {
1107af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
1108bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
1109761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1110af75078fSIntel 	uint32_t mb_size;
1111af75078fSIntel 
1112dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1113761f7ae1SJie Zhou #endif
111426cbb419SViacheslav Ovsiienko 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1115a550baf2SMin Hu (Connor) 	if (!is_proc_primary()) {
1116a550baf2SMin Hu (Connor) 		rte_mp = rte_mempool_lookup(pool_name);
1117a550baf2SMin Hu (Connor) 		if (rte_mp == NULL)
1118a550baf2SMin Hu (Connor) 			rte_exit(EXIT_FAILURE,
1119a550baf2SMin Hu (Connor) 				"Get mbuf pool for socket %u failed: %s\n",
1120a550baf2SMin Hu (Connor) 				socket_id, rte_strerror(rte_errno));
1121a550baf2SMin Hu (Connor) 		return rte_mp;
1122a550baf2SMin Hu (Connor) 	}
1123148f963fSBruce Richardson 
1124285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
1125d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1126d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1127d1eb542eSOlivier Matz 
1128c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
1129c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
1130c7f5dba7SAnatoly Burakov 		{
1131c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
1132c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1133c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
1134c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1135c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1136c7f5dba7SAnatoly Burakov 			break;
1137c7f5dba7SAnatoly Burakov 		}
1138761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1139c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
1140c7f5dba7SAnatoly Burakov 		{
1141b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1142c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
1143148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
114459fcf854SShahaf Shuler 				socket_id, mempool_flags);
114524427bb9SOlivier Matz 			if (rte_mp == NULL)
114624427bb9SOlivier Matz 				goto err;
1147b19a0c75SOlivier Matz 
1148b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
1149b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
1150b19a0c75SOlivier Matz 				rte_mp = NULL;
115124427bb9SOlivier Matz 				goto err;
1152b19a0c75SOlivier Matz 			}
1153b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
1154b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
11553a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1156c7f5dba7SAnatoly Burakov 			break;
1157c7f5dba7SAnatoly Burakov 		}
1158c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
1159c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
1160c7f5dba7SAnatoly Burakov 		{
1161c7f5dba7SAnatoly Burakov 			int heap_socket;
1162c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1163c7f5dba7SAnatoly Burakov 
1164c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1165c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1166c7f5dba7SAnatoly Burakov 
1167c7f5dba7SAnatoly Burakov 			heap_socket =
1168c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1169c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
1170c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1171c7f5dba7SAnatoly Burakov 
11720e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
11730e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
1174ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1175c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
1176c7f5dba7SAnatoly Burakov 					heap_socket);
1177c7f5dba7SAnatoly Burakov 			break;
1178c7f5dba7SAnatoly Burakov 		}
1179761f7ae1SJie Zhou #endif
118072512e18SViacheslav Ovsiienko 	case MP_ALLOC_XBUF:
118172512e18SViacheslav Ovsiienko 		{
118272512e18SViacheslav Ovsiienko 			struct rte_pktmbuf_extmem *ext_mem;
118372512e18SViacheslav Ovsiienko 			unsigned int ext_num;
118472512e18SViacheslav Ovsiienko 
118572512e18SViacheslav Ovsiienko 			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
118672512e18SViacheslav Ovsiienko 					       socket_id, pool_name, &ext_mem);
118772512e18SViacheslav Ovsiienko 			if (ext_num == 0)
118872512e18SViacheslav Ovsiienko 				rte_exit(EXIT_FAILURE,
118972512e18SViacheslav Ovsiienko 					 "Can't create pinned data buffers\n");
119072512e18SViacheslav Ovsiienko 
119172512e18SViacheslav Ovsiienko 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
119272512e18SViacheslav Ovsiienko 					rte_mbuf_best_mempool_ops());
119372512e18SViacheslav Ovsiienko 			rte_mp = rte_pktmbuf_pool_create_extbuf
119472512e18SViacheslav Ovsiienko 					(pool_name, nb_mbuf, mb_mempool_cache,
119572512e18SViacheslav Ovsiienko 					 0, mbuf_seg_size, socket_id,
119672512e18SViacheslav Ovsiienko 					 ext_mem, ext_num);
119772512e18SViacheslav Ovsiienko 			free(ext_mem);
119872512e18SViacheslav Ovsiienko 			break;
119972512e18SViacheslav Ovsiienko 		}
1200c7f5dba7SAnatoly Burakov 	default:
1201c7f5dba7SAnatoly Burakov 		{
1202c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1203c7f5dba7SAnatoly Burakov 		}
1204bece7b6cSChristian Ehrhardt 	}
1205148f963fSBruce Richardson 
1206761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
120724427bb9SOlivier Matz err:
1208761f7ae1SJie Zhou #endif
1209af75078fSIntel 	if (rte_mp == NULL) {
1210d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
1211d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
1212d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
1213148f963fSBruce Richardson 	} else if (verbose_level > 0) {
1214591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
1215af75078fSIntel 	}
1216401b744dSShahaf Shuler 	return rte_mp;
1217af75078fSIntel }
1218af75078fSIntel 
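/*
 * Illustrative call (a sketch, not part of the source): init_config()
 * below creates one pool per configured mbuf size on each socket,
 * along the lines of
 *
 *	struct rte_mempool *mp = mbuf_pool_create(2048, 16384, 0, 0);
 *
 * where the segment size and mbuf count shown here are hypothetical;
 * the real values come from command-line parameters. The function
 * calls rte_exit() itself on failure, so callers need no NULL check.
 */
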
121920a0286fSLiu Xiaofeng /*
122020a0286fSLiu Xiaofeng  * Check whether the given socket id is valid in NUMA mode.
122120a0286fSLiu Xiaofeng  * Return 0 if valid, otherwise return -1.
122220a0286fSLiu Xiaofeng  */
122320a0286fSLiu Xiaofeng static int
122420a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
122520a0286fSLiu Xiaofeng {
122620a0286fSLiu Xiaofeng 	static int warning_once = 0;
122720a0286fSLiu Xiaofeng 
1228c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
122920a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
123061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
123161a3b0e5SAndrew Rybchenko 				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
123220a0286fSLiu Xiaofeng 		warning_once = 1;
123320a0286fSLiu Xiaofeng 		return -1;
123420a0286fSLiu Xiaofeng 	}
123520a0286fSLiu Xiaofeng 	return 0;
123620a0286fSLiu Xiaofeng }
123720a0286fSLiu Xiaofeng 
12383f7311baSWei Dai /*
12393f7311baSWei Dai  * Get the allowed maximum number of RX queues.
12403f7311baSWei Dai  * *pid returns the port id which has the minimal value of
12413f7311baSWei Dai  * max_rx_queues among all ports.
12423f7311baSWei Dai  */
12433f7311baSWei Dai queueid_t
12443f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
12453f7311baSWei Dai {
12469e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
12476f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
12483f7311baSWei Dai 	portid_t pi;
12493f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
12503f7311baSWei Dai 
12513f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
12526f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
12536f51deb9SIvan Ilchenko 			continue;
12546f51deb9SIvan Ilchenko 
12556f51deb9SIvan Ilchenko 		max_rxq_valid = true;
12563f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
12573f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
12583f7311baSWei Dai 			*pid = pi;
12593f7311baSWei Dai 		}
12603f7311baSWei Dai 	}
12616f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
12623f7311baSWei Dai }
12633f7311baSWei Dai 
12643f7311baSWei Dai /*
12653f7311baSWei Dai  * Check whether the input rxq is valid.
12663f7311baSWei Dai  * The input rxq is valid if it does not exceed the maximum
12673f7311baSWei Dai  * number of RX queues of any port.
12683f7311baSWei Dai  * Return 0 if valid, otherwise return -1.
12693f7311baSWei Dai  */
12703f7311baSWei Dai int
12713f7311baSWei Dai check_nb_rxq(queueid_t rxq)
12723f7311baSWei Dai {
12733f7311baSWei Dai 	queueid_t allowed_max_rxq;
12743f7311baSWei Dai 	portid_t pid = 0;
12753f7311baSWei Dai 
12763f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
12773f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
127861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
127961a3b0e5SAndrew Rybchenko 			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
128061a3b0e5SAndrew Rybchenko 			rxq, allowed_max_rxq, pid);
12813f7311baSWei Dai 		return -1;
12823f7311baSWei Dai 	}
12833f7311baSWei Dai 	return 0;
12843f7311baSWei Dai }
12853f7311baSWei Dai 
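/*
 * Illustrative use (a sketch with hypothetical values): command-line
 * handling would validate a requested Rx queue count before applying
 * it, e.g.
 *
 *	queueid_t rxq_req = 8;
 *
 *	if (check_nb_rxq(rxq_req) != 0)
 *		rte_exit(EXIT_FAILURE, "invalid number of Rx queues\n");
 *	nb_rxq = rxq_req;
 *
 * check_nb_txq() below is used the same way for Tx queues.
 */
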
128636db4f6cSWei Dai /*
128736db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
128836db4f6cSWei Dai  * *pid returns the port id which has the minimal value of
128936db4f6cSWei Dai  * max_tx_queues among all ports.
129036db4f6cSWei Dai  */
129136db4f6cSWei Dai queueid_t
129236db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
129336db4f6cSWei Dai {
12949e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
12956f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
129636db4f6cSWei Dai 	portid_t pi;
129736db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
129836db4f6cSWei Dai 
129936db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13006f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13016f51deb9SIvan Ilchenko 			continue;
13026f51deb9SIvan Ilchenko 
13036f51deb9SIvan Ilchenko 		max_txq_valid = true;
130436db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
130536db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
130636db4f6cSWei Dai 			*pid = pi;
130736db4f6cSWei Dai 		}
130836db4f6cSWei Dai 	}
13096f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
131036db4f6cSWei Dai }
131136db4f6cSWei Dai 
131236db4f6cSWei Dai /*
131336db4f6cSWei Dai  * Check whether the input txq is valid.
131436db4f6cSWei Dai  * The input txq is valid if it does not exceed the maximum
131536db4f6cSWei Dai  * number of TX queues of any port.
131636db4f6cSWei Dai  * Return 0 if valid, otherwise return -1.
131736db4f6cSWei Dai  */
131836db4f6cSWei Dai int
131936db4f6cSWei Dai check_nb_txq(queueid_t txq)
132036db4f6cSWei Dai {
132136db4f6cSWei Dai 	queueid_t allowed_max_txq;
132236db4f6cSWei Dai 	portid_t pid = 0;
132336db4f6cSWei Dai 
132436db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
132536db4f6cSWei Dai 	if (txq > allowed_max_txq) {
132661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
132761a3b0e5SAndrew Rybchenko 			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
132861a3b0e5SAndrew Rybchenko 			txq, allowed_max_txq, pid);
132936db4f6cSWei Dai 		return -1;
133036db4f6cSWei Dai 	}
133136db4f6cSWei Dai 	return 0;
133236db4f6cSWei Dai }
133336db4f6cSWei Dai 
13341c69df45SOri Kam /*
133599e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
133699e040d3SLijun Ou  * *pid returns the port id which has the minimal value of
133799e040d3SLijun Ou  * max_rxd among all queues of all ports.
133899e040d3SLijun Ou  */
133999e040d3SLijun Ou static uint16_t
134099e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
134199e040d3SLijun Ou {
134299e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
134399e040d3SLijun Ou 	portid_t pi;
134499e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
134599e040d3SLijun Ou 
134699e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
134799e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
134899e040d3SLijun Ou 			continue;
134999e040d3SLijun Ou 
135099e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
135199e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
135299e040d3SLijun Ou 			*pid = pi;
135399e040d3SLijun Ou 		}
135499e040d3SLijun Ou 	}
135599e040d3SLijun Ou 	return allowed_max_rxd;
135699e040d3SLijun Ou }
135799e040d3SLijun Ou 
135899e040d3SLijun Ou /*
135999e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
136099e040d3SLijun Ou  * *pid returns the port id which has the maximal value of
136199e040d3SLijun Ou  * min_rxd among all queues of all ports.
136299e040d3SLijun Ou  */
136399e040d3SLijun Ou static uint16_t
136499e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
136599e040d3SLijun Ou {
136699e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
136799e040d3SLijun Ou 	portid_t pi;
136899e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
136999e040d3SLijun Ou 
137099e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
137199e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
137299e040d3SLijun Ou 			continue;
137399e040d3SLijun Ou 
137499e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
137599e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
137699e040d3SLijun Ou 			*pid = pi;
137799e040d3SLijun Ou 		}
137899e040d3SLijun Ou 	}
137999e040d3SLijun Ou 
138099e040d3SLijun Ou 	return allowed_min_rxd;
138199e040d3SLijun Ou }
138299e040d3SLijun Ou 
138399e040d3SLijun Ou /*
138499e040d3SLijun Ou  * Check whether the input rxd is valid.
138599e040d3SLijun Ou  * The input rxd is valid if it does not exceed the maximum
138699e040d3SLijun Ou  * number of RXDs of any Rx queue and is not less than the
138799e040d3SLijun Ou  * minimal number of RXDs of any Rx queue.
138899e040d3SLijun Ou  * Return 0 if valid, otherwise return -1.
138999e040d3SLijun Ou  */
139099e040d3SLijun Ou int
139199e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
139299e040d3SLijun Ou {
139399e040d3SLijun Ou 	uint16_t allowed_max_rxd;
139499e040d3SLijun Ou 	uint16_t allowed_min_rxd;
139599e040d3SLijun Ou 	portid_t pid = 0;
139699e040d3SLijun Ou 
139799e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
139899e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
139961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
140061a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
140161a3b0e5SAndrew Rybchenko 			rxd, allowed_max_rxd, pid);
140299e040d3SLijun Ou 		return -1;
140399e040d3SLijun Ou 	}
140499e040d3SLijun Ou 
140599e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
140699e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
140761a3b0e5SAndrew Rybchenko 		fprintf(stderr,
140861a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
140961a3b0e5SAndrew Rybchenko 			rxd, allowed_min_rxd, pid);
141099e040d3SLijun Ou 		return -1;
141199e040d3SLijun Ou 	}
141299e040d3SLijun Ou 
141399e040d3SLijun Ou 	return 0;
141499e040d3SLijun Ou }
141599e040d3SLijun Ou 
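/*
 * Worked example for the descriptor checks (hypothetical limits): with
 * two ports reporting rx_desc_lim {nb_min 64, nb_max 4096} and
 * {nb_min 32, nb_max 2048}, the accepted range is [64, 2048], so
 *
 *	check_nb_rxd(1024)	returns 0 (valid)
 *	check_nb_rxd(32)	returns -1 (below the 64-descriptor floor)
 *	check_nb_rxd(4096)	returns -1 (above the 2048-descriptor cap)
 */
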
141699e040d3SLijun Ou /*
141799e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queue.
141899e040d3SLijun Ou  * *pid returns the port id which has the minimal value of
141999e040d3SLijun Ou  * max_txd among all queues of all ports.
142099e040d3SLijun Ou  */
142199e040d3SLijun Ou static uint16_t
142299e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
142399e040d3SLijun Ou {
142499e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
142599e040d3SLijun Ou 	portid_t pi;
142699e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
142799e040d3SLijun Ou 
142899e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
142999e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
143099e040d3SLijun Ou 			continue;
143199e040d3SLijun Ou 
143299e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
143399e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
143499e040d3SLijun Ou 			*pid = pi;
143599e040d3SLijun Ou 		}
143699e040d3SLijun Ou 	}
143799e040d3SLijun Ou 	return allowed_max_txd;
143899e040d3SLijun Ou }
143999e040d3SLijun Ou 
144099e040d3SLijun Ou /*
144199e040d3SLijun Ou  * Get the allowed minimal number of TXDs of every tx queue.
144299e040d3SLijun Ou  * *pid returns the port id which has the maximal value of
144399e040d3SLijun Ou  * min_txd among all queues of all ports.
144499e040d3SLijun Ou  */
144599e040d3SLijun Ou static uint16_t
144699e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
144799e040d3SLijun Ou {
144899e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
144999e040d3SLijun Ou 	portid_t pi;
145099e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
145199e040d3SLijun Ou 
145299e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
145399e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
145499e040d3SLijun Ou 			continue;
145599e040d3SLijun Ou 
145699e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
145799e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
145899e040d3SLijun Ou 			*pid = pi;
145999e040d3SLijun Ou 		}
146099e040d3SLijun Ou 	}
146199e040d3SLijun Ou 
146299e040d3SLijun Ou 	return allowed_min_txd;
146399e040d3SLijun Ou }
146499e040d3SLijun Ou 
146599e040d3SLijun Ou /*
146699e040d3SLijun Ou  * Check whether the input txd is valid.
146799e040d3SLijun Ou  * The input txd is valid if it is within the [min_txd, max_txd]
146899e040d3SLijun Ou  * limits of every Tx queue of every port.
146999e040d3SLijun Ou  * Return 0 if valid, otherwise return -1.
147099e040d3SLijun Ou  */
147199e040d3SLijun Ou int
147299e040d3SLijun Ou check_nb_txd(queueid_t txd)
147399e040d3SLijun Ou {
147499e040d3SLijun Ou 	uint16_t allowed_max_txd;
147599e040d3SLijun Ou 	uint16_t allowed_min_txd;
147699e040d3SLijun Ou 	portid_t pid = 0;
147799e040d3SLijun Ou 
147899e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
147999e040d3SLijun Ou 	if (txd > allowed_max_txd) {
148061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
148161a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
148261a3b0e5SAndrew Rybchenko 			txd, allowed_max_txd, pid);
148399e040d3SLijun Ou 		return -1;
148499e040d3SLijun Ou 	}
148599e040d3SLijun Ou 
148699e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
148799e040d3SLijun Ou 	if (txd < allowed_min_txd) {
148861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
148961a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
149061a3b0e5SAndrew Rybchenko 			txd, allowed_min_txd, pid);
149199e040d3SLijun Ou 		return -1;
149299e040d3SLijun Ou 	}
149399e040d3SLijun Ou 	return 0;
149499e040d3SLijun Ou }
149599e040d3SLijun Ou 
149699e040d3SLijun Ou 
149799e040d3SLijun Ou /*
14981c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
14991c69df45SOri Kam  * *pid returns the port id which has the minimal value of
15001c69df45SOri Kam  * max_hairpin_queues among all ports.
15011c69df45SOri Kam  */
15021c69df45SOri Kam queueid_t
15031c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15041c69df45SOri Kam {
15059e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15061c69df45SOri Kam 	portid_t pi;
15071c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
15081c69df45SOri Kam 
15091c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
15101c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15111c69df45SOri Kam 			*pid = pi;
15121c69df45SOri Kam 			return 0;
15131c69df45SOri Kam 		}
15141c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
15151c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
15161c69df45SOri Kam 			*pid = pi;
15171c69df45SOri Kam 		}
15181c69df45SOri Kam 	}
15191c69df45SOri Kam 	return allowed_max_hairpinq;
15201c69df45SOri Kam }
15211c69df45SOri Kam 
15221c69df45SOri Kam /*
15231c69df45SOri Kam  * Check whether the input hairpin queue count is valid.
15241c69df45SOri Kam  * It is valid if it does not exceed the maximum number
15251c69df45SOri Kam  * of hairpin queues of any port.
15261c69df45SOri Kam  * Return 0 if valid, otherwise return -1.
15271c69df45SOri Kam  */
15281c69df45SOri Kam int
15291c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
15301c69df45SOri Kam {
15311c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
15321c69df45SOri Kam 	portid_t pid = 0;
15331c69df45SOri Kam 
15341c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
15351c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
153661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
153761a3b0e5SAndrew Rybchenko 			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
15381c69df45SOri Kam 			hairpinq, allowed_max_hairpinq, pid);
15391c69df45SOri Kam 		return -1;
15401c69df45SOri Kam 	}
15411c69df45SOri Kam 	return 0;
15421c69df45SOri Kam }
15431c69df45SOri Kam 
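/*
 * Illustrative use (a sketch): a requested hairpin queue count is
 * validated like the Rx/Tx queue counts above, e.g.
 *
 *	if (check_nb_hairpinq(nb_hairpinq) != 0)
 *		rte_exit(EXIT_FAILURE, "invalid number of hairpin queues\n");
 *
 * Note that a port whose driver does not implement
 * rte_eth_dev_hairpin_capability_get() makes the allowed maximum 0,
 * so any non-zero request fails in such a configuration.
 */
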
15441bb4a528SFerruh Yigit static int
15451bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info)
15461bb4a528SFerruh Yigit {
15471bb4a528SFerruh Yigit 	uint32_t eth_overhead;
15481bb4a528SFerruh Yigit 
15491bb4a528SFerruh Yigit 	if (dev_info->max_mtu != UINT16_MAX &&
15501bb4a528SFerruh Yigit 	    dev_info->max_rx_pktlen > dev_info->max_mtu)
15511bb4a528SFerruh Yigit 		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
15521bb4a528SFerruh Yigit 	else
15531bb4a528SFerruh Yigit 		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
15541bb4a528SFerruh Yigit 
15551bb4a528SFerruh Yigit 	return eth_overhead;
15561bb4a528SFerruh Yigit }
15571bb4a528SFerruh Yigit 
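/*
 * Worked example for get_eth_overhead() (values assumed for
 * illustration): a driver reporting max_rx_pktlen = 1518 and
 * max_mtu = 1500 yields 1518 - 1500 = 18 bytes of overhead, matching
 * the RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) fallback used
 * when no usable max_mtu is reported.
 */
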
1558af75078fSIntel static void
1559b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id)
1560b6b8a1ebSViacheslav Ovsiienko {
1561b6b8a1ebSViacheslav Ovsiienko 	struct rte_port *port = &ports[pid];
1562b6b8a1ebSViacheslav Ovsiienko 	int ret;
1563b6b8a1ebSViacheslav Ovsiienko 	int i;
1564b6b8a1ebSViacheslav Ovsiienko 
1565f6d8a6d3SIvan Malov 	eth_rx_metadata_negotiate_mp(pid);
15661179f05cSIvan Malov 	flow_pick_transfer_proxy_mp(pid);
1567f6d8a6d3SIvan Malov 
1568b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.txmode = tx_mode;
1569b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.rxmode = rx_mode;
1570b6b8a1ebSViacheslav Ovsiienko 
1571b6b8a1ebSViacheslav Ovsiienko 	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1572b6b8a1ebSViacheslav Ovsiienko 	if (ret != 0)
1573b6b8a1ebSViacheslav Ovsiienko 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1574b6b8a1ebSViacheslav Ovsiienko 
1575b6b8a1ebSViacheslav Ovsiienko 	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1576b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.txmode.offloads &=
1577b6b8a1ebSViacheslav Ovsiienko 			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1578b6b8a1ebSViacheslav Ovsiienko 
1579b6b8a1ebSViacheslav Ovsiienko 	/* Apply Rx offloads configuration */
1580b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
1581b6b8a1ebSViacheslav Ovsiienko 		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1582b6b8a1ebSViacheslav Ovsiienko 	/* Apply Tx offloads configuration */
1583b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
1584b6b8a1ebSViacheslav Ovsiienko 		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1585b6b8a1ebSViacheslav Ovsiienko 
1586b6b8a1ebSViacheslav Ovsiienko 	if (eth_link_speed)
1587b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.link_speeds = eth_link_speed;
1588b6b8a1ebSViacheslav Ovsiienko 
15891bb4a528SFerruh Yigit 	if (max_rx_pkt_len)
15901bb4a528SFerruh Yigit 		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
15911bb4a528SFerruh Yigit 			get_eth_overhead(&port->dev_info);
15921bb4a528SFerruh Yigit 
1593b6b8a1ebSViacheslav Ovsiienko 	/* set flag to initialize port/queue */
1594b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig = 1;
1595b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig_queues = 1;
1596b6b8a1ebSViacheslav Ovsiienko 	port->socket_id = socket_id;
1597b6b8a1ebSViacheslav Ovsiienko 	port->tx_metadata = 0;
1598b6b8a1ebSViacheslav Ovsiienko 
1599b6b8a1ebSViacheslav Ovsiienko 	/*
1600b6b8a1ebSViacheslav Ovsiienko 	 * Check for maximum number of segments per MTU.
1601b6b8a1ebSViacheslav Ovsiienko 	 * Accordingly update the mbuf data size.
1602b6b8a1ebSViacheslav Ovsiienko 	 */
1603b6b8a1ebSViacheslav Ovsiienko 	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1604b6b8a1ebSViacheslav Ovsiienko 	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
16051bb4a528SFerruh Yigit 		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
16061bb4a528SFerruh Yigit 		uint16_t mtu;
1607b6b8a1ebSViacheslav Ovsiienko 
16081bb4a528SFerruh Yigit 		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
16091bb4a528SFerruh Yigit 			uint16_t data_size = (mtu + eth_overhead) /
16101bb4a528SFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
16111bb4a528SFerruh Yigit 			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
16121bb4a528SFerruh Yigit 
16131bb4a528SFerruh Yigit 			if (buffer_size > mbuf_data_size[0]) {
16141bb4a528SFerruh Yigit 				mbuf_data_size[0] = buffer_size;
1615b6b8a1ebSViacheslav Ovsiienko 				TESTPMD_LOG(WARNING,
1616b6b8a1ebSViacheslav Ovsiienko 					"Configured mbuf size of the first segment %hu\n",
1617b6b8a1ebSViacheslav Ovsiienko 					mbuf_data_size[0]);
1618b6b8a1ebSViacheslav Ovsiienko 			}
1619b6b8a1ebSViacheslav Ovsiienko 		}
1620b6b8a1ebSViacheslav Ovsiienko 	}
16211bb4a528SFerruh Yigit }
1622b6b8a1ebSViacheslav Ovsiienko 
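/*
 * Worked example for the mbuf sizing above (hypothetical numbers):
 * with an MTU of 9000, an 18-byte overhead and nb_mtu_seg_max = 5,
 * data_size = (9000 + 18) / 5 = 1803 bytes and the required buffer is
 * 1803 + RTE_PKTMBUF_HEADROOM. If that exceeds mbuf_data_size[0], the
 * first-segment mbuf size is enlarged and a warning is logged.
 */
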
1623b6b8a1ebSViacheslav Ovsiienko static void
1624af75078fSIntel init_config(void)
1625af75078fSIntel {
1626ce8d5614SIntel 	portid_t pid;
1627af75078fSIntel 	struct rte_mempool *mbp;
1628af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1629af75078fSIntel 	lcoreid_t  lc_id;
1630b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
163152f38a20SJiayu Hu 	uint32_t gso_types;
1632487f9a59SYulong Pei 
1633af75078fSIntel 	/* Configuration of logical cores. */
1634af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1635af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1636fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1637af75078fSIntel 	if (fwd_lcores == NULL) {
1638ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1639ce8d5614SIntel 							"failed\n", nb_lcores);
1640af75078fSIntel 	}
1641af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1642af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1643af75078fSIntel 					       sizeof(struct fwd_lcore),
1644fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1645af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1646ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1647ce8d5614SIntel 								"failed\n");
1648af75078fSIntel 		}
1649af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1650af75078fSIntel 	}
1651af75078fSIntel 
16527d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1653b6b8a1ebSViacheslav Ovsiienko 		uint32_t socket_id;
16546f51deb9SIvan Ilchenko 
1655b6ea6408SIntel 		if (numa_support) {
1656b6b8a1ebSViacheslav Ovsiienko 			socket_id = port_numa[pid];
1657b6b8a1ebSViacheslav Ovsiienko 			if (port_numa[pid] == NUMA_NO_CONFIG) {
1658b6b8a1ebSViacheslav Ovsiienko 				socket_id = rte_eth_dev_socket_id(pid);
165920a0286fSLiu Xiaofeng 
166029841336SPhil Yang 				/*
166129841336SPhil Yang 				 * if socket_id is invalid,
166229841336SPhil Yang 				 * set to the first available socket.
166329841336SPhil Yang 				 */
166420a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
166529841336SPhil Yang 					socket_id = socket_ids[0];
1666b6ea6408SIntel 			}
1667b6b8a1ebSViacheslav Ovsiienko 		} else {
1668b6b8a1ebSViacheslav Ovsiienko 			socket_id = (socket_num == UMA_NO_CONFIG) ?
1669b6b8a1ebSViacheslav Ovsiienko 				    0 : socket_num;
1670b6ea6408SIntel 		}
1671b6b8a1ebSViacheslav Ovsiienko 		/* Apply default TxRx configuration for all ports */
1672b6b8a1ebSViacheslav Ovsiienko 		init_config_port_offloads(pid, socket_id);
1673ce8d5614SIntel 	}
16743ab64341SOlivier Matz 	/*
16753ab64341SOlivier Matz 	 * Create mbuf pools.
16763ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single mbuf pool in
16773ab64341SOlivier Matz 	 * socket 0 memory by default.
16783ab64341SOlivier Matz 	 * Otherwise, create an mbuf pool in the memory of each socket in use.
16793ab64341SOlivier Matz 	 *
16803ab64341SOlivier Matz 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
16813ab64341SOlivier Matz 	 * and nb_txd can be reconfigured at run time.
16823ab64341SOlivier Matz 	 */
16833ab64341SOlivier Matz 	if (param_total_num_mbufs)
16843ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
16853ab64341SOlivier Matz 	else {
16863ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
16873ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
16883ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
16893ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
16903ab64341SOlivier Matz 	}
16913ab64341SOlivier Matz 
1692b6ea6408SIntel 	if (numa_support) {
169326cbb419SViacheslav Ovsiienko 		uint8_t i, j;
1694ce8d5614SIntel 
1695c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
169626cbb419SViacheslav Ovsiienko 			for (j = 0; j < mbuf_data_size_n; j++)
169726cbb419SViacheslav Ovsiienko 				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
169826cbb419SViacheslav Ovsiienko 					mbuf_pool_create(mbuf_data_size[j],
1699401b744dSShahaf Shuler 							  nb_mbuf_per_pool,
170026cbb419SViacheslav Ovsiienko 							  socket_ids[i], j);
17013ab64341SOlivier Matz 	} else {
170226cbb419SViacheslav Ovsiienko 		uint8_t i;
170326cbb419SViacheslav Ovsiienko 
170426cbb419SViacheslav Ovsiienko 		for (i = 0; i < mbuf_data_size_n; i++)
170526cbb419SViacheslav Ovsiienko 			mempools[i] = mbuf_pool_create
170626cbb419SViacheslav Ovsiienko 					(mbuf_data_size[i],
1707401b744dSShahaf Shuler 					 nb_mbuf_per_pool,
170826cbb419SViacheslav Ovsiienko 					 socket_num == UMA_NO_CONFIG ?
170926cbb419SViacheslav Ovsiienko 					 0 : socket_num, i);
17103ab64341SOlivier Matz 	}
1711b6ea6408SIntel 
1712b6ea6408SIntel 	init_port_config();
17135886ae07SAdrien Mazarguil 
171452f38a20SJiayu Hu 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1715aaacd052SJiayu Hu 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
17165886ae07SAdrien Mazarguil 	/*
17175886ae07SAdrien Mazarguil 	 * Record which mbuf pool is used by each logical core, if needed.
17185886ae07SAdrien Mazarguil 	 */
17195886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
17208fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
172126cbb419SViacheslav Ovsiienko 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
17228fd8bebcSAdrien Mazarguil 
17235886ae07SAdrien Mazarguil 		if (mbp == NULL)
172426cbb419SViacheslav Ovsiienko 			mbp = mbuf_pool_find(0, 0);
17255886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
172652f38a20SJiayu Hu 		/* initialize GSO context */
172752f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
172852f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
172952f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
173035b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
173135b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
173252f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
17335886ae07SAdrien Mazarguil 	}
17345886ae07SAdrien Mazarguil 
17350c0db76fSBernard Iremonger 	fwd_config_setup();
1736b7091f1dSJiayu Hu 
1737b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1738b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1739b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1740b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1741b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1742b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1743b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1744b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1745b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1746b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1747b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1748b7091f1dSJiayu Hu 		}
1749b7091f1dSJiayu Hu 	}
1750ce8d5614SIntel }
1751ce8d5614SIntel 
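/*
 * Sizing note for the pools created in init_config(): without
 * --total-num-mbufs, each pool is dimensioned as
 *
 *	(RTE_TEST_RX_DESC_MAX + nb_lcores * mb_mempool_cache +
 *	 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST) * RTE_MAX_ETHPORTS
 *
 * mbufs, i.e. enough for maximally sized Rx and Tx rings on every
 * port plus the per-lcore mempool caches.
 */
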
17522950a769SDeclan Doherty 
17532950a769SDeclan Doherty void
1754a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
17552950a769SDeclan Doherty {
17562950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
1757b6b8a1ebSViacheslav Ovsiienko 	init_config_port_offloads(new_port_id, socket_id);
17582950a769SDeclan Doherty 	init_port_config();
17592950a769SDeclan Doherty }
17602950a769SDeclan Doherty 
17612950a769SDeclan Doherty 
1762ce8d5614SIntel int
1763ce8d5614SIntel init_fwd_streams(void)
1764ce8d5614SIntel {
1765ce8d5614SIntel 	portid_t pid;
1766ce8d5614SIntel 	struct rte_port *port;
1767ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
17685a8fb55cSReshma Pattan 	queueid_t q;
1769ce8d5614SIntel 
1770ce8d5614SIntel 	/* set socket id according to NUMA mode */
17717d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1772ce8d5614SIntel 		port = &ports[pid];
1773ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
177461a3b0e5SAndrew Rybchenko 			fprintf(stderr,
177561a3b0e5SAndrew Rybchenko 				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
177661a3b0e5SAndrew Rybchenko 				nb_rxq, port->dev_info.max_rx_queues);
1777ce8d5614SIntel 			return -1;
1778ce8d5614SIntel 		}
1779ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
178061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
178161a3b0e5SAndrew Rybchenko 				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
178261a3b0e5SAndrew Rybchenko 				nb_txq, port->dev_info.max_tx_queues);
1783ce8d5614SIntel 			return -1;
1784ce8d5614SIntel 		}
178520a0286fSLiu Xiaofeng 		if (numa_support) {
178620a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
178720a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
178820a0286fSLiu Xiaofeng 			else {
1789b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
179020a0286fSLiu Xiaofeng 
179129841336SPhil Yang 				/*
179229841336SPhil Yang 				 * if socket_id is invalid,
179329841336SPhil Yang 				 * set to the first available socket.
179429841336SPhil Yang 				 */
179520a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
179629841336SPhil Yang 					port->socket_id = socket_ids[0];
179720a0286fSLiu Xiaofeng 			}
179820a0286fSLiu Xiaofeng 		}
1799b6ea6408SIntel 		else {
1800b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1801af75078fSIntel 				port->socket_id = 0;
1802b6ea6408SIntel 			else
1803b6ea6408SIntel 				port->socket_id = socket_num;
1804b6ea6408SIntel 		}
1805af75078fSIntel 	}
1806af75078fSIntel 
18075a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
18085a8fb55cSReshma Pattan 	if (q == 0) {
180961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
181061a3b0e5SAndrew Rybchenko 			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
18115a8fb55cSReshma Pattan 		return -1;
18125a8fb55cSReshma Pattan 	}
18135a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1814ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1815ce8d5614SIntel 		return 0;
1816ce8d5614SIntel 	/* clear the old */
1817ce8d5614SIntel 	if (fwd_streams != NULL) {
1818ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1819ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1820ce8d5614SIntel 				continue;
1821ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1822ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1823af75078fSIntel 		}
1824ce8d5614SIntel 		rte_free(fwd_streams);
1825ce8d5614SIntel 		fwd_streams = NULL;
1826ce8d5614SIntel 	}
1827ce8d5614SIntel 
1828ce8d5614SIntel 	/* init new */
1829ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
18301f84c469SMatan Azrad 	if (nb_fwd_streams) {
1831ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
18321f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
18331f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1834ce8d5614SIntel 		if (fwd_streams == NULL)
18351f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
18361f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
18371f84c469SMatan Azrad 				 nb_fwd_streams);
1838ce8d5614SIntel 
1839af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
18401f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
18411f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
18421f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1843ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
18441f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
18451f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
18461f84c469SMatan Azrad 		}
1847af75078fSIntel 	}
1848ce8d5614SIntel 
1849ce8d5614SIntel 	return 0;
1850af75078fSIntel }
1851af75078fSIntel 
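/*
 * Worked example for init_fwd_streams() (hypothetical values): with
 * 2 ports, nb_rxq = 4 and nb_txq = 2, q = RTE_MAX(4, 2) = 4 and
 * nb_fwd_streams = 2 * 4 = 8 stream structures are allocated.
 */
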
1852af75078fSIntel static void
1853af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1854af75078fSIntel {
18557569b8c1SHonnappa Nagarahalli 	uint64_t total_burst, sburst;
185685de481aSHonnappa Nagarahalli 	uint64_t nb_burst;
18577569b8c1SHonnappa Nagarahalli 	uint64_t burst_stats[4];
18587569b8c1SHonnappa Nagarahalli 	uint16_t pktnb_stats[4];
1859af75078fSIntel 	uint16_t nb_pkt;
18607569b8c1SHonnappa Nagarahalli 	int burst_percent[4], sburstp;
18617569b8c1SHonnappa Nagarahalli 	int i;
1862af75078fSIntel 
1863af75078fSIntel 	/*
1864af75078fSIntel 	 * First compute the total number of packet bursts and the two
1865af75078fSIntel 	 * most frequent non-zero burst sizes.
1866af75078fSIntel 	 */
18677569b8c1SHonnappa Nagarahalli 	memset(&burst_stats, 0x0, sizeof(burst_stats));
18687569b8c1SHonnappa Nagarahalli 	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
18697569b8c1SHonnappa Nagarahalli 
18707569b8c1SHonnappa Nagarahalli 	/* Show stats for 0 burst size always */
18717569b8c1SHonnappa Nagarahalli 	total_burst = pbs->pkt_burst_spread[0];
18727569b8c1SHonnappa Nagarahalli 	burst_stats[0] = pbs->pkt_burst_spread[0];
18737569b8c1SHonnappa Nagarahalli 	pktnb_stats[0] = 0;
18747569b8c1SHonnappa Nagarahalli 
18757569b8c1SHonnappa Nagarahalli 	/* Find the next 2 burst sizes with highest occurrences. */
18767569b8c1SHonnappa Nagarahalli 	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1877af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
18787569b8c1SHonnappa Nagarahalli 
1879af75078fSIntel 		if (nb_burst == 0)
1880af75078fSIntel 			continue;
18817569b8c1SHonnappa Nagarahalli 
1882af75078fSIntel 		total_burst += nb_burst;
18837569b8c1SHonnappa Nagarahalli 
18847569b8c1SHonnappa Nagarahalli 		if (nb_burst > burst_stats[1]) {
18857569b8c1SHonnappa Nagarahalli 			burst_stats[2] = burst_stats[1];
18867569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = pktnb_stats[1];
1887fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1888fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
18897569b8c1SHonnappa Nagarahalli 		} else if (nb_burst > burst_stats[2]) {
18907569b8c1SHonnappa Nagarahalli 			burst_stats[2] = nb_burst;
18917569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = nb_pkt;
1892af75078fSIntel 		}
1893af75078fSIntel 	}
1894af75078fSIntel 	if (total_burst == 0)
1895af75078fSIntel 		return;
18967569b8c1SHonnappa Nagarahalli 
18977569b8c1SHonnappa Nagarahalli 	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
18987569b8c1SHonnappa Nagarahalli 	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
18997569b8c1SHonnappa Nagarahalli 		if (i == 3) {
19007569b8c1SHonnappa Nagarahalli 			printf("%d%% of other]\n", 100 - sburstp);
1901af75078fSIntel 			return;
1902af75078fSIntel 		}
19037569b8c1SHonnappa Nagarahalli 
19047569b8c1SHonnappa Nagarahalli 		sburst += burst_stats[i];
19057569b8c1SHonnappa Nagarahalli 		if (sburst == total_burst) {
19067569b8c1SHonnappa Nagarahalli 			printf("%d%% of %d pkts]\n",
19077569b8c1SHonnappa Nagarahalli 				100 - sburstp, (int) pktnb_stats[i]);
1908af75078fSIntel 			return;
1909af75078fSIntel 		}
19107569b8c1SHonnappa Nagarahalli 
19117569b8c1SHonnappa Nagarahalli 		burst_percent[i] =
19127569b8c1SHonnappa Nagarahalli 			(double)burst_stats[i] / total_burst * 100;
19137569b8c1SHonnappa Nagarahalli 		printf("%d%% of %d pkts + ",
19147569b8c1SHonnappa Nagarahalli 			burst_percent[i], (int) pktnb_stats[i]);
19157569b8c1SHonnappa Nagarahalli 		sburstp += burst_percent[i];
1916af75078fSIntel 	}
1917af75078fSIntel }
1918af75078fSIntel 
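/*
 * Example output of pkt_burst_stats_display() (numbers are made up):
 *
 *	RX-bursts : 1000 [10% of 0 pkts + 80% of 32 pkts + 5% of 16 pkts + 5% of other]
 *
 * The zero-size bucket is always shown first, followed by the two most
 * frequent burst sizes, with the remainder folded into "other".
 */
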
1919af75078fSIntel static void
1920af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1921af75078fSIntel {
1922af75078fSIntel 	struct fwd_stream *fs;
1923af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1924af75078fSIntel 
1925af75078fSIntel 	fs = fwd_streams[stream_id];
1926af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1927af75078fSIntel 	    (fs->fwd_dropped == 0))
1928af75078fSIntel 		return;
1929af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1930af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1931af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1932af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1933c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1934c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1935af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1936af75078fSIntel 
1937af75078fSIntel 	/* if checksum mode */
1938af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1939c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1940c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1941c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
194258d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
194358d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
1944d139cf23SLance Richardson 		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1945d139cf23SLance Richardson 			fs->rx_bad_outer_ip_csum);
194694d65546SDavid Marchand 	} else {
194794d65546SDavid Marchand 		printf("\n");
1948af75078fSIntel 	}
1949af75078fSIntel 
19500e4b1963SDharmik Thakkar 	if (record_burst_stats) {
1951af75078fSIntel 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1952af75078fSIntel 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
19530e4b1963SDharmik Thakkar 	}
1954af75078fSIntel }
1955af75078fSIntel 
195653324971SDavid Marchand void
195753324971SDavid Marchand fwd_stats_display(void)
195853324971SDavid Marchand {
195953324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
196053324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
196153324971SDavid Marchand 	struct {
196253324971SDavid Marchand 		struct fwd_stream *rx_stream;
196353324971SDavid Marchand 		struct fwd_stream *tx_stream;
196453324971SDavid Marchand 		uint64_t tx_dropped;
196553324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
196653324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
196753324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
1968d139cf23SLance Richardson 		uint64_t rx_bad_outer_ip_csum;
196953324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
197053324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
197153324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
197253324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
197353324971SDavid Marchand 	struct rte_eth_stats stats;
197453324971SDavid Marchand 	uint64_t fwd_cycles = 0;
197553324971SDavid Marchand 	uint64_t total_recv = 0;
197653324971SDavid Marchand 	uint64_t total_xmit = 0;
197753324971SDavid Marchand 	struct rte_port *port;
197853324971SDavid Marchand 	streamid_t sm_id;
197953324971SDavid Marchand 	portid_t pt_id;
198053324971SDavid Marchand 	int i;
198153324971SDavid Marchand 
198253324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
198353324971SDavid Marchand 
198453324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
198553324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
198653324971SDavid Marchand 
198753324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
198853324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
198953324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
199053324971SDavid Marchand 		} else {
199153324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
199253324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
199353324971SDavid Marchand 		}
199453324971SDavid Marchand 
199553324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
199653324971SDavid Marchand 
199753324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
199853324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
199953324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
200053324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
2001d139cf23SLance Richardson 		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2002d139cf23SLance Richardson 				fs->rx_bad_outer_ip_csum;
200353324971SDavid Marchand 
2004bc700b67SDharmik Thakkar 		if (record_core_cycles)
200553324971SDavid Marchand 			fwd_cycles += fs->core_cycles;
200653324971SDavid Marchand 	}
200753324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
200853324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
200953324971SDavid Marchand 		port = &ports[pt_id];
201053324971SDavid Marchand 
201153324971SDavid Marchand 		rte_eth_stats_get(pt_id, &stats);
201253324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
201353324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
201453324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
201553324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
201653324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
201753324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
201853324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
201953324971SDavid Marchand 
202053324971SDavid Marchand 		total_recv += stats.ipackets;
202153324971SDavid Marchand 		total_xmit += stats.opackets;
202253324971SDavid Marchand 		total_rx_dropped += stats.imissed;
202353324971SDavid Marchand 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
202453324971SDavid Marchand 		total_tx_dropped += stats.oerrors;
202553324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
202653324971SDavid Marchand 
202753324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
202853324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
202953324971SDavid Marchand 
203008dcd187SHuisong Li 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
203108dcd187SHuisong Li 		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
203253324971SDavid Marchand 		       stats.ipackets + stats.imissed);
203353324971SDavid Marchand 
2034d139cf23SLance Richardson 		if (cur_fwd_eng == &csum_fwd_engine) {
203553324971SDavid Marchand 			printf("  Bad-ipcsum: %-14"PRIu64
203653324971SDavid Marchand 			       " Bad-l4csum: %-14"PRIu64
203753324971SDavid Marchand 			       "Bad-outer-l4csum: %-14"PRIu64"\n",
203853324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_ip_csum,
203953324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_l4_csum,
204053324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_outer_l4_csum);
2041d139cf23SLance Richardson 			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
2042d139cf23SLance Richardson 			       ports_stats[pt_id].rx_bad_outer_ip_csum);
2043d139cf23SLance Richardson 		}
204453324971SDavid Marchand 		if (stats.ierrors + stats.rx_nombuf > 0) {
204508dcd187SHuisong Li 			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
204608dcd187SHuisong Li 			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
204753324971SDavid Marchand 		}
204853324971SDavid Marchand 
204908dcd187SHuisong Li 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
205053324971SDavid Marchand 		       "TX-total: %-"PRIu64"\n",
205153324971SDavid Marchand 		       stats.opackets, ports_stats[pt_id].tx_dropped,
205253324971SDavid Marchand 		       stats.opackets + ports_stats[pt_id].tx_dropped);
205353324971SDavid Marchand 
20540e4b1963SDharmik Thakkar 		if (record_burst_stats) {
205553324971SDavid Marchand 			if (ports_stats[pt_id].rx_stream)
205653324971SDavid Marchand 				pkt_burst_stats_display("RX",
205753324971SDavid Marchand 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
205853324971SDavid Marchand 			if (ports_stats[pt_id].tx_stream)
205953324971SDavid Marchand 				pkt_burst_stats_display("TX",
206053324971SDavid Marchand 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
20610e4b1963SDharmik Thakkar 		}
206253324971SDavid Marchand 
206353324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
206453324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
206553324971SDavid Marchand 	}
206653324971SDavid Marchand 
206753324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
206853324971SDavid Marchand 	       "%s\n",
206953324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
207053324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
207153324971SDavid Marchand 	       "%-"PRIu64"\n"
207253324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
207353324971SDavid Marchand 	       "%-"PRIu64"\n",
207453324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
207553324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
207653324971SDavid Marchand 	if (total_rx_nombuf > 0)
207753324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
207853324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
207953324971SDavid Marchand 	       "%s\n",
208053324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
2081bc700b67SDharmik Thakkar 	if (record_core_cycles) {
20824c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6
20833a164e00SPhil Yang 		if (total_recv > 0 || total_xmit > 0) {
20843a164e00SPhil Yang 			uint64_t total_pkts = 0;
20853a164e00SPhil Yang 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
20863a164e00SPhil Yang 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
20873a164e00SPhil Yang 				total_pkts = total_xmit;
20883a164e00SPhil Yang 			else
20893a164e00SPhil Yang 				total_pkts = total_recv;
20903a164e00SPhil Yang 
20911920832aSDharmik Thakkar 			printf("\n  CPU cycles/packet=%.2F (total cycles="
20923a164e00SPhil Yang 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
20934c0497b1SDharmik Thakkar 			       " MHz Clock\n",
20943a164e00SPhil Yang 			       (double) fwd_cycles / total_pkts,
20953a164e00SPhil Yang 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
20964c0497b1SDharmik Thakkar 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
20973a164e00SPhil Yang 		}
2098bc700b67SDharmik Thakkar 	}
209953324971SDavid Marchand }
210053324971SDavid Marchand 
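/*
 * Worked example for the CPU cycles summary above (hypothetical
 * numbers): with record_core_cycles enabled, fwd_cycles = 2500000000
 * and total_recv = 100000000 packets give 25.00 CPU cycles/packet,
 * printed together with the TSC frequency in MHz.
 */
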
210153324971SDavid Marchand void
210253324971SDavid Marchand fwd_stats_reset(void)
210353324971SDavid Marchand {
210453324971SDavid Marchand 	streamid_t sm_id;
210553324971SDavid Marchand 	portid_t pt_id;
210653324971SDavid Marchand 	int i;
210753324971SDavid Marchand 
210853324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
210953324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
211053324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
211153324971SDavid Marchand 	}
211253324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
211353324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
211453324971SDavid Marchand 
211553324971SDavid Marchand 		fs->rx_packets = 0;
211653324971SDavid Marchand 		fs->tx_packets = 0;
211753324971SDavid Marchand 		fs->fwd_dropped = 0;
211853324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
211953324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
212053324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
2121d139cf23SLance Richardson 		fs->rx_bad_outer_ip_csum = 0;
212253324971SDavid Marchand 
212353324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
212453324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
212553324971SDavid Marchand 		fs->core_cycles = 0;
212653324971SDavid Marchand 	}
212753324971SDavid Marchand }
212853324971SDavid Marchand 
2129af75078fSIntel static void
21307741e4cfSIntel flush_fwd_rx_queues(void)
2131af75078fSIntel {
2132af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2133af75078fSIntel 	portid_t  rxp;
21347741e4cfSIntel 	portid_t port_id;
2135af75078fSIntel 	queueid_t rxq;
2136af75078fSIntel 	uint16_t  nb_rx;
2137af75078fSIntel 	uint16_t  i;
2138af75078fSIntel 	uint8_t   j;
2139f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2140594302c7SJames Poole 	uint64_t timer_period;
2141f487715fSReshma Pattan 
2142a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
2143a550baf2SMin Hu (Connor) 		printf("flushing fwd Rx queues is not supported in multi-process mode, skipping.\n");
2144a550baf2SMin Hu (Connor) 		return;
2145a550baf2SMin Hu (Connor) 	}
2146a550baf2SMin Hu (Connor) 
2147f487715fSReshma Pattan 	/* convert to number of cycles */
2148594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2149af75078fSIntel 
2150af75078fSIntel 	for (j = 0; j < 2; j++) {
21517741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2152af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
21537741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
2154f487715fSReshma Pattan 				/**
2155f487715fSReshma Pattan 				* testpmd can get stuck in the do/while loop
2156f487715fSReshma Pattan 				* below if rte_eth_rx_burst() keeps returning
2157f487715fSReshma Pattan 				* packets. A timer is added to exit this loop
2158f487715fSReshma Pattan 				* after a 1-second timeout.
2159f487715fSReshma Pattan 				*/
2160f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2161af75078fSIntel 				do {
21627741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2163013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2164af75078fSIntel 					for (i = 0; i < nb_rx; i++)
2165af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
2166f487715fSReshma Pattan 
2167f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2168f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2169f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2170f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2171f487715fSReshma Pattan 					(timer_tsc < timer_period));
2172f487715fSReshma Pattan 				timer_tsc = 0;
2173af75078fSIntel 			}
2174af75078fSIntel 		}
2175af75078fSIntel 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2176af75078fSIntel 	}
2177af75078fSIntel }
2178af75078fSIntel 
2179af75078fSIntel static void
2180af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2181af75078fSIntel {
2182af75078fSIntel 	struct fwd_stream **fsm;
2183af75078fSIntel 	streamid_t nb_fs;
2184af75078fSIntel 	streamid_t sm_id;
2185a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
21867e4441c8SRemy Horton 	uint64_t tics_per_1sec;
21877e4441c8SRemy Horton 	uint64_t tics_datum;
21887e4441c8SRemy Horton 	uint64_t tics_current;
21894918a357SXiaoyun Li 	uint16_t i, cnt_ports;
2190af75078fSIntel 
21914918a357SXiaoyun Li 	cnt_ports = nb_ports;
21927e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
21937e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
21947e4441c8SRemy Horton #endif
2195af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
2196af75078fSIntel 	nb_fs = fc->stream_nb;
2197af75078fSIntel 	do {
2198af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
2199af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
2200a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
2201e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
2202e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
22037e4441c8SRemy Horton 			tics_current = rte_rdtsc();
22047e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
22057e4441c8SRemy Horton 				/* Periodic bitrate calculation */
22064918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
2207e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
22084918a357SXiaoyun Li 						ports_ids[i]);
22097e4441c8SRemy Horton 				tics_datum = tics_current;
22107e4441c8SRemy Horton 			}
2211e25e6c70SRemy Horton 		}
22127e4441c8SRemy Horton #endif
2213a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
221465eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
221565eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
221662d3216dSReshma Pattan 			rte_latencystats_update();
221762d3216dSReshma Pattan #endif
221862d3216dSReshma Pattan 
2219af75078fSIntel 	} while (! fc->stopped);
2220af75078fSIntel }
2221af75078fSIntel 
2222af75078fSIntel static int
2223af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2224af75078fSIntel {
2225af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2226af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2227af75078fSIntel 	return 0;
2228af75078fSIntel }
2229af75078fSIntel 
2230af75078fSIntel /*
2231af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2232af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2233af75078fSIntel  */
2234af75078fSIntel static int
2235af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2236af75078fSIntel {
2237af75078fSIntel 	struct fwd_lcore *fwd_lc;
2238af75078fSIntel 	struct fwd_lcore tmp_lcore;
2239af75078fSIntel 
2240af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2241af75078fSIntel 	tmp_lcore = *fwd_lc;
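	/*
	 * Run on a local copy of the lcore context with "stopped" pre-set:
	 * the do/while loop in run_pkt_fwd_on_lcore() then performs exactly
	 * one pass, i.e. a single burst is sent on each stream of this lcore.
	 */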
2242af75078fSIntel 	tmp_lcore.stopped = 1;
2243af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2244af75078fSIntel 	return 0;
2245af75078fSIntel }
2246af75078fSIntel 
2247af75078fSIntel /*
2248af75078fSIntel  * Launch packet forwarding:
2249af75078fSIntel  *     - Setup per-port forwarding context.
2250af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2251af75078fSIntel  */
2252af75078fSIntel static void
2253af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2254af75078fSIntel {
2255af75078fSIntel 	unsigned int i;
2256af75078fSIntel 	unsigned int lc_id;
2257af75078fSIntel 	int diag;
2258af75078fSIntel 
2259af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2260af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2261af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2262af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2263af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2264af75078fSIntel 						     fwd_lcores[i], lc_id);
2265af75078fSIntel 			if (diag != 0)
226661a3b0e5SAndrew Rybchenko 				fprintf(stderr,
226761a3b0e5SAndrew Rybchenko 					"launch lcore %u failed - diag=%d\n",
2268af75078fSIntel 					lc_id, diag);
2269af75078fSIntel 		}
2270af75078fSIntel 	}
2271af75078fSIntel }
2272af75078fSIntel 
2273af75078fSIntel /*
2274af75078fSIntel  * Launch packet forwarding configuration.
2275af75078fSIntel  */
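/*
 * CLI usage sketch (assuming the standard testpmd "start" commands):
 *     testpmd> start              -> with_tx_first == 0
 *     testpmd> start tx_first 4   -> with_tx_first == 4: send four TXONLY
 *                                    bursts first, then run the configured
 *                                    forwarding engine.
 */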
2276af75078fSIntel void
2277af75078fSIntel start_packet_forwarding(int with_tx_first)
2278af75078fSIntel {
2279af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2280af75078fSIntel 	port_fwd_end_t  port_fwd_end;
2281af75078fSIntel 	unsigned int i;
2282af75078fSIntel 
22835a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
22845a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
22855a8fb55cSReshma Pattan 
22865a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
22875a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
22885a8fb55cSReshma Pattan 
22895a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
22905a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
22915a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
22925a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
22935a8fb55cSReshma Pattan 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
22945a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
22955a8fb55cSReshma Pattan 
2296ce8d5614SIntel 	if (all_ports_started() == 0) {
229761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Not all ports were started\n");
2298ce8d5614SIntel 		return;
2299ce8d5614SIntel 	}
2300af75078fSIntel 	if (test_done == 0) {
230161a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding already started\n");
2302af75078fSIntel 		return;
2303af75078fSIntel 	}
23047741e4cfSIntel 
230547a767b2SMatan Azrad 	fwd_config_setup();
230647a767b2SMatan Azrad 
2307a78040c9SAlvin Zhang 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2308a78040c9SAlvin Zhang 	if (port_fwd_begin != NULL) {
2309a78040c9SAlvin Zhang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2310a78040c9SAlvin Zhang 			if (port_fwd_begin(fwd_ports_ids[i])) {
2311a78040c9SAlvin Zhang 				fprintf(stderr,
2312a78040c9SAlvin Zhang 					"Packet forwarding is not ready\n");
2313a78040c9SAlvin Zhang 				return;
2314a78040c9SAlvin Zhang 			}
2315a78040c9SAlvin Zhang 		}
2316a78040c9SAlvin Zhang 	}
2317a78040c9SAlvin Zhang 
2318a78040c9SAlvin Zhang 	if (with_tx_first) {
2319a78040c9SAlvin Zhang 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2320a78040c9SAlvin Zhang 		if (port_fwd_begin != NULL) {
2321a78040c9SAlvin Zhang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2322a78040c9SAlvin Zhang 				if (port_fwd_begin(fwd_ports_ids[i])) {
2323a78040c9SAlvin Zhang 					fprintf(stderr,
2324a78040c9SAlvin Zhang 						"Packet forwarding is not ready\n");
2325a78040c9SAlvin Zhang 					return;
2326a78040c9SAlvin Zhang 				}
2327a78040c9SAlvin Zhang 			}
2328a78040c9SAlvin Zhang 		}
2329a78040c9SAlvin Zhang 	}
2330a78040c9SAlvin Zhang 
2331a78040c9SAlvin Zhang 	test_done = 0;
2332a78040c9SAlvin Zhang 
23337741e4cfSIntel 	if (!no_flush_rx)
23347741e4cfSIntel 		flush_fwd_rx_queues();
23357741e4cfSIntel 
2336933617d8SZhihong Wang 	pkt_fwd_config_display(&cur_fwd_config);
2337af75078fSIntel 	rxtx_config_display();
2338af75078fSIntel 
233953324971SDavid Marchand 	fwd_stats_reset();
2340af75078fSIntel 	if (with_tx_first) {
2341acbf77a6SZhihong Wang 		while (with_tx_first--) {
2342acbf77a6SZhihong Wang 			launch_packet_forwarding(
2343acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2344af75078fSIntel 			rte_eal_mp_wait_lcore();
2345acbf77a6SZhihong Wang 		}
2346af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2347af75078fSIntel 		if (port_fwd_end != NULL) {
2348af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2349af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2350af75078fSIntel 		}
2351af75078fSIntel 	}
2352af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2353af75078fSIntel }
2354af75078fSIntel 
2355af75078fSIntel void
2356af75078fSIntel stop_packet_forwarding(void)
2357af75078fSIntel {
2358af75078fSIntel 	port_fwd_end_t port_fwd_end;
2359af75078fSIntel 	lcoreid_t lc_id;
236053324971SDavid Marchand 	portid_t pt_id;
236153324971SDavid Marchand 	int i;
2362af75078fSIntel 
2363af75078fSIntel 	if (test_done) {
236461a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding not started\n");
2365af75078fSIntel 		return;
2366af75078fSIntel 	}
2367af75078fSIntel 	printf("Telling cores to stop...");
2368af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2369af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2370af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2371af75078fSIntel 	rte_eal_mp_wait_lcore();
2372af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2373af75078fSIntel 	if (port_fwd_end != NULL) {
2374af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2375af75078fSIntel 			pt_id = fwd_ports_ids[i];
2376af75078fSIntel 			(*port_fwd_end)(pt_id);
2377af75078fSIntel 		}
2378af75078fSIntel 	}
2379c185d42cSDavid Marchand 
238053324971SDavid Marchand 	fwd_stats_display();
238158d475b7SJerin Jacob 
2382af75078fSIntel 	printf("\nDone.\n");
2383af75078fSIntel 	test_done = 1;
2384af75078fSIntel }
2385af75078fSIntel 
2386cfae07fdSOuyang Changchun void
2387cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2388cfae07fdSOuyang Changchun {
2389492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
239061a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link up failed.\n");
2391cfae07fdSOuyang Changchun }
2392cfae07fdSOuyang Changchun 
2393cfae07fdSOuyang Changchun void
2394cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2395cfae07fdSOuyang Changchun {
2396492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
239761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link down failed.\n");
2398cfae07fdSOuyang Changchun }
2399cfae07fdSOuyang Changchun 
2400ce8d5614SIntel static int
2401ce8d5614SIntel all_ports_started(void)
2402ce8d5614SIntel {
2403ce8d5614SIntel 	portid_t pi;
2404ce8d5614SIntel 	struct rte_port *port;
2405ce8d5614SIntel 
24067d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2407ce8d5614SIntel 		port = &ports[pi];
2408ce8d5614SIntel 		/* Check if there is a port which is not started */
240941b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
241041b05095SBernard Iremonger 			(port->slave_flag == 0))
2411ce8d5614SIntel 			return 0;
2412ce8d5614SIntel 	}
2413ce8d5614SIntel 
2414ce8d5614SIntel 	/* All ports (ignoring bonding slaves) are started */
2415ce8d5614SIntel 	return 1;
2416ce8d5614SIntel }
2417ce8d5614SIntel 
2418148f963fSBruce Richardson int
24196018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
24206018eb8cSShahaf Shuler {
24216018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
24226018eb8cSShahaf Shuler 
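	/*
	 * Bonding slaves are managed through their master port and always
	 * report as stopped here, whatever their actual state.
	 */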
24236018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
24246018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
24256018eb8cSShahaf Shuler 		return 0;
24266018eb8cSShahaf Shuler 	return 1;
24276018eb8cSShahaf Shuler }
24286018eb8cSShahaf Shuler 
24296018eb8cSShahaf Shuler int
2430edab33b1STetsuya Mukawa all_ports_stopped(void)
2431edab33b1STetsuya Mukawa {
2432edab33b1STetsuya Mukawa 	portid_t pi;
2433edab33b1STetsuya Mukawa 
24347d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
24356018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2436edab33b1STetsuya Mukawa 			return 0;
2437edab33b1STetsuya Mukawa 	}
2438edab33b1STetsuya Mukawa 
2439edab33b1STetsuya Mukawa 	return 1;
2440edab33b1STetsuya Mukawa }
2441edab33b1STetsuya Mukawa 
2442edab33b1STetsuya Mukawa int
2443edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2444edab33b1STetsuya Mukawa {
2445edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2446edab33b1STetsuya Mukawa 		return 0;
2447edab33b1STetsuya Mukawa 
2448edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2449edab33b1STetsuya Mukawa 		return 0;
2450edab33b1STetsuya Mukawa 
2451edab33b1STetsuya Mukawa 	return 1;
2452edab33b1STetsuya Mukawa }
2453edab33b1STetsuya Mukawa 
24541c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
24551c69df45SOri Kam static int
245601817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
24571c69df45SOri Kam {
24581c69df45SOri Kam 	queueid_t qi;
24591c69df45SOri Kam 	struct rte_eth_hairpin_conf hairpin_conf = {
24601c69df45SOri Kam 		.peer_count = 1,
24611c69df45SOri Kam 	};
24621c69df45SOri Kam 	int i;
24631c69df45SOri Kam 	int diag;
24641c69df45SOri Kam 	struct rte_port *port = &ports[pi];
246501817b10SBing Zhao 	uint16_t peer_rx_port = pi;
246601817b10SBing Zhao 	uint16_t peer_tx_port = pi;
246701817b10SBing Zhao 	uint32_t manual = 1;
246801817b10SBing Zhao 	uint32_t tx_exp = hairpin_mode & 0x10;
246901817b10SBing Zhao 
247001817b10SBing Zhao 	if (!(hairpin_mode & 0xf)) {
247101817b10SBing Zhao 		peer_rx_port = pi;
247201817b10SBing Zhao 		peer_tx_port = pi;
247301817b10SBing Zhao 		manual = 0;
247401817b10SBing Zhao 	} else if (hairpin_mode & 0x1) {
247501817b10SBing Zhao 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
247601817b10SBing Zhao 						       RTE_ETH_DEV_NO_OWNER);
247701817b10SBing Zhao 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
247801817b10SBing Zhao 			peer_tx_port = rte_eth_find_next_owned_by(0,
247901817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
248001817b10SBing Zhao 		if (p_pi != RTE_MAX_ETHPORTS) {
248101817b10SBing Zhao 			peer_rx_port = p_pi;
248201817b10SBing Zhao 		} else {
248301817b10SBing Zhao 			uint16_t next_pi;
248401817b10SBing Zhao 
248501817b10SBing Zhao 			/* Last port will be the peer RX port of the first. */
248601817b10SBing Zhao 			RTE_ETH_FOREACH_DEV(next_pi)
248701817b10SBing Zhao 				peer_rx_port = next_pi;
248801817b10SBing Zhao 		}
248901817b10SBing Zhao 		manual = 1;
249001817b10SBing Zhao 	} else if (hairpin_mode & 0x2) {
249101817b10SBing Zhao 		if (cnt_pi & 0x1) {
249201817b10SBing Zhao 			peer_rx_port = p_pi;
249301817b10SBing Zhao 		} else {
249401817b10SBing Zhao 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
249501817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
249601817b10SBing Zhao 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
249701817b10SBing Zhao 				peer_rx_port = pi;
249801817b10SBing Zhao 		}
249901817b10SBing Zhao 		peer_tx_port = peer_rx_port;
250001817b10SBing Zhao 		manual = 1;
250101817b10SBing Zhao 	}
25021c69df45SOri Kam 
25031c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
250401817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_rx_port;
25051c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
250601817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
250701817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
25081c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
25091c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
25101c69df45SOri Kam 		i++;
25111c69df45SOri Kam 		if (diag == 0)
25121c69df45SOri Kam 			continue;
25131c69df45SOri Kam 
25141c69df45SOri Kam 		/* Fail to setup Tx hairpin queue, return */
25151c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
25161c69df45SOri Kam 					RTE_PORT_HANDLING,
25171c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
251861a3b0e5SAndrew Rybchenko 			fprintf(stderr,
251961a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
252061a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
252161a3b0e5SAndrew Rybchenko 			pi);
25221c69df45SOri Kam 		/* try to reconfigure queues next time */
25231c69df45SOri Kam 		port->need_reconfig_queues = 1;
25241c69df45SOri Kam 		return -1;
25251c69df45SOri Kam 	}
25261c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
252701817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_tx_port;
25281c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
252901817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
253001817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
25311c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
25321c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
25331c69df45SOri Kam 		i++;
25341c69df45SOri Kam 		if (diag == 0)
25351c69df45SOri Kam 			continue;
25361c69df45SOri Kam 
25371c69df45SOri Kam 		/* Fail to setup Rx hairpin queue, return */
25381c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
25391c69df45SOri Kam 					RTE_PORT_HANDLING,
25401c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
254161a3b0e5SAndrew Rybchenko 			fprintf(stderr,
254261a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
254361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
254461a3b0e5SAndrew Rybchenko 			pi);
25451c69df45SOri Kam 		/* try to reconfigure queues next time */
25461c69df45SOri Kam 		port->need_reconfig_queues = 1;
25471c69df45SOri Kam 		return -1;
25481c69df45SOri Kam 	}
25491c69df45SOri Kam 	return 0;
25501c69df45SOri Kam }
25511c69df45SOri Kam 
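/*
 * Sketch of the split layout built below, assuming two segment lengths and
 * no explicit offsets were configured (e.g. with the testpmd commands
 * "set rxpkts 64,1500" and "set rxoffs 0" - command names are illustrative):
 *     rx_useg[0].split = { .length = 64,   .offset = 0, .mp = pool 0 }
 *     rx_useg[1].split = { .length = 1500, .offset = 0, .mp = pool 1, or
 *                          pool 0 if only one --mbuf-size pool exists }
 * Each received packet is then split across mbufs taken from these pools.
 */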
25522befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */
25532befc67fSViacheslav Ovsiienko int
25542befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
25552befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
25562befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
25572befc67fSViacheslav Ovsiienko {
25582befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
25592befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
25602befc67fSViacheslav Ovsiienko 	int ret;
25612befc67fSViacheslav Ovsiienko 
25622befc67fSViacheslav Ovsiienko 	if (rx_pkt_nb_segs <= 1 ||
25632befc67fSViacheslav Ovsiienko 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
25642befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
25652befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
25662befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
25672befc67fSViacheslav Ovsiienko 					     nb_rx_desc, socket_id,
25682befc67fSViacheslav Ovsiienko 					     rx_conf, mp);
25692befc67fSViacheslav Ovsiienko 		return ret;
25702befc67fSViacheslav Ovsiienko 	}
25712befc67fSViacheslav Ovsiienko 	for (i = 0; i < rx_pkt_nb_segs; i++) {
25722befc67fSViacheslav Ovsiienko 		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
25732befc67fSViacheslav Ovsiienko 		struct rte_mempool *mpx;
25742befc67fSViacheslav Ovsiienko 		/*
25752befc67fSViacheslav Ovsiienko 		 * Use the last valid pool for any segment whose index
25762befc67fSViacheslav Ovsiienko 		 * is beyond the number of configured mempools.
25772befc67fSViacheslav Ovsiienko 		 */
25782befc67fSViacheslav Ovsiienko 		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
25792befc67fSViacheslav Ovsiienko 		mpx = mbuf_pool_find(socket_id, mp_n);
25802befc67fSViacheslav Ovsiienko 		/* A zero length means: use the pool's mbuf data buffer size. */
25812befc67fSViacheslav Ovsiienko 		rx_seg->length = rx_pkt_seg_lengths[i] ?
25822befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_lengths[i] :
25832befc67fSViacheslav Ovsiienko 				   mbuf_data_size[mp_n];
25842befc67fSViacheslav Ovsiienko 		rx_seg->offset = i < rx_pkt_nb_offs ?
25852befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_offsets[i] : 0;
25862befc67fSViacheslav Ovsiienko 		rx_seg->mp = mpx ? mpx : mp;
25872befc67fSViacheslav Ovsiienko 	}
25882befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = rx_pkt_nb_segs;
25892befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = rx_useg;
25902befc67fSViacheslav Ovsiienko 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
25912befc67fSViacheslav Ovsiienko 				    socket_id, rx_conf, NULL);
25922befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = NULL;
25932befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = 0;
25942befc67fSViacheslav Ovsiienko 	return ret;
25952befc67fSViacheslav Ovsiienko }
25962befc67fSViacheslav Ovsiienko 
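/*
 * The helpers below maintain the per-port buffers behind the xstats chosen
 * with the --display-xstats option (assumed here: xstats_display[] holds the
 * requested statistic names and xstats_display_num their count).
 */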
259763b72657SIvan Ilchenko static int
259863b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi)
259963b72657SIvan Ilchenko {
260063b72657SIvan Ilchenko 	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
260163b72657SIvan Ilchenko 	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
260263b72657SIvan Ilchenko 	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
260363b72657SIvan Ilchenko 
260463b72657SIvan Ilchenko 	if (xstats_display_num == 0)
260563b72657SIvan Ilchenko 		return 0;
260663b72657SIvan Ilchenko 
260763b72657SIvan Ilchenko 	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
260863b72657SIvan Ilchenko 	if (*ids_supp == NULL)
260963b72657SIvan Ilchenko 		goto fail_ids_supp;
261063b72657SIvan Ilchenko 
261163b72657SIvan Ilchenko 	*prev_values = calloc(xstats_display_num,
261263b72657SIvan Ilchenko 			      sizeof(**prev_values));
261363b72657SIvan Ilchenko 	if (*prev_values == NULL)
261463b72657SIvan Ilchenko 		goto fail_prev_values;
261563b72657SIvan Ilchenko 
261663b72657SIvan Ilchenko 	*curr_values = calloc(xstats_display_num,
261763b72657SIvan Ilchenko 			      sizeof(**curr_values));
261863b72657SIvan Ilchenko 	if (*curr_values == NULL)
261963b72657SIvan Ilchenko 		goto fail_curr_values;
262063b72657SIvan Ilchenko 
262163b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = true;
262263b72657SIvan Ilchenko 
262363b72657SIvan Ilchenko 	return 0;
262463b72657SIvan Ilchenko 
262563b72657SIvan Ilchenko fail_curr_values:
262663b72657SIvan Ilchenko 	free(*prev_values);
262763b72657SIvan Ilchenko fail_prev_values:
262863b72657SIvan Ilchenko 	free(*ids_supp);
262963b72657SIvan Ilchenko fail_ids_supp:
263063b72657SIvan Ilchenko 	return -ENOMEM;
263163b72657SIvan Ilchenko }
263263b72657SIvan Ilchenko 
263363b72657SIvan Ilchenko static void
263463b72657SIvan Ilchenko free_xstats_display_info(portid_t pi)
263563b72657SIvan Ilchenko {
263663b72657SIvan Ilchenko 	if (!ports[pi].xstats_info.allocated)
263763b72657SIvan Ilchenko 		return;
263863b72657SIvan Ilchenko 	free(ports[pi].xstats_info.ids_supp);
263963b72657SIvan Ilchenko 	free(ports[pi].xstats_info.prev_values);
264063b72657SIvan Ilchenko 	free(ports[pi].xstats_info.curr_values);
264163b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = false;
264263b72657SIvan Ilchenko }
264363b72657SIvan Ilchenko 
264463b72657SIvan Ilchenko /** Fill helper structures for the specified port to show extended statistics. */
264563b72657SIvan Ilchenko static void
264663b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
264763b72657SIvan Ilchenko {
264863b72657SIvan Ilchenko 	unsigned int stat, stat_supp;
264963b72657SIvan Ilchenko 	const char *xstat_name;
265063b72657SIvan Ilchenko 	struct rte_port *port;
265163b72657SIvan Ilchenko 	uint64_t *ids_supp;
265263b72657SIvan Ilchenko 	int rc;
265363b72657SIvan Ilchenko 
265463b72657SIvan Ilchenko 	if (xstats_display_num == 0)
265563b72657SIvan Ilchenko 		return;
265663b72657SIvan Ilchenko 
265763b72657SIvan Ilchenko 	if (pi == (portid_t)RTE_PORT_ALL) {
265863b72657SIvan Ilchenko 		fill_xstats_display_info();
265963b72657SIvan Ilchenko 		return;
266063b72657SIvan Ilchenko 	}
266163b72657SIvan Ilchenko 
266263b72657SIvan Ilchenko 	port = &ports[pi];
266363b72657SIvan Ilchenko 	if (port->port_status != RTE_PORT_STARTED)
266463b72657SIvan Ilchenko 		return;
266563b72657SIvan Ilchenko 
266663b72657SIvan Ilchenko 	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
266763b72657SIvan Ilchenko 		rte_exit(EXIT_FAILURE,
266863b72657SIvan Ilchenko 			 "Failed to allocate xstats display memory\n");
266963b72657SIvan Ilchenko 
267063b72657SIvan Ilchenko 	ids_supp = port->xstats_info.ids_supp;
267163b72657SIvan Ilchenko 	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
267263b72657SIvan Ilchenko 		xstat_name = xstats_display[stat].name;
267363b72657SIvan Ilchenko 		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
267463b72657SIvan Ilchenko 						   ids_supp + stat_supp);
267563b72657SIvan Ilchenko 		if (rc != 0) {
267663b72657SIvan Ilchenko 			fprintf(stderr, "No xstat '%s' on port %u - skipping (index %u)\n",
267763b72657SIvan Ilchenko 				xstat_name, pi, stat);
267863b72657SIvan Ilchenko 			continue;
267963b72657SIvan Ilchenko 		}
268063b72657SIvan Ilchenko 		stat_supp++;
268163b72657SIvan Ilchenko 	}
268263b72657SIvan Ilchenko 
268363b72657SIvan Ilchenko 	port->xstats_info.ids_supp_sz = stat_supp;
268463b72657SIvan Ilchenko }
268563b72657SIvan Ilchenko 
268663b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */
268763b72657SIvan Ilchenko static void
268863b72657SIvan Ilchenko fill_xstats_display_info(void)
268963b72657SIvan Ilchenko {
269063b72657SIvan Ilchenko 	portid_t pi;
269163b72657SIvan Ilchenko 
269263b72657SIvan Ilchenko 	if (xstats_display_num == 0)
269363b72657SIvan Ilchenko 		return;
269463b72657SIvan Ilchenko 
269563b72657SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(pi)
269663b72657SIvan Ilchenko 		fill_xstats_display_info_for_port(pi);
269763b72657SIvan Ilchenko }
269863b72657SIvan Ilchenko 
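/*
 * Start one port, or all ports when pid == RTE_PORT_ALL. The device and its
 * queues are (re)configured first when flagged by need_reconfig /
 * need_reconfig_queues, e.g. after a "port config" command or a reset.
 */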
2699edab33b1STetsuya Mukawa int
2700ce8d5614SIntel start_port(portid_t pid)
2701ce8d5614SIntel {
270292d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
2703ce8d5614SIntel 	portid_t pi;
270401817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
270501817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
270601817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
270701817b10SBing Zhao 	uint16_t cnt_pi = 0;
270801817b10SBing Zhao 	uint16_t cfg_pi = 0;
270901817b10SBing Zhao 	int peer_pi;
2710ce8d5614SIntel 	queueid_t qi;
2711ce8d5614SIntel 	struct rte_port *port;
27121c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
2713ce8d5614SIntel 
27144468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
27154468635fSMichael Qiu 		return 0;
27164468635fSMichael Qiu 
27177d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2718edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2719ce8d5614SIntel 			continue;
2720ce8d5614SIntel 
272192d2703eSMichael Qiu 		need_check_link_status = 0;
2722ce8d5614SIntel 		port = &ports[pi];
2723ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2724ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
272561a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is now not stopped\n", pi);
2726ce8d5614SIntel 			continue;
2727ce8d5614SIntel 		}
2728ce8d5614SIntel 
2729ce8d5614SIntel 		if (port->need_reconfig > 0) {
2730655eae01SJie Wang 			struct rte_eth_conf dev_conf;
2731655eae01SJie Wang 			int k;
2732655eae01SJie Wang 
2733ce8d5614SIntel 			port->need_reconfig = 0;
2734ce8d5614SIntel 
27357ee3e944SVasily Philipov 			if (flow_isolate_all) {
27367ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
27377ee3e944SVasily Philipov 				if (ret) {
273861a3b0e5SAndrew Rybchenko 					fprintf(stderr,
273961a3b0e5SAndrew Rybchenko 						"Failed to apply isolated mode on port %d\n",
274061a3b0e5SAndrew Rybchenko 						pi);
27417ee3e944SVasily Philipov 					return -1;
27427ee3e944SVasily Philipov 				}
27437ee3e944SVasily Philipov 			}
2744b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
27455706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
274620a0286fSLiu Xiaofeng 					port->socket_id);
27471c69df45SOri Kam 			if (nb_hairpinq > 0 &&
27481c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
274961a3b0e5SAndrew Rybchenko 				fprintf(stderr,
275061a3b0e5SAndrew Rybchenko 					"Port %d doesn't support hairpin queues\n",
275161a3b0e5SAndrew Rybchenko 					pi);
27521c69df45SOri Kam 				return -1;
27531c69df45SOri Kam 			}
27541bb4a528SFerruh Yigit 
2755ce8d5614SIntel 			/* configure port */
2756a550baf2SMin Hu (Connor) 			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
27571c69df45SOri Kam 						     nb_txq + nb_hairpinq,
2758ce8d5614SIntel 						     &(port->dev_conf));
2759ce8d5614SIntel 			if (diag != 0) {
2760ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2761ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
276261a3b0e5SAndrew Rybchenko 					fprintf(stderr,
276361a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
276461a3b0e5SAndrew Rybchenko 						pi);
276561a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Fail to configure port %d\n",
276661a3b0e5SAndrew Rybchenko 					pi);
2767ce8d5614SIntel 				/* try to reconfigure port next time */
2768ce8d5614SIntel 				port->need_reconfig = 1;
2769148f963fSBruce Richardson 				return -1;
2770ce8d5614SIntel 			}
2771655eae01SJie Wang 			/* Get device configuration */
2772655eae01SJie Wang 			if (0 !=
2773655eae01SJie Wang 				eth_dev_conf_get_print_err(pi, &dev_conf)) {
2774655eae01SJie Wang 				fprintf(stderr,
2775655eae01SJie Wang 					"port %d can not get device configuration\n",
2776655eae01SJie Wang 					pi);
2777655eae01SJie Wang 				return -1;
2778655eae01SJie Wang 			}
2779655eae01SJie Wang 			/* Apply Rx offloads configuration */
2780655eae01SJie Wang 			if (dev_conf.rxmode.offloads !=
2781655eae01SJie Wang 			    port->dev_conf.rxmode.offloads) {
2782655eae01SJie Wang 				port->dev_conf.rxmode.offloads |=
2783655eae01SJie Wang 					dev_conf.rxmode.offloads;
2784655eae01SJie Wang 				for (k = 0;
2785655eae01SJie Wang 				     k < port->dev_info.max_rx_queues;
2786655eae01SJie Wang 				     k++)
2787655eae01SJie Wang 					port->rx_conf[k].offloads |=
2788655eae01SJie Wang 						dev_conf.rxmode.offloads;
2789655eae01SJie Wang 			}
2790655eae01SJie Wang 			/* Apply Tx offloads configuration */
2791655eae01SJie Wang 			if (dev_conf.txmode.offloads !=
2792655eae01SJie Wang 			    port->dev_conf.txmode.offloads) {
2793655eae01SJie Wang 				port->dev_conf.txmode.offloads |=
2794655eae01SJie Wang 					dev_conf.txmode.offloads;
2795655eae01SJie Wang 				for (k = 0;
2796655eae01SJie Wang 				     k < port->dev_info.max_tx_queues;
2797655eae01SJie Wang 				     k++)
2798655eae01SJie Wang 					port->tx_conf[k].offloads |=
2799655eae01SJie Wang 						dev_conf.txmode.offloads;
2800655eae01SJie Wang 			}
2801ce8d5614SIntel 		}
2802a550baf2SMin Hu (Connor) 		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2803ce8d5614SIntel 			port->need_reconfig_queues = 0;
2804ce8d5614SIntel 			/* setup tx queues */
2805ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
2806b6ea6408SIntel 				if ((numa_support) &&
2807b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2808b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2809d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2810d44f8a48SQi Zhang 						txring_numa[pi],
2811d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2812b6ea6408SIntel 				else
2813b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2814d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2815d44f8a48SQi Zhang 						port->socket_id,
2816d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2817b6ea6408SIntel 
2818ce8d5614SIntel 				if (diag == 0)
2819ce8d5614SIntel 					continue;
2820ce8d5614SIntel 
2821ce8d5614SIntel 				/* Fail to setup tx queue, return */
2822ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2823ce8d5614SIntel 							RTE_PORT_HANDLING,
2824ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
282561a3b0e5SAndrew Rybchenko 					fprintf(stderr,
282661a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
282761a3b0e5SAndrew Rybchenko 						pi);
282861a3b0e5SAndrew Rybchenko 				fprintf(stderr,
282961a3b0e5SAndrew Rybchenko 					"Fail to configure port %d tx queues\n",
2830d44f8a48SQi Zhang 					pi);
2831ce8d5614SIntel 				/* try to reconfigure queues next time */
2832ce8d5614SIntel 				port->need_reconfig_queues = 1;
2833148f963fSBruce Richardson 				return -1;
2834ce8d5614SIntel 			}
2835ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2836d44f8a48SQi Zhang 				/* setup rx queues */
2837b6ea6408SIntel 				if ((numa_support) &&
2838b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2839b6ea6408SIntel 					struct rte_mempool * mp =
284026cbb419SViacheslav Ovsiienko 						mbuf_pool_find
284126cbb419SViacheslav Ovsiienko 							(rxring_numa[pi], 0);
2842b6ea6408SIntel 					if (mp == NULL) {
284361a3b0e5SAndrew Rybchenko 						fprintf(stderr,
284461a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
2845b6ea6408SIntel 							rxring_numa[pi]);
2846148f963fSBruce Richardson 						return -1;
2847b6ea6408SIntel 					}
2848b6ea6408SIntel 
28492befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2850d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2851d44f8a48SQi Zhang 					     rxring_numa[pi],
2852d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2853d44f8a48SQi Zhang 					     mp);
28541e1d6bddSBernard Iremonger 				} else {
28551e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
285626cbb419SViacheslav Ovsiienko 						mbuf_pool_find
285726cbb419SViacheslav Ovsiienko 							(port->socket_id, 0);
28581e1d6bddSBernard Iremonger 					if (mp == NULL) {
285961a3b0e5SAndrew Rybchenko 						fprintf(stderr,
286061a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
28611e1d6bddSBernard Iremonger 							port->socket_id);
28621e1d6bddSBernard Iremonger 						return -1;
2863b6ea6408SIntel 					}
28642befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2865d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2866d44f8a48SQi Zhang 					     port->socket_id,
2867d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2868d44f8a48SQi Zhang 					     mp);
28691e1d6bddSBernard Iremonger 				}
2870ce8d5614SIntel 				if (diag == 0)
2871ce8d5614SIntel 					continue;
2872ce8d5614SIntel 
2873ce8d5614SIntel 				/* Fail to setup rx queue, return */
2874ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2875ce8d5614SIntel 							RTE_PORT_HANDLING,
2876ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
287761a3b0e5SAndrew Rybchenko 					fprintf(stderr,
287861a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
287961a3b0e5SAndrew Rybchenko 						pi);
288061a3b0e5SAndrew Rybchenko 				fprintf(stderr,
288161a3b0e5SAndrew Rybchenko 					"Fail to configure port %d rx queues\n",
2882d44f8a48SQi Zhang 					pi);
2883ce8d5614SIntel 				/* try to reconfigure queues next time */
2884ce8d5614SIntel 				port->need_reconfig_queues = 1;
2885148f963fSBruce Richardson 				return -1;
2886ce8d5614SIntel 			}
28871c69df45SOri Kam 			/* setup hairpin queues */
288801817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
28891c69df45SOri Kam 				return -1;
2890ce8d5614SIntel 		}
2891b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
2892b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
2893b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2894b0a9354aSPavan Nikhilesh 					NULL, 0);
2895b0a9354aSPavan Nikhilesh 			if (diag < 0)
289661a3b0e5SAndrew Rybchenko 				fprintf(stderr,
2897b0a9354aSPavan Nikhilesh 					"Port %d: Failed to disable Ptype parsing\n",
2898b0a9354aSPavan Nikhilesh 					pi);
2899b0a9354aSPavan Nikhilesh 		}
2900b0a9354aSPavan Nikhilesh 
290101817b10SBing Zhao 		p_pi = pi;
290201817b10SBing Zhao 		cnt_pi++;
290301817b10SBing Zhao 
2904ce8d5614SIntel 		/* start port */
2905a550baf2SMin Hu (Connor) 		diag = eth_dev_start_mp(pi);
290652f2c6f2SAndrew Rybchenko 		if (diag < 0) {
290761a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Fail to start port %d: %s\n",
290861a3b0e5SAndrew Rybchenko 				pi, rte_strerror(-diag));
2909ce8d5614SIntel 
2910ce8d5614SIntel 			/* Failed to start port, restore stopped state */
2911ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2912ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
291361a3b0e5SAndrew Rybchenko 				fprintf(stderr,
291461a3b0e5SAndrew Rybchenko 					"Port %d can not be set back to stopped\n",
291561a3b0e5SAndrew Rybchenko 					pi);
2916ce8d5614SIntel 			continue;
2917ce8d5614SIntel 		}
2918ce8d5614SIntel 
2919ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2920ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
292161a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into started\n",
292261a3b0e5SAndrew Rybchenko 				pi);
2923ce8d5614SIntel 
29245ffc4a2aSYuying Zhang 		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
2925c2c4f87bSAman Deep Singh 			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
2926a7db3afcSAman Deep Singh 					RTE_ETHER_ADDR_BYTES(&port->eth_addr));
2927d8c89163SZijie Pan 
2928ce8d5614SIntel 		/* at least one port started, need checking link status */
2929ce8d5614SIntel 		need_check_link_status = 1;
293001817b10SBing Zhao 
293101817b10SBing Zhao 		pl[cfg_pi++] = pi;
2932ce8d5614SIntel 	}
2933ce8d5614SIntel 
293492d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2935edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
293692d2703eSMichael Qiu 	else if (need_check_link_status == 0)
293761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Please stop the ports first\n");
2938ce8d5614SIntel 
293901817b10SBing Zhao 	if (hairpin_mode & 0xf) {
294001817b10SBing Zhao 		uint16_t i;
294101817b10SBing Zhao 		int j;
294201817b10SBing Zhao 
294301817b10SBing Zhao 		/* bind all started hairpin ports */
294401817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
294501817b10SBing Zhao 			pi = pl[i];
294601817b10SBing Zhao 			/* bind current Tx to all peer Rx */
294701817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
294801817b10SBing Zhao 							RTE_MAX_ETHPORTS, 1);
294901817b10SBing Zhao 			if (peer_pi < 0)
295001817b10SBing Zhao 				return peer_pi;
295101817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
295201817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
295301817b10SBing Zhao 					continue;
295401817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
295501817b10SBing Zhao 				if (diag < 0) {
295661a3b0e5SAndrew Rybchenko 					fprintf(stderr,
295761a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
295801817b10SBing Zhao 						pi, peer_pl[j],
295901817b10SBing Zhao 						rte_strerror(-diag));
296001817b10SBing Zhao 					return -1;
296101817b10SBing Zhao 				}
296201817b10SBing Zhao 			}
296301817b10SBing Zhao 			/* bind all peer Tx to current Rx */
296401817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
296501817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
296601817b10SBing Zhao 			if (peer_pi < 0)
296701817b10SBing Zhao 				return peer_pi;
296801817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
296901817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
297001817b10SBing Zhao 					continue;
297101817b10SBing Zhao 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
297201817b10SBing Zhao 				if (diag < 0) {
297361a3b0e5SAndrew Rybchenko 					fprintf(stderr,
297461a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
297501817b10SBing Zhao 						peer_pl[j], pi,
297601817b10SBing Zhao 						rte_strerror(-diag));
297701817b10SBing Zhao 					return -1;
297801817b10SBing Zhao 				}
297901817b10SBing Zhao 			}
298001817b10SBing Zhao 		}
298101817b10SBing Zhao 	}
298201817b10SBing Zhao 
298363b72657SIvan Ilchenko 	fill_xstats_display_info_for_port(pid);
298463b72657SIvan Ilchenko 
2985ce8d5614SIntel 	printf("Done\n");
2986148f963fSBruce Richardson 	return 0;
2987ce8d5614SIntel }
2988ce8d5614SIntel 
2989ce8d5614SIntel void
2990ce8d5614SIntel stop_port(portid_t pid)
2991ce8d5614SIntel {
2992ce8d5614SIntel 	portid_t pi;
2993ce8d5614SIntel 	struct rte_port *port;
2994ce8d5614SIntel 	int need_check_link_status = 0;
299501817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
299601817b10SBing Zhao 	int peer_pi;
2997ce8d5614SIntel 
29984468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
29994468635fSMichael Qiu 		return;
30004468635fSMichael Qiu 
3001ce8d5614SIntel 	printf("Stopping ports...\n");
3002ce8d5614SIntel 
30037d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
30044468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3005ce8d5614SIntel 			continue;
3006ce8d5614SIntel 
3007a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
300861a3b0e5SAndrew Rybchenko 			fprintf(stderr,
300961a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
301061a3b0e5SAndrew Rybchenko 				pi);
3011a8ef3e3aSBernard Iremonger 			continue;
3012a8ef3e3aSBernard Iremonger 		}
3013a8ef3e3aSBernard Iremonger 
30140e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
301561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
301661a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
301761a3b0e5SAndrew Rybchenko 				pi);
30180e545d30SBernard Iremonger 			continue;
30190e545d30SBernard Iremonger 		}
30200e545d30SBernard Iremonger 
3021ce8d5614SIntel 		port = &ports[pi];
3022ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
3023ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
3024ce8d5614SIntel 			continue;
3025ce8d5614SIntel 
302601817b10SBing Zhao 		if (hairpin_mode & 0xf) {
302701817b10SBing Zhao 			int j;
302801817b10SBing Zhao 
302901817b10SBing Zhao 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
303001817b10SBing Zhao 			/* unbind all peer Tx from current Rx */
303101817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
303201817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
303301817b10SBing Zhao 			if (peer_pi < 0)
303401817b10SBing Zhao 				continue;
303501817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
303601817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
303701817b10SBing Zhao 					continue;
303801817b10SBing Zhao 				rte_eth_hairpin_unbind(peer_pl[j], pi);
303901817b10SBing Zhao 			}
304001817b10SBing Zhao 		}
304101817b10SBing Zhao 
30420f93edbfSGregory Etelson 		if (port->flow_list)
30430f93edbfSGregory Etelson 			port_flow_flush(pi);
30440f93edbfSGregory Etelson 
3045a550baf2SMin Hu (Connor) 		if (eth_dev_stop_mp(pi) != 0)
3046e62c5a12SIvan Ilchenko 			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3047e62c5a12SIvan Ilchenko 				pi);
3048ce8d5614SIntel 
3049ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
3050ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
305161a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into stopped\n",
305261a3b0e5SAndrew Rybchenko 				pi);
3053ce8d5614SIntel 		need_check_link_status = 1;
3054ce8d5614SIntel 	}
3055bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
3056edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3057ce8d5614SIntel 
3058ce8d5614SIntel 	printf("Done\n");
3059ce8d5614SIntel }
3060ce8d5614SIntel 
3061ce6959bfSWisam Jaddo static void
30624f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
3063ce6959bfSWisam Jaddo {
30644f1de450SThomas Monjalon 	portid_t i;
30654f1de450SThomas Monjalon 	portid_t new_total = 0;
3066ce6959bfSWisam Jaddo 
30674f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
30684f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
30694f1de450SThomas Monjalon 			array[new_total] = array[i];
30704f1de450SThomas Monjalon 			new_total++;
3071ce6959bfSWisam Jaddo 		}
30724f1de450SThomas Monjalon 	*total = new_total;
30734f1de450SThomas Monjalon }
30744f1de450SThomas Monjalon 
30754f1de450SThomas Monjalon static void
30764f1de450SThomas Monjalon remove_invalid_ports(void)
30774f1de450SThomas Monjalon {
30784f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
30794f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
30804f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
3081ce6959bfSWisam Jaddo }
3082ce6959bfSWisam Jaddo 
3083ce8d5614SIntel void
3084ce8d5614SIntel close_port(portid_t pid)
3085ce8d5614SIntel {
3086ce8d5614SIntel 	portid_t pi;
3087ce8d5614SIntel 	struct rte_port *port;
3088ce8d5614SIntel 
30894468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
30904468635fSMichael Qiu 		return;
30914468635fSMichael Qiu 
3092ce8d5614SIntel 	printf("Closing ports...\n");
3093ce8d5614SIntel 
30947d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
30954468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3096ce8d5614SIntel 			continue;
3097ce8d5614SIntel 
3098a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
309961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
310061a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
310161a3b0e5SAndrew Rybchenko 				pi);
3102a8ef3e3aSBernard Iremonger 			continue;
3103a8ef3e3aSBernard Iremonger 		}
3104a8ef3e3aSBernard Iremonger 
31050e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
310661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
310761a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
310861a3b0e5SAndrew Rybchenko 				pi);
31090e545d30SBernard Iremonger 			continue;
31100e545d30SBernard Iremonger 		}
31110e545d30SBernard Iremonger 
3112ce8d5614SIntel 		port = &ports[pi];
3113ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
3114d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
311561a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is already closed\n", pi);
3116d4e8ad64SMichael Qiu 			continue;
3117d4e8ad64SMichael Qiu 		}
3118d4e8ad64SMichael Qiu 
3119a550baf2SMin Hu (Connor) 		if (is_proc_primary()) {
3120938a184aSAdrien Mazarguil 			port_flow_flush(pi);
3121*59f3a8acSGregory Etelson 			port_flex_item_flush(pi);
3122ce8d5614SIntel 			rte_eth_dev_close(pi);
3123ce8d5614SIntel 		}
312463b72657SIvan Ilchenko 
312563b72657SIvan Ilchenko 		free_xstats_display_info(pi);
3126a550baf2SMin Hu (Connor) 	}
3127ce8d5614SIntel 
312885c6571cSThomas Monjalon 	remove_invalid_ports();
3129ce8d5614SIntel 	printf("Done\n");
3130ce8d5614SIntel }
3131ce8d5614SIntel 
3132edab33b1STetsuya Mukawa void
313397f1e196SWei Dai reset_port(portid_t pid)
313497f1e196SWei Dai {
313597f1e196SWei Dai 	int diag;
313697f1e196SWei Dai 	portid_t pi;
313797f1e196SWei Dai 	struct rte_port *port;
313897f1e196SWei Dai 
313997f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
314097f1e196SWei Dai 		return;
314197f1e196SWei Dai 
31421cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
31431cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
314461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
314561a3b0e5SAndrew Rybchenko 			"Can not reset port(s), please stop port(s) first.\n");
31461cde1b9aSShougang Wang 		return;
31471cde1b9aSShougang Wang 	}
31481cde1b9aSShougang Wang 
314997f1e196SWei Dai 	printf("Resetting ports...\n");
315097f1e196SWei Dai 
315197f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
315297f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
315397f1e196SWei Dai 			continue;
315497f1e196SWei Dai 
315597f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
315661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
315761a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
315861a3b0e5SAndrew Rybchenko 				pi);
315997f1e196SWei Dai 			continue;
316097f1e196SWei Dai 		}
316197f1e196SWei Dai 
316297f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
316361a3b0e5SAndrew Rybchenko 			fprintf(stderr,
316461a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
316597f1e196SWei Dai 				pi);
316697f1e196SWei Dai 			continue;
316797f1e196SWei Dai 		}
316897f1e196SWei Dai 
316997f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
317097f1e196SWei Dai 		if (diag == 0) {
317197f1e196SWei Dai 			port = &ports[pi];
317297f1e196SWei Dai 			port->need_reconfig = 1;
317397f1e196SWei Dai 			port->need_reconfig_queues = 1;
317497f1e196SWei Dai 		} else {
317561a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Failed to reset port %d. diag=%d\n",
317661a3b0e5SAndrew Rybchenko 				pi, diag);
317797f1e196SWei Dai 		}
317897f1e196SWei Dai 	}
317997f1e196SWei Dai 
318097f1e196SWei Dai 	printf("Done\n");
318197f1e196SWei Dai }
318297f1e196SWei Dai 
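/*
 * Attach a new device at runtime. CLI usage sketch (assuming the standard
 * testpmd "port attach" command), where the identifier is a bus-specific
 * device string such as a PCI address or a vdev name:
 *     testpmd> port attach 0000:83:00.0
 *     testpmd> port attach net_ring0
 */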
318397f1e196SWei Dai void
3184edab33b1STetsuya Mukawa attach_port(char *identifier)
3185ce8d5614SIntel {
31864f1ed78eSThomas Monjalon 	portid_t pi;
3187c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
3188ce8d5614SIntel 
3189edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
3190edab33b1STetsuya Mukawa 
3191edab33b1STetsuya Mukawa 	if (identifier == NULL) {
319261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "No device identifier specified\n");
3193edab33b1STetsuya Mukawa 		return;
3194ce8d5614SIntel 	}
3195ce8d5614SIntel 
319675b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
3197c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3198edab33b1STetsuya Mukawa 		return;
3199c9cce428SThomas Monjalon 	}
3200c9cce428SThomas Monjalon 
32014f1ed78eSThomas Monjalon 	/* first attach mode: event */
32024f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
32034f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
32044f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
32054f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
32064f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
32074f1ed78eSThomas Monjalon 				setup_attached_port(pi);
32084f1ed78eSThomas Monjalon 		return;
32094f1ed78eSThomas Monjalon 	}
32104f1ed78eSThomas Monjalon 
32114f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
321286fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
32134f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
321486fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
321586fa5de1SThomas Monjalon 			continue; /* port was already attached before */
3216c9cce428SThomas Monjalon 		setup_attached_port(pi);
3217c9cce428SThomas Monjalon 	}
321886fa5de1SThomas Monjalon }
3219c9cce428SThomas Monjalon 
3220c9cce428SThomas Monjalon static void
3221c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
3222c9cce428SThomas Monjalon {
3223c9cce428SThomas Monjalon 	unsigned int socket_id;
322434fc1051SIvan Ilchenko 	int ret;
3225edab33b1STetsuya Mukawa 
3226931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
322729841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
3228931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
322929841336SPhil Yang 		socket_id = socket_ids[0];
3230931126baSBernard Iremonger 	reconfig(pi, socket_id);
323134fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
323234fc1051SIvan Ilchenko 	if (ret != 0)
323361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
323461a3b0e5SAndrew Rybchenko 			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
323534fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
3236edab33b1STetsuya Mukawa 
32374f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
32384f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
32394f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
32404f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
3241edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
3242edab33b1STetsuya Mukawa 
3243edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
3244edab33b1STetsuya Mukawa 	printf("Done\n");
3245edab33b1STetsuya Mukawa }
3246edab33b1STetsuya Mukawa 
32470654d4a8SThomas Monjalon static void
32480654d4a8SThomas Monjalon detach_device(struct rte_device *dev)
32495f4ec54fSChen Jing D(Mark) {
3250f8e5baa2SThomas Monjalon 	portid_t sibling;
3251f8e5baa2SThomas Monjalon 
3252f8e5baa2SThomas Monjalon 	if (dev == NULL) {
325361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Device already removed\n");
3254f8e5baa2SThomas Monjalon 		return;
3255f8e5baa2SThomas Monjalon 	}
3256f8e5baa2SThomas Monjalon 
32570654d4a8SThomas Monjalon 	printf("Removing a device...\n");
3258938a184aSAdrien Mazarguil 
32592a449871SThomas Monjalon 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
32602a449871SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
32612a449871SThomas Monjalon 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
326261a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
326361a3b0e5SAndrew Rybchenko 					sibling);
32642a449871SThomas Monjalon 				return;
32652a449871SThomas Monjalon 			}
32662a449871SThomas Monjalon 			port_flow_flush(sibling);
32672a449871SThomas Monjalon 		}
32682a449871SThomas Monjalon 	}
32692a449871SThomas Monjalon 
327075b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
3271f8e5baa2SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3272edab33b1STetsuya Mukawa 		return;
32733070419eSGaetan Rivet 	}
32744f1de450SThomas Monjalon 	remove_invalid_ports();
327503ce2c53SMatan Azrad 
32760654d4a8SThomas Monjalon 	printf("Device is detached\n");
3277f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
3278edab33b1STetsuya Mukawa 	printf("Done\n");
3279edab33b1STetsuya Mukawa 	return;
32805f4ec54fSChen Jing D(Mark) }
32815f4ec54fSChen Jing D(Mark) 
3282af75078fSIntel void
32830654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
32840654d4a8SThomas Monjalon {
32850a0821bcSPaulis Gributs 	int ret;
32860a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
32870a0821bcSPaulis Gributs 
32880654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
32890654d4a8SThomas Monjalon 		return;
32900654d4a8SThomas Monjalon 
32910654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
32920654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
329361a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port not stopped\n");
32940654d4a8SThomas Monjalon 			return;
32950654d4a8SThomas Monjalon 		}
329661a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Port was not closed\n");
32970654d4a8SThomas Monjalon 	}
32980654d4a8SThomas Monjalon 
32990a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
33000a0821bcSPaulis Gributs 	if (ret != 0) {
33010a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
33020a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
33030a0821bcSPaulis Gributs 			port_id);
33040a0821bcSPaulis Gributs 		return;
33050a0821bcSPaulis Gributs 	}
33060a0821bcSPaulis Gributs 	detach_device(dev_info.device);
33070654d4a8SThomas Monjalon }
33080654d4a8SThomas Monjalon 
33090654d4a8SThomas Monjalon void
33105edee5f6SThomas Monjalon detach_devargs(char *identifier)
331155e51c96SNithin Dabilpuram {
331255e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
331355e51c96SNithin Dabilpuram 	struct rte_devargs da;
331455e51c96SNithin Dabilpuram 	portid_t port_id;
331555e51c96SNithin Dabilpuram 
331655e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
331755e51c96SNithin Dabilpuram 
331855e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
331955e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
332061a3b0e5SAndrew Rybchenko 		fprintf(stderr, "cannot parse identifier\n");
332155e51c96SNithin Dabilpuram 		return;
332255e51c96SNithin Dabilpuram 	}
332355e51c96SNithin Dabilpuram 
332455e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
332555e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
332655e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
332761a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
332861a3b0e5SAndrew Rybchenko 					port_id);
3329149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
333064051bb1SXueming Li 				rte_devargs_reset(&da);
333155e51c96SNithin Dabilpuram 				return;
333255e51c96SNithin Dabilpuram 			}
333355e51c96SNithin Dabilpuram 			port_flow_flush(port_id);
333455e51c96SNithin Dabilpuram 		}
333555e51c96SNithin Dabilpuram 	}
333655e51c96SNithin Dabilpuram 
333755e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
333855e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
333955e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
334064051bb1SXueming Li 		rte_devargs_reset(&da);
334155e51c96SNithin Dabilpuram 		return;
334255e51c96SNithin Dabilpuram 	}
334355e51c96SNithin Dabilpuram 
334455e51c96SNithin Dabilpuram 	remove_invalid_ports();
334555e51c96SNithin Dabilpuram 
334655e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
334755e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
334855e51c96SNithin Dabilpuram 	printf("Done\n");
334964051bb1SXueming Li 	rte_devargs_reset(&da);
335055e51c96SNithin Dabilpuram }
335155e51c96SNithin Dabilpuram 
335255e51c96SNithin Dabilpuram void
3353af75078fSIntel pmd_test_exit(void)
3354af75078fSIntel {
3355af75078fSIntel 	portid_t pt_id;
335626cbb419SViacheslav Ovsiienko 	unsigned int i;
3357fb73e096SJeff Guo 	int ret;
3358af75078fSIntel 
33598210ec25SPablo de Lara 	if (test_done == 0)
33608210ec25SPablo de Lara 		stop_packet_forwarding();
33618210ec25SPablo de Lara 
3362761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
336326cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
33643a0968c8SShahaf Shuler 		if (mempools[i]) {
33653a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
33663a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
33673a0968c8SShahaf Shuler 						     NULL);
33683a0968c8SShahaf Shuler 		}
33693a0968c8SShahaf Shuler 	}
3370761f7ae1SJie Zhou #endif
3371d3a274ceSZhihong Wang 	if (ports != NULL) {
3372d3a274ceSZhihong Wang 		no_link_check = 1;
33737d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
337408fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
3375af75078fSIntel 			fflush(stdout);
3376d3a274ceSZhihong Wang 			stop_port(pt_id);
337708fd782bSCristian Dumitrescu 		}
337808fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
337908fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
338008fd782bSCristian Dumitrescu 			fflush(stdout);
3381d3a274ceSZhihong Wang 			close_port(pt_id);
3382af75078fSIntel 		}
3383d3a274ceSZhihong Wang 	}
3384fb73e096SJeff Guo 
3385fb73e096SJeff Guo 	if (hot_plug) {
3386fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
33872049c511SJeff Guo 		if (ret) {
3388fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
3389fb73e096SJeff Guo 				"fail to stop device event monitor.\n");
33902049c511SJeff Guo 			return;
33912049c511SJeff Guo 		}
3392fb73e096SJeff Guo 
33932049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
3394cc1bf307SJeff Guo 			dev_event_callback, NULL);
33952049c511SJeff Guo 		if (ret < 0) {
3396fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
33972049c511SJeff Guo 				"fail to unregister device event callback.\n");
33982049c511SJeff Guo 			return;
33992049c511SJeff Guo 		}
34002049c511SJeff Guo 
34012049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
34022049c511SJeff Guo 		if (ret) {
34032049c511SJeff Guo 			RTE_LOG(ERR, EAL,
34042049c511SJeff Guo 				"fail to disable hotplug handling.\n");
34052049c511SJeff Guo 			return;
34062049c511SJeff Guo 		}
3407fb73e096SJeff Guo 	}
340826cbb419SViacheslav Ovsiienko 	for (i = 0; i < RTE_DIM(mempools); i++) {
3409401b744dSShahaf Shuler 		if (mempools[i])
3410a550baf2SMin Hu (Connor) 			mempool_free_mp(mempools[i]);
3411401b744dSShahaf Shuler 	}
341263b72657SIvan Ilchenko 	free(xstats_display);
3413fb73e096SJeff Guo 
3414d3a274ceSZhihong Wang 	printf("\nBye...\n");
3415af75078fSIntel }
3416af75078fSIntel 
3417af75078fSIntel typedef void (*cmd_func_t)(void);
3418af75078fSIntel struct pmd_test_command {
3419af75078fSIntel 	const char *cmd_name;
3420af75078fSIntel 	cmd_func_t cmd_func;
3421af75078fSIntel };
3422af75078fSIntel 
3423ce8d5614SIntel /* Check the link status of all ports for up to 9 s, then print the final status. */
3424af75078fSIntel static void
3425edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
3426af75078fSIntel {
3427ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
3428ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3429f8244c63SZhiyong Yang 	portid_t portid;
3430f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
3431ce8d5614SIntel 	struct rte_eth_link link;
3432e661a08bSIgor Romanov 	int ret;
3433ba5509a6SIvan Dyukov 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3434ce8d5614SIntel 
3435ce8d5614SIntel 	printf("Checking link statuses...\n");
3436ce8d5614SIntel 	fflush(stdout);
3437ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
3438ce8d5614SIntel 		all_ports_up = 1;
34397d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
3440ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
3441ce8d5614SIntel 				continue;
3442ce8d5614SIntel 			memset(&link, 0, sizeof(link));
3443e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
3444e661a08bSIgor Romanov 			if (ret < 0) {
3445e661a08bSIgor Romanov 				all_ports_up = 0;
3446e661a08bSIgor Romanov 				if (print_flag == 1)
344761a3b0e5SAndrew Rybchenko 					fprintf(stderr,
344861a3b0e5SAndrew Rybchenko 						"Port %u link get failed: %s\n",
3449e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
3450e661a08bSIgor Romanov 				continue;
3451e661a08bSIgor Romanov 			}
3452ce8d5614SIntel 			/* print link status if flag set */
3453ce8d5614SIntel 			if (print_flag == 1) {
3454ba5509a6SIvan Dyukov 				rte_eth_link_to_str(link_status,
3455ba5509a6SIvan Dyukov 					sizeof(link_status), &link);
3456ba5509a6SIvan Dyukov 				printf("Port %d %s\n", portid, link_status);
3457ce8d5614SIntel 				continue;
3458ce8d5614SIntel 			}
3459ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
346009419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
3461ce8d5614SIntel 				all_ports_up = 0;
3462ce8d5614SIntel 				break;
3463ce8d5614SIntel 			}
3464ce8d5614SIntel 		}
3465ce8d5614SIntel 		/* after finally printing all link status, get out */
3466ce8d5614SIntel 		if (print_flag == 1)
3467ce8d5614SIntel 			break;
3468ce8d5614SIntel 
3469ce8d5614SIntel 		if (all_ports_up == 0) {
3470ce8d5614SIntel 			fflush(stdout);
3471ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
3472ce8d5614SIntel 		}
3473ce8d5614SIntel 
3474ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
3475ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3476ce8d5614SIntel 			print_flag = 1;
3477ce8d5614SIntel 		}
34788ea656f8SGaetan Rivet 
34798ea656f8SGaetan Rivet 		if (lsc_interrupt)
34808ea656f8SGaetan Rivet 			break;
3481ce8d5614SIntel 	}
3482af75078fSIntel }
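
/*
 * Usage sketch for the port-mask convention above (illustrative only,
 * not part of the original file): bit N of port_mask selects port N,
 * so a mask of all ones polls every port.
 *
 *	uint32_t mask = (1U << 0) | (1U << 2);	// check ports 0 and 2 only
 *	check_all_ports_link_status(mask);	// polls up to 9 s, then prints
 */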
3483af75078fSIntel 
3484284c908cSGaetan Rivet static void
3485cc1bf307SJeff Guo rmv_port_callback(void *arg)
3486284c908cSGaetan Rivet {
34873b97888aSMatan Azrad 	int need_to_start = 0;
34880da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
348928caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
34900a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
34910a0821bcSPaulis Gributs 	int ret;
3492284c908cSGaetan Rivet 
3493284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
3494284c908cSGaetan Rivet 
34953b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
34963b97888aSMatan Azrad 		need_to_start = 1;
34973b97888aSMatan Azrad 		stop_packet_forwarding();
34983b97888aSMatan Azrad 	}
34990da2a62bSMatan Azrad 	no_link_check = 1;
3500284c908cSGaetan Rivet 	stop_port(port_id);
35010da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
35020654d4a8SThomas Monjalon 
35030a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
35040a0821bcSPaulis Gributs 	if (ret != 0) {
35050a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
35060a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
35070a0821bcSPaulis Gributs 			port_id);
3508e1d38504SPaulis Gributs 	} else {
3509e1d38504SPaulis Gributs 		struct rte_device *device = dev_info.device;
3510e1d38504SPaulis Gributs 		close_port(port_id);
3511e1d38504SPaulis Gributs 		detach_device(device); /* might be already removed or have more ports */
3512e1d38504SPaulis Gributs 	}
35133b97888aSMatan Azrad 	if (need_to_start)
35143b97888aSMatan Azrad 		start_packet_forwarding(0);
3515284c908cSGaetan Rivet }
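
/*
 * Sketch of the deferred-removal pattern the callbacks below rely on
 * (values are illustrative assumptions): instead of detaching inline
 * from interrupt context, schedule rmv_port_callback() via an alarm.
 *
 *	// invoke rmv_port_callback(port_id) ~100 ms from now, outside
 *	// the interrupt callback that reported the removal
 *	rte_eal_alarm_set(100000, rmv_port_callback,
 *			  (void *)(intptr_t)port_id);
 */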
3516284c908cSGaetan Rivet 
351776ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3518d6af1a13SBernard Iremonger static int
3519f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3520d6af1a13SBernard Iremonger 		  void *ret_param)
352176ad4a2dSGaetan Rivet {
352276ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3523d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
352476ad4a2dSGaetan Rivet 
352576ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
352661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
352761a3b0e5SAndrew Rybchenko 			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
352876ad4a2dSGaetan Rivet 			port_id, __func__, type);
352976ad4a2dSGaetan Rivet 		fflush(stderr);
35303af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3531f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
353297b5d8b5SThomas Monjalon 			eth_event_desc[type]);
353376ad4a2dSGaetan Rivet 		fflush(stdout);
353476ad4a2dSGaetan Rivet 	}
3535284c908cSGaetan Rivet 
3536284c908cSGaetan Rivet 	switch (type) {
35374f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
35384f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
35394f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
35404f1ed78eSThomas Monjalon 		break;
3541284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
35424f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
35434f1ed78eSThomas Monjalon 			break;
3544284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3545cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
354661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
354761a3b0e5SAndrew Rybchenko 				"Could not set up deferred device removal\n");
3548284c908cSGaetan Rivet 		break;
354985c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
355085c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
355185c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
355285c6571cSThomas Monjalon 		break;
3553284c908cSGaetan Rivet 	default:
3554284c908cSGaetan Rivet 		break;
3555284c908cSGaetan Rivet 	}
3556d6af1a13SBernard Iremonger 	return 0;
355776ad4a2dSGaetan Rivet }
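
/*
 * Sketch of how event_print_mask gates the printout in
 * eth_event_callback() above; the chosen events are an illustrative
 * assumption (testpmd normally derives this mask from --print-event).
 *
 *	// print only link-state-change and device-removal events
 *	event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
 *			   (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
 */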
355876ad4a2dSGaetan Rivet 
355997b5d8b5SThomas Monjalon static int
356097b5d8b5SThomas Monjalon register_eth_event_callback(void)
356197b5d8b5SThomas Monjalon {
356297b5d8b5SThomas Monjalon 	int ret;
356397b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
356497b5d8b5SThomas Monjalon 
356597b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
356697b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
356797b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
356897b5d8b5SThomas Monjalon 				event,
356997b5d8b5SThomas Monjalon 				eth_event_callback,
357097b5d8b5SThomas Monjalon 				NULL);
357197b5d8b5SThomas Monjalon 		if (ret != 0) {
357297b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
357397b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
357497b5d8b5SThomas Monjalon 			return -1;
357597b5d8b5SThomas Monjalon 		}
357697b5d8b5SThomas Monjalon 	}
357797b5d8b5SThomas Monjalon 
357897b5d8b5SThomas Monjalon 	return 0;
357997b5d8b5SThomas Monjalon }
358097b5d8b5SThomas Monjalon 
3581fb73e096SJeff Guo /* This function is used by the interrupt thread */
3582fb73e096SJeff Guo static void
3583cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3584fb73e096SJeff Guo 			     __rte_unused void *arg)
3585fb73e096SJeff Guo {
35862049c511SJeff Guo 	uint16_t port_id;
35872049c511SJeff Guo 	int ret;
35882049c511SJeff Guo 
3589fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
3590fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
3591fb73e096SJeff Guo 			__func__, type);
3592fb73e096SJeff Guo 		fflush(stderr);
3593fb73e096SJeff Guo 	}
3594fb73e096SJeff Guo 
3595fb73e096SJeff Guo 	switch (type) {
3596fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
3597cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "Device %s has been removed!\n",
3598fb73e096SJeff Guo 			device_name);
35992049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
36002049c511SJeff Guo 		if (ret) {
36012049c511SJeff Guo 			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
36022049c511SJeff Guo 				device_name);
36032049c511SJeff Guo 			return;
36042049c511SJeff Guo 		}
3605cc1bf307SJeff Guo 		/*
3606cc1bf307SJeff Guo 		 * Because the user's callback is invoked from the EAL
3607cc1bf307SJeff Guo 		 * interrupt callback, the interrupt callback must finish
3608cc1bf307SJeff Guo 		 * before it can be unregistered while detaching the device.
3609cc1bf307SJeff Guo 		 * So return from this callback quickly and detach the device
3610cc1bf307SJeff Guo 		 * through a deferred removal instead. This is a workaround;
3611cc1bf307SJeff Guo 		 * once device detaching is moved into the EAL, the deferred
3612cc1bf307SJeff Guo 		 * removal can be deleted.
3613cc1bf307SJeff Guo 		 */
3614cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3615cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3616cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
3617cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
3618fb73e096SJeff Guo 		break;
3619fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
3620fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "Device %s has been added!\n",
3621fb73e096SJeff Guo 			device_name);
3622fb73e096SJeff Guo 		/* TODO: once kernel driver binding is finished,
3623fb73e096SJeff Guo 		 * attach the port.
3624fb73e096SJeff Guo 		 */
3625fb73e096SJeff Guo 		break;
3626fb73e096SJeff Guo 	default:
3627fb73e096SJeff Guo 		break;
3628fb73e096SJeff Guo 	}
3629fb73e096SJeff Guo }
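
/*
 * Summary sketch of the hotplug monitoring lifecycle built around
 * dev_event_callback(): these are the same calls made by main() and
 * pmd_test_exit() in this file, in the order they use them.
 *
 *	rte_dev_hotplug_handle_enable();
 *	rte_dev_event_monitor_start();
 *	rte_dev_event_callback_register(NULL, dev_event_callback, NULL);
 *	...
 *	rte_dev_event_monitor_stop();
 *	rte_dev_event_callback_unregister(NULL, dev_event_callback, NULL);
 *	rte_dev_hotplug_handle_disable();
 */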
3630fb73e096SJeff Guo 
3631f2c5125aSPablo de Lara static void
3632f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
3633f2c5125aSPablo de Lara {
3634d44f8a48SQi Zhang 	uint16_t qid;
36355e91aeefSWei Zhao 	uint64_t offloads;
3636f2c5125aSPablo de Lara 
3637d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
36385e91aeefSWei Zhao 		offloads = port->rx_conf[qid].offloads;
3639d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
3640575e0fd1SWei Zhao 		if (offloads != 0)
3641575e0fd1SWei Zhao 			port->rx_conf[qid].offloads = offloads;
3642d44f8a48SQi Zhang 
3643d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3644f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3645d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3646f2c5125aSPablo de Lara 
3647f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3648d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3649f2c5125aSPablo de Lara 
3650f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3651d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3652f2c5125aSPablo de Lara 
3653f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3654d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3655f2c5125aSPablo de Lara 
3656f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3657d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
3658f2c5125aSPablo de Lara 
3659d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3660d44f8a48SQi Zhang 	}
3661d44f8a48SQi Zhang 
3662d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
36635e91aeefSWei Zhao 		offloads = port->tx_conf[qid].offloads;
3664d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
3665575e0fd1SWei Zhao 		if (offloads != 0)
3666575e0fd1SWei Zhao 			port->tx_conf[qid].offloads = offloads;
3667d44f8a48SQi Zhang 
3668d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3669f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3670d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3671f2c5125aSPablo de Lara 
3672f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3673d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3674f2c5125aSPablo de Lara 
3675f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3676d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3677f2c5125aSPablo de Lara 
3678f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3679d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3680f2c5125aSPablo de Lara 
3681f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3682d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3683d44f8a48SQi Zhang 
3684d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
3685d44f8a48SQi Zhang 	}
3686f2c5125aSPablo de Lara }
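
/*
 * The RTE_PMD_PARAM_UNSET checks above implement "keep the PMD default
 * unless the user overrode it". A hedged example of the effect (the
 * option name and values are assumptions based on testpmd's documented
 * flags):
 *
 *	// testpmd ... --txrst=32  =>  tx_rs_thresh == 32, so every
 *	//   port->tx_conf[qid].tx_rs_thresh is forced to 32
 *	// no --txrst option       =>  tx_rs_thresh == RTE_PMD_PARAM_UNSET,
 *	//   the default_txconf value from the PMD is kept
 */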
3687f2c5125aSPablo de Lara 
36880c4abd36SSteve Yang /*
3689b563c142SFerruh Yigit  * Helper function to set MTU from frame size
36900c4abd36SSteve Yang  *
36910c4abd36SSteve Yang  * port->dev_info should be set before calling this function.
36920c4abd36SSteve Yang  *
36930c4abd36SSteve Yang  * return 0 on success, negative on error
36940c4abd36SSteve Yang  */
36950c4abd36SSteve Yang int
3696b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
36970c4abd36SSteve Yang {
36980c4abd36SSteve Yang 	struct rte_port *port = &ports[portid];
36990c4abd36SSteve Yang 	uint32_t eth_overhead;
37001bb4a528SFerruh Yigit 	uint16_t mtu, new_mtu;
37010c4abd36SSteve Yang 
37021bb4a528SFerruh Yigit 	eth_overhead = get_eth_overhead(&port->dev_info);
37031bb4a528SFerruh Yigit 
37041bb4a528SFerruh Yigit 	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
37051bb4a528SFerruh Yigit 		fprintf(stderr, "Failed to get MTU for port %u\n", portid);
37061bb4a528SFerruh Yigit 		return -1;
37071bb4a528SFerruh Yigit 	}
37081bb4a528SFerruh Yigit 
37091bb4a528SFerruh Yigit 	new_mtu = max_rx_pktlen - eth_overhead;
37100c4abd36SSteve Yang 
37111bb4a528SFerruh Yigit 	if (mtu == new_mtu)
37121bb4a528SFerruh Yigit 		return 0;
37131bb4a528SFerruh Yigit 
37141bb4a528SFerruh Yigit 	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
371561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
371661a3b0e5SAndrew Rybchenko 			"Failed to set MTU to %u for port %u\n",
37171bb4a528SFerruh Yigit 			new_mtu, portid);
37181bb4a528SFerruh Yigit 		return -1;
37190c4abd36SSteve Yang 	}
37200c4abd36SSteve Yang 
37211bb4a528SFerruh Yigit 	port->dev_conf.rxmode.mtu = new_mtu;
37221bb4a528SFerruh Yigit 
37230c4abd36SSteve Yang 	return 0;
37240c4abd36SSteve Yang }
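
/*
 * Worked example for the conversion above, assuming a standard 18-byte
 * Ethernet L2 overhead (14-byte header + 4-byte CRC) reported by
 * get_eth_overhead():
 *
 *	max_rx_pktlen = 1518  =>  new_mtu = 1518 - 18 = 1500
 *	max_rx_pktlen = 9018  =>  new_mtu = 9018 - 18 = 9000 (jumbo)
 */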
37250c4abd36SSteve Yang 
3726013af9b6SIntel void
3727013af9b6SIntel init_port_config(void)
3728013af9b6SIntel {
3729013af9b6SIntel 	portid_t pid;
3730013af9b6SIntel 	struct rte_port *port;
3731655eae01SJie Wang 	int ret, i;
3732013af9b6SIntel 
37337d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
3734013af9b6SIntel 		port = &ports[pid];
3735013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
37366f51deb9SIvan Ilchenko 
37376f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
37386f51deb9SIvan Ilchenko 		if (ret != 0)
37396f51deb9SIvan Ilchenko 			return;
37406f51deb9SIvan Ilchenko 
3742013af9b6SIntel 		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
37413ce690d3SBruce Richardson 		if (nb_rxq > 1)
374390892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3744422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
3745af75078fSIntel 		else
3747013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
37493ce690d3SBruce Richardson 
37505f592039SJingjing Wu 		if (port->dcb_flag == 0) {
3751655eae01SJie Wang 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3752f9295aa2SXiaoyu Min 				port->dev_conf.rxmode.mq_mode =
3753f9295aa2SXiaoyu Min 					(enum rte_eth_rx_mq_mode)
3754f9295aa2SXiaoyu Min 						(rx_mq_mode & ETH_MQ_RX_RSS);
3755655eae01SJie Wang 			} else {
37563ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3757655eae01SJie Wang 				port->dev_conf.rxmode.offloads &=
3758655eae01SJie Wang 						~DEV_RX_OFFLOAD_RSS_HASH;
3759655eae01SJie Wang 
3760655eae01SJie Wang 				for (i = 0;
3761655eae01SJie Wang 				     i < port->dev_info.nb_rx_queues;
3762655eae01SJie Wang 				     i++)
3763655eae01SJie Wang 					port->rx_conf[i].offloads &=
3764655eae01SJie Wang 						~DEV_RX_OFFLOAD_RSS_HASH;
3765655eae01SJie Wang 			}
37663ce690d3SBruce Richardson 		}
37673ce690d3SBruce Richardson 
3768f2c5125aSPablo de Lara 		rxtx_port_config(port);
3769013af9b6SIntel 
3770a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3771a5279d25SIgor Romanov 		if (ret != 0)
3772a5279d25SIgor Romanov 			return;
3773013af9b6SIntel 
3774a8d0d473SBruce Richardson #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3775e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
37767b7e5ba7SIntel #endif
37778ea656f8SGaetan Rivet 
37780a0821bcSPaulis Gributs 		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
37798ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
37800a0821bcSPaulis Gributs 		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3781284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
3782013af9b6SIntel 	}
3783013af9b6SIntel }
3784013af9b6SIntel 
378541b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
378641b05095SBernard Iremonger {
378741b05095SBernard Iremonger 	struct rte_port *port;
378841b05095SBernard Iremonger 
378941b05095SBernard Iremonger 	port = &ports[slave_pid];
379041b05095SBernard Iremonger 	port->slave_flag = 1;
379141b05095SBernard Iremonger }
379241b05095SBernard Iremonger 
379341b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
379441b05095SBernard Iremonger {
379541b05095SBernard Iremonger 	struct rte_port *port;
379641b05095SBernard Iremonger 
379741b05095SBernard Iremonger 	port = &ports[slave_pid];
379841b05095SBernard Iremonger 	port->slave_flag = 0;
379941b05095SBernard Iremonger }
380041b05095SBernard Iremonger 
38010e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
38020e545d30SBernard Iremonger {
38030e545d30SBernard Iremonger 	struct rte_port *port;
38040a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
38050a0821bcSPaulis Gributs 	int ret;
38060e545d30SBernard Iremonger 
38070e545d30SBernard Iremonger 	port = &ports[slave_pid];
38080a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
38090a0821bcSPaulis Gributs 	if (ret != 0) {
38100a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
38110a0821bcSPaulis Gributs 			"Failed to get device info for port id %d, "
38120a0821bcSPaulis Gributs 			"cannot determine if the port is a bonded slave\n",
38130a0821bcSPaulis Gributs 			slave_pid);
38140a0821bcSPaulis Gributs 		return 0;
38150a0821bcSPaulis Gributs 	}
38160a0821bcSPaulis Gributs 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3817b8b8b344SMatan Azrad 		return 1;
3818b8b8b344SMatan Azrad 	return 0;
38190e545d30SBernard Iremonger }
38200e545d30SBernard Iremonger 
3821013af9b6SIntel const uint16_t vlan_tags[] = {
3822013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
3823013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
3824013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
3825013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
3826013af9b6SIntel };
3827013af9b6SIntel 
3828013af9b6SIntel static  int
3829ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
38301a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
38311a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
38321a572499SJingjing Wu 		 uint8_t pfc_en)
3833013af9b6SIntel {
3834013af9b6SIntel 	uint8_t i;
3835ac7c491cSKonstantin Ananyev 	int32_t rc;
3836ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
3837af75078fSIntel 
3838af75078fSIntel 	/*
3839013af9b6SIntel 	 * Builds up the correct configuration for DCB+VT based on the VLAN tags array
3840013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
3841af75078fSIntel 	 */
38421a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
38431a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
38441a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
38451a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
38461a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3847013af9b6SIntel 
3848547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
38491a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
38501a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
38511a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
38521a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
38531a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
38541a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3855013af9b6SIntel 
38561a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
38571a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
38581a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
38591a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
38601a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
3861af75078fSIntel 		}
3862013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3863f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3864f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3865013af9b6SIntel 		}
3866013af9b6SIntel 
3867013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
3868f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
3869f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
3870f9295aa2SXiaoyu Min 					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
387132e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
38721a572499SJingjing Wu 	} else {
38731a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
38741a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
38751a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
38761a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
3877013af9b6SIntel 
38785139bc12STing Xu 		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
38795139bc12STing Xu 
3880ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3881ac7c491cSKonstantin Ananyev 		if (rc != 0)
3882ac7c491cSKonstantin Ananyev 			return rc;
3883ac7c491cSKonstantin Ananyev 
38841a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
38851a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
38861a572499SJingjing Wu 
3887bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3888bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
3889bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
3890013af9b6SIntel 		}
3891ac7c491cSKonstantin Ananyev 
3892f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
3893f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
3894f9295aa2SXiaoyu Min 					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3895ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
389632e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
38971a572499SJingjing Wu 	}
38981a572499SJingjing Wu 
38991a572499SJingjing Wu 	if (pfc_en)
39001a572499SJingjing Wu 		eth_conf->dcb_capability_en =
39011a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3902013af9b6SIntel 	else
3903013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3904013af9b6SIntel 
3905013af9b6SIntel 	return 0;
3906013af9b6SIntel }
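
/*
 * Worked example of the VMDQ+DCB mapping above, assuming
 * num_tcs == ETH_4_TCS (hence nb_queue_pools == ETH_32_POOLS): entry i
 * maps vlan_tags[i] to pool bit i % 32, and user priority i to traffic
 * class i % 4.
 *
 *	pool_map[0]:  vlan_id  0  ->  pools 1 << 0
 *	pool_map[5]:  vlan_id  5  ->  pools 1 << 5
 *	pool_map[31]: vlan_id 31  ->  pools 1 << 31
 *	dcb_tc[]: {0, 1, 2, 3, 0, 1, 2, 3}
 */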
3907013af9b6SIntel 
3908013af9b6SIntel int
39091a572499SJingjing Wu init_port_dcb_config(portid_t pid,
39101a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
39111a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
39121a572499SJingjing Wu 		     uint8_t pfc_en)
3913013af9b6SIntel {
3914013af9b6SIntel 	struct rte_eth_conf port_conf;
3915013af9b6SIntel 	struct rte_port *rte_port;
3916013af9b6SIntel 	int retval;
3917013af9b6SIntel 	uint16_t i;
3918013af9b6SIntel 
3919a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
3920a550baf2SMin Hu (Connor) 		fprintf(stderr, "The multi-process feature does not support DCB.\n");
3921a550baf2SMin Hu (Connor) 		return -ENOTSUP;
3922a550baf2SMin Hu (Connor) 	}
39232a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3924013af9b6SIntel 
3925c1ba6c32SHuisong Li 	/* retain the original device configuration. */
3926c1ba6c32SHuisong Li 	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
3927d5354e89SYanglong Wu 
3928013af9b6SIntel 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
3929ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3930013af9b6SIntel 	if (retval < 0)
3931013af9b6SIntel 		return retval;
39320074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3933013af9b6SIntel 
39342f203d44SQi Zhang 	/* re-configure the device. */
39352b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
39362b0e0ebaSChenbo Xia 	if (retval < 0)
39372b0e0ebaSChenbo Xia 		return retval;
39386f51deb9SIvan Ilchenko 
39396f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
39406f51deb9SIvan Ilchenko 	if (retval != 0)
39416f51deb9SIvan Ilchenko 		return retval;
39422a977b89SWenzhuo Lu 
39432a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
39442a977b89SWenzhuo Lu 	 * the queue ids of the VMDQ pools start after the PF queues.
39452a977b89SWenzhuo Lu 	 */
39462a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
39472a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
394861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
394961a3b0e5SAndrew Rybchenko 			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
395061a3b0e5SAndrew Rybchenko 			pid);
39512a977b89SWenzhuo Lu 		return -1;
39522a977b89SWenzhuo Lu 	}
39532a977b89SWenzhuo Lu 
39542a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same DCB capability
39552a977b89SWenzhuo Lu 	 * and the same number of rxq and txq in DCB mode
39562a977b89SWenzhuo Lu 	 */
39572a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
395886ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
395986ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
396086ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
396186ef65eeSBernard Iremonger 		} else {
39622a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
39632a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
396486ef65eeSBernard Iremonger 		}
39652a977b89SWenzhuo Lu 	} else {
39662a977b89SWenzhuo Lu 		/* if VT is disabled, use all PF queues */
39672a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
39682a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
39692a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
39702a977b89SWenzhuo Lu 		} else {
39712a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
39722a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
39732a977b89SWenzhuo Lu 
39742a977b89SWenzhuo Lu 		}
39752a977b89SWenzhuo Lu 	}
39762a977b89SWenzhuo Lu 	rx_free_thresh = 64;
39772a977b89SWenzhuo Lu 
3978013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3979013af9b6SIntel 
3980f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3981013af9b6SIntel 	/* VLAN filter */
39820074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
39831a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3984013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3985013af9b6SIntel 
3986a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3987a5279d25SIgor Romanov 	if (retval != 0)
3988a5279d25SIgor Romanov 		return retval;
3989a5279d25SIgor Romanov 
39907741e4cfSIntel 	rte_port->dcb_flag = 1;
39917741e4cfSIntel 
3992a690a070SHuisong Li 	/* Enter DCB configuration status */
3993a690a070SHuisong Li 	dcb_config = 1;
3994a690a070SHuisong Li 
3995013af9b6SIntel 	return 0;
3996af75078fSIntel }
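
/*
 * Minimal call sketch (parameter values are illustrative assumptions;
 * in testpmd this path is driven by the "port config ... dcb ..."
 * command): enable 4-TC DCB without VMDQ, with priority flow control.
 *
 *	if (init_port_dcb_config(pid, DCB_ENABLED, ETH_4_TCS, 1) != 0)
 *		fprintf(stderr, "DCB config failed for port %u\n", pid);
 */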
3997af75078fSIntel 
3998ffc468ffSTetsuya Mukawa static void
3999ffc468ffSTetsuya Mukawa init_port(void)
4000ffc468ffSTetsuya Mukawa {
40011b9f2746SGregory Etelson 	int i;
40021b9f2746SGregory Etelson 
4003ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
4004ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
4005ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4006ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
4007ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
4008ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
4009ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
4010ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
4011ffc468ffSTetsuya Mukawa 	}
40121b9f2746SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
401363b72657SIvan Ilchenko 		ports[i].xstats_info.allocated = false;
40151b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
40141b9f2746SGregory Etelson 	}
401629841336SPhil Yang 	/* Initialize ports NUMA structures */
401729841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
401829841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
401929841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4020ffc468ffSTetsuya Mukawa }
4021ffc468ffSTetsuya Mukawa 
4022d3a274ceSZhihong Wang static void
4023d3a274ceSZhihong Wang force_quit(void)
4024d3a274ceSZhihong Wang {
4025d3a274ceSZhihong Wang 	pmd_test_exit();
4026d3a274ceSZhihong Wang 	prompt_exit();
4027d3a274ceSZhihong Wang }
4028d3a274ceSZhihong Wang 
4029d3a274ceSZhihong Wang static void
4030cfea1f30SPablo de Lara print_stats(void)
4031cfea1f30SPablo de Lara {
4032cfea1f30SPablo de Lara 	uint8_t i;
4033cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
4034cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4035cfea1f30SPablo de Lara 
4036cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
4037cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
4038cfea1f30SPablo de Lara 
4039cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
4040cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4041cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
4042683d1e82SIgor Romanov 
4043683d1e82SIgor Romanov 	fflush(stdout);
4044cfea1f30SPablo de Lara }
4045cfea1f30SPablo de Lara 
4046cfea1f30SPablo de Lara static void
4047d3a274ceSZhihong Wang signal_handler(int signum)
4048d3a274ceSZhihong Wang {
4049d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
405061a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4051d3a274ceSZhihong Wang 			signum);
4052a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
4053102b7329SReshma Pattan 		/* uninitialize packet capture framework */
4054102b7329SReshma Pattan 		rte_pdump_uninit();
4055102b7329SReshma Pattan #endif
4056a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
40578b36297dSAmit Gupta 		if (latencystats_enabled != 0)
405862d3216dSReshma Pattan 			rte_latencystats_uninit();
405962d3216dSReshma Pattan #endif
4060d3a274ceSZhihong Wang 		force_quit();
4061d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
4062d9a191a0SPhil Yang 		f_quit = 1;
4063d3a274ceSZhihong Wang 		/* exit with the expected status */
4064761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4065d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
4066d3a274ceSZhihong Wang 		kill(getpid(), signum);
4067761f7ae1SJie Zhou #endif
4068d3a274ceSZhihong Wang 	}
4069d3a274ceSZhihong Wang }
4070d3a274ceSZhihong Wang 
4071af75078fSIntel int
4072af75078fSIntel main(int argc, char** argv)
4073af75078fSIntel {
4074af75078fSIntel 	int diag;
4075f8244c63SZhiyong Yang 	portid_t port_id;
40764918a357SXiaoyun Li 	uint16_t count;
4077fb73e096SJeff Guo 	int ret;
4078af75078fSIntel 
4079d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
4080d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
4081d3a274ceSZhihong Wang 
4082285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
4083285fd101SOlivier Matz 	if (testpmd_logtype < 0)
408416267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type");
4085285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4086285fd101SOlivier Matz 
40879201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
40889201806eSStephen Hemminger 	if (diag < 0)
408916267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
409016267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
40919201806eSStephen Hemminger 
409297b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
409397b5d8b5SThomas Monjalon 	if (ret != 0)
409416267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
409597b5d8b5SThomas Monjalon 
4096a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
40974aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
4098e9436f54STiwei Bie 	rte_pdump_init();
40994aa0d012SAnatoly Burakov #endif
41004aa0d012SAnatoly Burakov 
41014918a357SXiaoyun Li 	count = 0;
41024918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
41034918a357SXiaoyun Li 		ports_ids[count] = port_id;
41044918a357SXiaoyun Li 		count++;
41054918a357SXiaoyun Li 	}
41064918a357SXiaoyun Li 	nb_ports = (portid_t) count;
41074aa0d012SAnatoly Burakov 	if (nb_ports == 0)
41084aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
41094aa0d012SAnatoly Burakov 
41104aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
41114aa0d012SAnatoly Burakov 	init_port();
41124aa0d012SAnatoly Burakov 
41134aa0d012SAnatoly Burakov 	set_def_fwd_config();
41144aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
411516267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
411616267ceeSStephen Hemminger 			 "Check the core mask argument\n");
41174aa0d012SAnatoly Burakov 
4118e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
4119a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4120e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
4121e505d84cSAnatoly Burakov #endif
4122a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
4123e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
4124e505d84cSAnatoly Burakov #endif
4125e505d84cSAnatoly Burakov 
4126fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
41275fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
4128fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
4129fb7b8b32SAnatoly Burakov #else
4130fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
4131fb7b8b32SAnatoly Burakov #endif
4132fb7b8b32SAnatoly Burakov 
4133e505d84cSAnatoly Burakov 	argc -= diag;
4134e505d84cSAnatoly Burakov 	argv += diag;
4135e505d84cSAnatoly Burakov 	if (argc > 1)
4136e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
4137e505d84cSAnatoly Burakov 
4138761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4139e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4140285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
41411c036b16SEelco Chaudron 			strerror(errno));
41421c036b16SEelco Chaudron 	}
4143761f7ae1SJie Zhou #endif
41441c036b16SEelco Chaudron 
414599cabef0SPablo de Lara 	if (tx_first && interactive)
414699cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
414799cabef0SPablo de Lara 				"interactive mode.\n");
41488820cba4SDavid Hunt 
41498820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
415061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
415161a3b0e5SAndrew Rybchenko 			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
41528820cba4SDavid Hunt 		lsc_interrupt = 0;
41538820cba4SDavid Hunt 	}
41548820cba4SDavid Hunt 
41555a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
415661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
415761a3b0e5SAndrew Rybchenko 			"Warning: Either rx or tx queues should be non-zero\n");
41585a8fb55cSReshma Pattan 
41595a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
416061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
416161a3b0e5SAndrew Rybchenko 			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4162af75078fSIntel 			nb_rxq, nb_txq);
4163af75078fSIntel 
4164af75078fSIntel 	init_config();
4165fb73e096SJeff Guo 
4166fb73e096SJeff Guo 	if (hot_plug) {
41672049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
4168fb73e096SJeff Guo 		if (ret) {
41692049c511SJeff Guo 			RTE_LOG(ERR, EAL,
41702049c511SJeff Guo 				"Failed to enable hotplug handling.\n");
4171fb73e096SJeff Guo 			return -1;
4172fb73e096SJeff Guo 		}
4173fb73e096SJeff Guo 
41742049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
41752049c511SJeff Guo 		if (ret) {
41762049c511SJeff Guo 			RTE_LOG(ERR, EAL,
41772049c511SJeff Guo 				"Failed to start device event monitoring.\n");
41782049c511SJeff Guo 			return -1;
41792049c511SJeff Guo 		}
41802049c511SJeff Guo 
41812049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
4182cc1bf307SJeff Guo 			dev_event_callback, NULL);
41832049c511SJeff Guo 		if (ret) {
41842049c511SJeff Guo 			RTE_LOG(ERR, EAL,
41852049c511SJeff Guo 				"Failed to register device event callback\n");
41862049c511SJeff Guo 			return -1;
41872049c511SJeff Guo 		}
4188fb73e096SJeff Guo 	}
4189fb73e096SJeff Guo 
41906937d210SStephen Hemminger 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4191148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
4192af75078fSIntel 
4193ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
419434fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
419534fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
419634fc1051SIvan Ilchenko 		if (ret != 0)
419761a3b0e5SAndrew Rybchenko 			fprintf(stderr,
419861a3b0e5SAndrew Rybchenko 				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
419934fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
420034fc1051SIvan Ilchenko 	}
4201af75078fSIntel 
42027e4441c8SRemy Horton 	/* Init metrics library */
42037e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
42047e4441c8SRemy Horton 
4205a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
420662d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
420762d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
420862d3216dSReshma Pattan 		if (ret)
420961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
421061a3b0e5SAndrew Rybchenko 				"Warning: latencystats init() returned error %d\n",
421161a3b0e5SAndrew Rybchenko 				ret);
421261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Latencystats running on lcore %d\n",
421362d3216dSReshma Pattan 			latencystats_lcore_id);
421462d3216dSReshma Pattan 	}
421562d3216dSReshma Pattan #endif
421662d3216dSReshma Pattan 
42177e4441c8SRemy Horton 	/* Setup bitrate stats */
4218a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4219e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
42207e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
42217e4441c8SRemy Horton 		if (bitrate_data == NULL)
4222e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
4223e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
42247e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
4225e25e6c70SRemy Horton 	}
42267e4441c8SRemy Horton #endif
4227a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE
422881ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
422981ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
423081ef862bSAllain Legacy 
4231ca7feb22SCyril Chemparathy 	if (interactive == 1) {
4232ca7feb22SCyril Chemparathy 		if (auto_start) {
4233ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
4234ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
4235ca7feb22SCyril Chemparathy 		}
4236af75078fSIntel 		prompt();
42370de738cfSJiayu Hu 		pmd_test_exit();
4238ca7feb22SCyril Chemparathy 	} else
42390d56cb81SThomas Monjalon #endif
42400d56cb81SThomas Monjalon 	{
4241af75078fSIntel 		char c;
4242af75078fSIntel 		int rc;
4243af75078fSIntel 
4244d9a191a0SPhil Yang 		f_quit = 0;
4245d9a191a0SPhil Yang 
4246af75078fSIntel 		printf("No interactive commandline given, start packet forwarding\n");
424799cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
4248cfea1f30SPablo de Lara 		if (stats_period != 0) {
4249cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
4250cfea1f30SPablo de Lara 			uint64_t timer_period;
4251cfea1f30SPablo de Lara 
4252cfea1f30SPablo de Lara 			/* Convert to number of cycles */
4253cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
4254cfea1f30SPablo de Lara 
4255d9a191a0SPhil Yang 			while (f_quit == 0) {
4256cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
4257cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
4258cfea1f30SPablo de Lara 
4259cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
4260cfea1f30SPablo de Lara 					print_stats();
4261cfea1f30SPablo de Lara 					/* Reset the timer */
4262cfea1f30SPablo de Lara 					diff_time = 0;
4263cfea1f30SPablo de Lara 				}
4264cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
4265cfea1f30SPablo de Lara 				prev_time = cur_time;
4266761f7ae1SJie Zhou 				rte_delay_us_sleep(US_PER_S);
4267cfea1f30SPablo de Lara 			}
4268cfea1f30SPablo de Lara 		}
4269cfea1f30SPablo de Lara 
4270af75078fSIntel 		printf("Press enter to exit\n");
4271af75078fSIntel 		rc = read(0, &c, 1);
4272d3a274ceSZhihong Wang 		pmd_test_exit();
4273af75078fSIntel 		if (rc < 0)
4274af75078fSIntel 			return 1;
4275af75078fSIntel 	}
4276af75078fSIntel 
42775e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
42785e516c89SStephen Hemminger 	if (ret != 0)
42795e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
42805e516c89SStephen Hemminger 			 "EAL cleanup failed: %s\n", strerror(-ret));
42815e516c89SStephen Hemminger 
42825e516c89SStephen Hemminger 	return EXIT_SUCCESS;
4283af75078fSIntel }
4284