xref: /dpdk/app/test-pmd/testpmd.c (revision 1b9f274623b8019493bd0c0ff97af9c81c00be4b)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30af75078fSIntel #include <rte_memory.h>
31af75078fSIntel #include <rte_memcpy.h>
32af75078fSIntel #include <rte_launch.h>
33af75078fSIntel #include <rte_eal.h>
34284c908cSGaetan Rivet #include <rte_alarm.h>
35af75078fSIntel #include <rte_per_lcore.h>
36af75078fSIntel #include <rte_lcore.h>
37af75078fSIntel #include <rte_atomic.h>
38af75078fSIntel #include <rte_branch_prediction.h>
39af75078fSIntel #include <rte_mempool.h>
40af75078fSIntel #include <rte_malloc.h>
41af75078fSIntel #include <rte_mbuf.h>
420e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
43af75078fSIntel #include <rte_interrupts.h>
44af75078fSIntel #include <rte_pci.h>
45af75078fSIntel #include <rte_ether.h>
46af75078fSIntel #include <rte_ethdev.h>
47edab33b1STetsuya Mukawa #include <rte_dev.h>
48af75078fSIntel #include <rte_string_fns.h>
49e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
50e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
51e261265eSRadu Nicolau #endif
52102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
53102b7329SReshma Pattan #include <rte_pdump.h>
54102b7329SReshma Pattan #endif
55938a184aSAdrien Mazarguil #include <rte_flow.h>
567e4441c8SRemy Horton #include <rte_metrics.h>
5754f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
587e4441c8SRemy Horton #include <rte_bitrate.h>
597e4441c8SRemy Horton #endif
6062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6162d3216dSReshma Pattan #include <rte_latencystats.h>
6262d3216dSReshma Pattan #endif
63af75078fSIntel 
64af75078fSIntel #include "testpmd.h"
65af75078fSIntel 
66c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
67c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
69c7f5dba7SAnatoly Burakov #else
70c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
71c7f5dba7SAnatoly Burakov #endif
72c7f5dba7SAnatoly Burakov 
73c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
74c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
75c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
76c7f5dba7SAnatoly Burakov #else
77c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
78c7f5dba7SAnatoly Burakov #endif
79c7f5dba7SAnatoly Burakov 
80c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
8172512e18SViacheslav Ovsiienko #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
1406d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
169af75078fSIntel /*
170af75078fSIntel  * Forwarding engines.
171af75078fSIntel  */
172af75078fSIntel struct fwd_engine * fwd_engines[] = {
173af75078fSIntel 	&io_fwd_engine,
174af75078fSIntel 	&mac_fwd_engine,
175d47388f1SCyril Chemparathy 	&mac_swap_engine,
176e9e23a61SCyril Chemparathy 	&flow_gen_engine,
177af75078fSIntel 	&rx_only_engine,
178af75078fSIntel 	&tx_only_engine,
179af75078fSIntel 	&csum_fwd_engine,
180168dfa61SIvan Boule 	&icmp_echo_engine,
1813c156061SJens Freimann 	&noisy_vnf_engine,
1822564abdaSShiri Kuzin 	&five_tuple_swap_fwd_engine,
183af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
184af75078fSIntel 	&ieee1588_fwd_engine,
185af75078fSIntel #endif
186af75078fSIntel 	NULL,
187af75078fSIntel };
188af75078fSIntel 
189401b744dSShahaf Shuler struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
19059fcf854SShahaf Shuler uint16_t mempool_flags;
191401b744dSShahaf Shuler 
192af75078fSIntel struct fwd_config cur_fwd_config;
193af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
194bf56fce1SZhihong Wang uint32_t retry_enabled;
195bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
196bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197af75078fSIntel 
198af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
199c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
200c8798818SIntel                                       * specified on command-line. */
201cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
202d9a191a0SPhil Yang 
/*
 * When running inside a container, a process started with the 'stats-period'
 * option cannot be terminated from outside. Set a flag so the stats-period
 * loop exits after SIGINT/SIGTERM is received.
 */
207d9a191a0SPhil Yang uint8_t f_quit;
208d9a191a0SPhil Yang 
209af75078fSIntel /*
210af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
211af75078fSIntel  */
212af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
213af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
214af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
215af75078fSIntel };
216af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
217af75078fSIntel 
21879bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
21979bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
22079bec05bSKonstantin Ananyev 
22182010ef5SYongseok Koh uint8_t txonly_multi_flow;
22282010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
22382010ef5SYongseok Koh 
2244940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter;
2254940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */
2264940344dSViacheslav Ovsiienko 
2274940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra;
2284940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */
2294940344dSViacheslav Ovsiienko 
230af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
231e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
232af75078fSIntel 
233900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
234900550deSIntel uint8_t dcb_config = 0;
235900550deSIntel 
236900550deSIntel /* Whether the dcb is in testing status */
237900550deSIntel uint8_t dcb_test = 0;
238900550deSIntel 
239af75078fSIntel /*
240af75078fSIntel  * Configurable number of RX/TX queues.
241af75078fSIntel  */
2421c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
243af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
244af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
245af75078fSIntel 
246af75078fSIntel /*
247af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2488599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
249af75078fSIntel  */
2508599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2518599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
252af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
253af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
254af75078fSIntel 
255f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
256af75078fSIntel /*
257af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
258af75078fSIntel  */
259af75078fSIntel 
260f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
261f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
262f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
263af75078fSIntel 
264f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
265f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
266f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
267af75078fSIntel 
268af75078fSIntel /*
269af75078fSIntel  * Configurable value of RX free threshold.
270af75078fSIntel  */
271f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
272af75078fSIntel 
273af75078fSIntel /*
274ce8d5614SIntel  * Configurable value of RX drop enable.
275ce8d5614SIntel  */
276f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
277ce8d5614SIntel 
278ce8d5614SIntel /*
279af75078fSIntel  * Configurable value of TX free threshold.
280af75078fSIntel  */
281f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
282af75078fSIntel 
283af75078fSIntel /*
284af75078fSIntel  * Configurable value of TX RS bit threshold.
285af75078fSIntel  */
286f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
287af75078fSIntel 
288af75078fSIntel /*
2893c156061SJens Freimann  * Configurable value of buffered packets before sending.
2903c156061SJens Freimann  */
2913c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2923c156061SJens Freimann 
2933c156061SJens Freimann /*
2943c156061SJens Freimann  * Configurable value of packet buffer timeout.
2953c156061SJens Freimann  */
2963c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2973c156061SJens Freimann 
2983c156061SJens Freimann /*
2993c156061SJens Freimann  * Configurable value for size of VNF internal memory area
3003c156061SJens Freimann  * used for simulating noisy neighbour behaviour
3013c156061SJens Freimann  */
3023c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
3033c156061SJens Freimann 
3043c156061SJens Freimann /*
3053c156061SJens Freimann  * Configurable value of number of random writes done in
3063c156061SJens Freimann  * VNF simulation memory area.
3073c156061SJens Freimann  */
3083c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3093c156061SJens Freimann 
3103c156061SJens Freimann /*
3113c156061SJens Freimann  * Configurable value of number of random reads done in
3123c156061SJens Freimann  * VNF simulation memory area.
3133c156061SJens Freimann  */
3143c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3153c156061SJens Freimann 
3163c156061SJens Freimann /*
3173c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3183c156061SJens Freimann  * VNF simulation memory area.
3193c156061SJens Freimann  */
3203c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3213c156061SJens Freimann 
3223c156061SJens Freimann /*
323af75078fSIntel  * Receive Side Scaling (RSS) configuration.
324af75078fSIntel  */
3258a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
326af75078fSIntel 
327af75078fSIntel /*
328af75078fSIntel  * Port topology configuration
329af75078fSIntel  */
330af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
331af75078fSIntel 
3327741e4cfSIntel /*
3337741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3347741e4cfSIntel  */
3357741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3367741e4cfSIntel 
337af75078fSIntel /*
3387ee3e944SVasily Philipov  * Flow API isolated mode.
3397ee3e944SVasily Philipov  */
3407ee3e944SVasily Philipov uint8_t flow_isolate_all;
3417ee3e944SVasily Philipov 
3427ee3e944SVasily Philipov /*
343bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
344bc202406SDavid Marchand  */
345bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
346bc202406SDavid Marchand 
347bc202406SDavid Marchand /*
3486937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3496937d210SStephen Hemminger  */
3506937d210SStephen Hemminger uint8_t no_device_start = 0;
3516937d210SStephen Hemminger 
3526937d210SStephen Hemminger /*
3538ea656f8SGaetan Rivet  * Enable link status change notification
3548ea656f8SGaetan Rivet  */
3558ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3568ea656f8SGaetan Rivet 
3578ea656f8SGaetan Rivet /*
358284c908cSGaetan Rivet  * Enable device removal notification.
359284c908cSGaetan Rivet  */
360284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
361284c908cSGaetan Rivet 
362fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
363fb73e096SJeff Guo 
3644f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3654f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3664f1ed78eSThomas Monjalon 
367b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
368b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
369b0a9354aSPavan Nikhilesh 
37001817b10SBing Zhao /* Hairpin ports configuration mode. */
37101817b10SBing Zhao uint16_t hairpin_mode;
37201817b10SBing Zhao 
37397b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
37497b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
37597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
37697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
37797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
37897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
37997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
38097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
38197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
38297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
38397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
38497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
3850e459ffaSDong Zhou 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
38697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
38797b5d8b5SThomas Monjalon };
38897b5d8b5SThomas Monjalon 
389284c908cSGaetan Rivet /*
3903af72783SGaetan Rivet  * Display or mask ether events
3913af72783SGaetan Rivet  * Default to all events except VF_MBOX
3923af72783SGaetan Rivet  */
3933af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3943af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3953af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3963af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
397badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3983af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3990e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
4000e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
401e505d84cSAnatoly Burakov /*
402e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
403e505d84cSAnatoly Burakov  */
404e505d84cSAnatoly Burakov int do_mlockall = 0;
4053af72783SGaetan Rivet 
4063af72783SGaetan Rivet /*
4077b7e5ba7SIntel  * NIC bypass mode configuration options.
4087b7e5ba7SIntel  */
4097b7e5ba7SIntel 
41050c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
4117b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
412e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4137b7e5ba7SIntel #endif
4147b7e5ba7SIntel 
415e261265eSRadu Nicolau 
41662d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
41762d3216dSReshma Pattan 
41862d3216dSReshma Pattan /*
41962d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
42062d3216dSReshma Pattan  */
42162d3216dSReshma Pattan uint8_t latencystats_enabled;
42262d3216dSReshma Pattan 
42362d3216dSReshma Pattan /*
 * Lcore ID to serve latency statistics.
42562d3216dSReshma Pattan  */
42662d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
42762d3216dSReshma Pattan 
42862d3216dSReshma Pattan #endif
42962d3216dSReshma Pattan 
4307b7e5ba7SIntel /*
431af75078fSIntel  * Ethernet device configuration.
432af75078fSIntel  */
433af75078fSIntel struct rte_eth_rxmode rx_mode = {
43435b2d13fSOlivier Matz 	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
43535b2d13fSOlivier Matz 		/**< Default maximum frame length. */
436af75078fSIntel };
437af75078fSIntel 
43807e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
43907e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
44007e5f7bdSShahaf Shuler };
441fd8c20aaSShahaf Shuler 
442af75078fSIntel struct rte_fdir_conf fdir_conf = {
443af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
444af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
445af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
446d9d5e6f2SJingjing Wu 	.mask = {
44726f579aaSWei Zhao 		.vlan_tci_mask = 0xFFEF,
448d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
449d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
450d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
451d9d5e6f2SJingjing Wu 		},
452d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
453d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
454d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
455d9d5e6f2SJingjing Wu 		},
456d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
457d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
45847b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
45947b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
46047b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
461d9d5e6f2SJingjing Wu 	},
462af75078fSIntel 	.drop_queue = 127,
463af75078fSIntel };
464af75078fSIntel 
4652950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
466af75078fSIntel 
467ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
468ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
469ed30d9b6SIntel 
470ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
471ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
472ed30d9b6SIntel 
473ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
474ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
475ed30d9b6SIntel 
476a4fd5eeeSElza Mathew /*
477a4fd5eeeSElza Mathew  * Display zero values by default for xstats
478a4fd5eeeSElza Mathew  */
479a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
480a4fd5eeeSElza Mathew 
481bc700b67SDharmik Thakkar /*
482bc700b67SDharmik Thakkar  * Measure of CPU cycles disabled by default
483bc700b67SDharmik Thakkar  */
484bc700b67SDharmik Thakkar uint8_t record_core_cycles;
485bc700b67SDharmik Thakkar 
4860e4b1963SDharmik Thakkar /*
4870e4b1963SDharmik Thakkar  * Display of RX and TX bursts disabled by default
4880e4b1963SDharmik Thakkar  */
4890e4b1963SDharmik Thakkar uint8_t record_burst_stats;
4900e4b1963SDharmik Thakkar 
491c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
492c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4937acf894dSStephen Hurd 
49454f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
4957e4441c8SRemy Horton /* Bitrate statistics */
4967e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
497e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
498e25e6c70SRemy Horton uint8_t bitrate_enabled;
499e25e6c70SRemy Horton #endif
5007e4441c8SRemy Horton 
501b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
502b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
503b40f8d78SJiayu Hu 
504f9295aa2SXiaoyu Min /*
505f9295aa2SXiaoyu Min  * hexadecimal bitmask of RX mq mode can be enabled.
506f9295aa2SXiaoyu Min  */
507f9295aa2SXiaoyu Min enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
508f9295aa2SXiaoyu Min 
509ed30d9b6SIntel /* Forward function declarations */
510c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
51128caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
51228caa76aSZhiyong Yang 						   struct rte_port *port);
513edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
514f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
51576ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
516d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
517cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
518fb73e096SJeff Guo 				enum rte_dev_event_type type,
519fb73e096SJeff Guo 				void *param);
520ce8d5614SIntel 
521ce8d5614SIntel /*
522ce8d5614SIntel  * Check if all the ports are started.
523ce8d5614SIntel  * If yes, return positive value. If not, return zero.
524ce8d5614SIntel  */
525ce8d5614SIntel static int all_ports_started(void);
526ed30d9b6SIntel 
52752f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
52835b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
52952f38a20SJiayu Hu 
530b57b66a9SOri Kam /* Holds the registered mbuf dynamic flags names. */
531b57b66a9SOri Kam char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
532b57b66a9SOri Kam 
533af75078fSIntel /*
53498a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
535c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
536c9cafcc8SShahaf Shuler  */
537c9cafcc8SShahaf Shuler int
538c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
539c9cafcc8SShahaf Shuler {
540c9cafcc8SShahaf Shuler 	unsigned int i;
541c9cafcc8SShahaf Shuler 
542c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
543c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
544c9cafcc8SShahaf Shuler 			return 0;
545c9cafcc8SShahaf Shuler 	}
546c9cafcc8SShahaf Shuler 	return 1;
547c9cafcc8SShahaf Shuler }
548c9cafcc8SShahaf Shuler 
549c9cafcc8SShahaf Shuler /*
550af75078fSIntel  * Setup default configuration.
551af75078fSIntel  */
552af75078fSIntel static void
553af75078fSIntel set_default_fwd_lcores_config(void)
554af75078fSIntel {
555af75078fSIntel 	unsigned int i;
556af75078fSIntel 	unsigned int nb_lc;
5577acf894dSStephen Hurd 	unsigned int sock_num;
558af75078fSIntel 
559af75078fSIntel 	nb_lc = 0;
560af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
561dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
562dbfb8ec7SPhil Yang 			continue;
563c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
564c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
565c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
566c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
567c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
568c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
569c9cafcc8SShahaf Shuler 			}
570c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5717acf894dSStephen Hurd 		}
572f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
573f54fe5eeSStephen Hurd 			continue;
574f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
575af75078fSIntel 	}
576af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
577af75078fSIntel 	nb_cfg_lcores = nb_lcores;
578af75078fSIntel 	nb_fwd_lcores = 1;
579af75078fSIntel }
580af75078fSIntel 
581af75078fSIntel static void
582af75078fSIntel set_def_peer_eth_addrs(void)
583af75078fSIntel {
584af75078fSIntel 	portid_t i;
585af75078fSIntel 
586af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
58735b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
588af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
589af75078fSIntel 	}
590af75078fSIntel }
591af75078fSIntel 
592af75078fSIntel static void
593af75078fSIntel set_default_fwd_ports_config(void)
594af75078fSIntel {
595af75078fSIntel 	portid_t pt_id;
59665a7360cSMatan Azrad 	int i = 0;
597af75078fSIntel 
598effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
59965a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
600af75078fSIntel 
601effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
602effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
603effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
604effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
605effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
606effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
607effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
608effdb8bbSPhil Yang 			}
609effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
610effdb8bbSPhil Yang 		}
611effdb8bbSPhil Yang 	}
612effdb8bbSPhil Yang 
613af75078fSIntel 	nb_cfg_ports = nb_ports;
614af75078fSIntel 	nb_fwd_ports = nb_ports;
615af75078fSIntel }
616af75078fSIntel 
/* Install the complete default forwarding configuration: forwarding
 * lcores, peer MAC addresses and forwarding ports.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
624af75078fSIntel 
625c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
626c7f5dba7SAnatoly Burakov static int
627c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
628c7f5dba7SAnatoly Burakov {
629c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
630c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
631c7f5dba7SAnatoly Burakov 
632c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
633c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
634c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
635c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
636c7f5dba7SAnatoly Burakov 	 */
637c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
638c7f5dba7SAnatoly Burakov 
639c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
640c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
641c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
642c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
643c7f5dba7SAnatoly Burakov 		return -1;
644c7f5dba7SAnatoly Burakov 	}
645c7f5dba7SAnatoly Burakov 
646c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
647c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
648c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
649c7f5dba7SAnatoly Burakov 
650c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
651c7f5dba7SAnatoly Burakov 
652c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
653c7f5dba7SAnatoly Burakov 
654c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
655c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
656c7f5dba7SAnatoly Burakov 		return -1;
657c7f5dba7SAnatoly Burakov 	}
658c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
659c7f5dba7SAnatoly Burakov 
660c7f5dba7SAnatoly Burakov 	return 0;
661c7f5dba7SAnatoly Burakov }
662c7f5dba7SAnatoly Burakov 
663c7f5dba7SAnatoly Burakov static int
664c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
665c7f5dba7SAnatoly Burakov {
666c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
667c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
668c7f5dba7SAnatoly Burakov 	 */
6699d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
670c7f5dba7SAnatoly Burakov 
671c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
672c7f5dba7SAnatoly Burakov }
673c7f5dba7SAnatoly Burakov 
674c7f5dba7SAnatoly Burakov static void *
675c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
676c7f5dba7SAnatoly Burakov {
677c7f5dba7SAnatoly Burakov 	void *addr;
678c7f5dba7SAnatoly Burakov 	int flags;
679c7f5dba7SAnatoly Burakov 
680c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
681c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
682c7f5dba7SAnatoly Burakov 	if (huge)
683c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
684c7f5dba7SAnatoly Burakov 
685c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
686c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
687c7f5dba7SAnatoly Burakov 		return NULL;
688c7f5dba7SAnatoly Burakov 
689c7f5dba7SAnatoly Burakov 	return addr;
690c7f5dba7SAnatoly Burakov }
691c7f5dba7SAnatoly Burakov 
/* Description of an externally-allocated memory area, filled in by
 * create_extmem() and consumed by setup_extmem().
 */
struct extmem_param {
	void *addr;		/* base address of the mapped area */
	size_t len;		/* total length of the area, in bytes */
	size_t pgsz;		/* page size the area was mapped with */
	rte_iova_t *iova_table;	/* malloc'd per-page IOVA addresses; caller frees */
	unsigned int iova_table_len;	/* number of entries in iova_table */
};
699c7f5dba7SAnatoly Burakov 
/*
 * Allocate an anonymous memory area large enough for @nb_mbufs objects of
 * @mbuf_sz bytes and record the IOVA address of every page in it.  Page
 * sizes from pgsizes[] are tried in order until one can be mapped.  On
 * success, fills in @param (address, length, page size, IOVA table) and
 * returns 0; the caller owns the mapping and the malloc'd IOVA table.
 * Returns -1 on failure.
 */
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		/* (can only trigger on 32-bit, where size_t is narrower
		 * than the uint64_t page sizes)
		 */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		/* NOTE(review): mlock() return value is ignored here —
		 * presumably best-effort; confirm that failure to lock is
		 * acceptable for the non-hugepage path.
		 */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		/* success with this page size; stop trying others */
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	/* hand the area and its IOVA table over to the caller */
	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	/* only reachable after addr was successfully mapped */
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
790c7f5dba7SAnatoly Burakov 
791c7f5dba7SAnatoly Burakov static int
792c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
793c7f5dba7SAnatoly Burakov {
794c7f5dba7SAnatoly Burakov 	struct extmem_param param;
795c7f5dba7SAnatoly Burakov 	int socket_id, ret;
796c7f5dba7SAnatoly Burakov 
797c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
798c7f5dba7SAnatoly Burakov 
799c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
800c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
801c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
802c7f5dba7SAnatoly Burakov 		/* create our heap */
803c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
804c7f5dba7SAnatoly Burakov 		if (ret < 0) {
805c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
806c7f5dba7SAnatoly Burakov 			return -1;
807c7f5dba7SAnatoly Burakov 		}
808c7f5dba7SAnatoly Burakov 	}
809c7f5dba7SAnatoly Burakov 
810c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
811c7f5dba7SAnatoly Burakov 	if (ret < 0) {
812c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
813c7f5dba7SAnatoly Burakov 		return -1;
814c7f5dba7SAnatoly Burakov 	}
815c7f5dba7SAnatoly Burakov 
816c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
817c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
818c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
819c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
820c7f5dba7SAnatoly Burakov 
821c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
822c7f5dba7SAnatoly Burakov 
823c7f5dba7SAnatoly Burakov 	/* not needed any more */
824c7f5dba7SAnatoly Burakov 	free(param.iova_table);
825c7f5dba7SAnatoly Burakov 
826c7f5dba7SAnatoly Burakov 	if (ret < 0) {
827c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
828c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
829c7f5dba7SAnatoly Burakov 		return -1;
830c7f5dba7SAnatoly Burakov 	}
831c7f5dba7SAnatoly Burakov 
832c7f5dba7SAnatoly Burakov 	/* success */
833c7f5dba7SAnatoly Burakov 
834c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
835c7f5dba7SAnatoly Burakov 			param.len >> 20);
836c7f5dba7SAnatoly Burakov 
837c7f5dba7SAnatoly Burakov 	return 0;
838c7f5dba7SAnatoly Burakov }
/*
 * rte_mempool_mem_iter() callback: undo dma_map_cb() for one memory chunk.
 * DMA-unmaps the chunk from every ethdev, then un-registers it from EAL.
 * Failures are only logged at DEBUG level; there is no recovery path.
 */
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	/* unmap the chunk from every known ethernet device */
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
	/* drop the EAL external-memory registration made by dma_map_cb() */
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}
8653a0968c8SShahaf Shuler 
/*
 * rte_mempool_mem_iter() callback: register one memory chunk with EAL as
 * external memory and DMA-map it for every ethdev.  If registration fails
 * the chunk is skipped entirely; per-device mapping failures are only
 * logged at DEBUG level.
 */
static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	/* register with the system page size; NULL IOVA table means EAL
	 * resolves addresses itself
	 */
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	/* map the chunk for DMA on every known ethernet device */
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
}
895c7f5dba7SAnatoly Burakov 
89672512e18SViacheslav Ovsiienko static unsigned int
89772512e18SViacheslav Ovsiienko setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
89872512e18SViacheslav Ovsiienko 	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
89972512e18SViacheslav Ovsiienko {
90072512e18SViacheslav Ovsiienko 	struct rte_pktmbuf_extmem *xmem;
90172512e18SViacheslav Ovsiienko 	unsigned int ext_num, zone_num, elt_num;
90272512e18SViacheslav Ovsiienko 	uint16_t elt_size;
90372512e18SViacheslav Ovsiienko 
90472512e18SViacheslav Ovsiienko 	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
90572512e18SViacheslav Ovsiienko 	elt_num = EXTBUF_ZONE_SIZE / elt_size;
90672512e18SViacheslav Ovsiienko 	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
90772512e18SViacheslav Ovsiienko 
90872512e18SViacheslav Ovsiienko 	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
90972512e18SViacheslav Ovsiienko 	if (xmem == NULL) {
91072512e18SViacheslav Ovsiienko 		TESTPMD_LOG(ERR, "Cannot allocate memory for "
91172512e18SViacheslav Ovsiienko 				 "external buffer descriptors\n");
91272512e18SViacheslav Ovsiienko 		*ext_mem = NULL;
91372512e18SViacheslav Ovsiienko 		return 0;
91472512e18SViacheslav Ovsiienko 	}
91572512e18SViacheslav Ovsiienko 	for (ext_num = 0; ext_num < zone_num; ext_num++) {
91672512e18SViacheslav Ovsiienko 		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
91772512e18SViacheslav Ovsiienko 		const struct rte_memzone *mz;
91872512e18SViacheslav Ovsiienko 		char mz_name[RTE_MEMZONE_NAMESIZE];
91972512e18SViacheslav Ovsiienko 		int ret;
92072512e18SViacheslav Ovsiienko 
92172512e18SViacheslav Ovsiienko 		ret = snprintf(mz_name, sizeof(mz_name),
92272512e18SViacheslav Ovsiienko 			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
92372512e18SViacheslav Ovsiienko 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
92472512e18SViacheslav Ovsiienko 			errno = ENAMETOOLONG;
92572512e18SViacheslav Ovsiienko 			ext_num = 0;
92672512e18SViacheslav Ovsiienko 			break;
92772512e18SViacheslav Ovsiienko 		}
92872512e18SViacheslav Ovsiienko 		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
92972512e18SViacheslav Ovsiienko 						 socket_id,
93072512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_IOVA_CONTIG |
93172512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_1GB |
93272512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_SIZE_HINT_ONLY,
93372512e18SViacheslav Ovsiienko 						 EXTBUF_ZONE_SIZE);
93472512e18SViacheslav Ovsiienko 		if (mz == NULL) {
93572512e18SViacheslav Ovsiienko 			/*
93672512e18SViacheslav Ovsiienko 			 * The caller exits on external buffer creation
93772512e18SViacheslav Ovsiienko 			 * error, so there is no need to free memzones.
93872512e18SViacheslav Ovsiienko 			 */
93972512e18SViacheslav Ovsiienko 			errno = ENOMEM;
94072512e18SViacheslav Ovsiienko 			ext_num = 0;
94172512e18SViacheslav Ovsiienko 			break;
94272512e18SViacheslav Ovsiienko 		}
94372512e18SViacheslav Ovsiienko 		xseg->buf_ptr = mz->addr;
94472512e18SViacheslav Ovsiienko 		xseg->buf_iova = mz->iova;
94572512e18SViacheslav Ovsiienko 		xseg->buf_len = EXTBUF_ZONE_SIZE;
94672512e18SViacheslav Ovsiienko 		xseg->elt_size = elt_size;
94772512e18SViacheslav Ovsiienko 	}
94872512e18SViacheslav Ovsiienko 	if (ext_num == 0 && xmem != NULL) {
94972512e18SViacheslav Ovsiienko 		free(xmem);
95072512e18SViacheslav Ovsiienko 		xmem = NULL;
95172512e18SViacheslav Ovsiienko 	}
95272512e18SViacheslav Ovsiienko 	*ext_mem = xmem;
95372512e18SViacheslav Ovsiienko 	return ext_num;
95472512e18SViacheslav Ovsiienko }
95572512e18SViacheslav Ovsiienko 
/*
 * Create the mbuf pool for one socket, using the allocation mode selected
 * by the global mp_alloc_type.  Called once at init time.  Exits the
 * program on failure; on success returns the new mempool and optionally
 * dumps it when verbose_level > 0.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			/* anonymous mmap'd memory: build an empty pool, then
			 * populate it and DMA-map each chunk via dma_map_cb
			 */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			/* populate_anon() returns the number of objects
			 * added; 0 means nothing was populated
			 */
			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			/* external memory heap, optionally backed by
			 * hugepages; the pool is created on the heap's
			 * pseudo-socket
			 */
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			/* pinned external data buffers in memzones */
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			/* descriptor array was copied by the pool; drop ours */
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

	/* all successful cases fall through here too; the NULL check below
	 * distinguishes success from failure
	 */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
1060af75078fSIntel 
106120a0286fSLiu Xiaofeng /*
106220a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
106320a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
106420a0286fSLiu Xiaofeng  */
106520a0286fSLiu Xiaofeng static int
106620a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
106720a0286fSLiu Xiaofeng {
106820a0286fSLiu Xiaofeng 	static int warning_once = 0;
106920a0286fSLiu Xiaofeng 
1070c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
107120a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
107220a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
107320a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
107420a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
107520a0286fSLiu Xiaofeng 			       " --numa.\n");
107620a0286fSLiu Xiaofeng 		warning_once = 1;
107720a0286fSLiu Xiaofeng 		return -1;
107820a0286fSLiu Xiaofeng 	}
107920a0286fSLiu Xiaofeng 	return 0;
108020a0286fSLiu Xiaofeng }
108120a0286fSLiu Xiaofeng 
10823f7311baSWei Dai /*
10833f7311baSWei Dai  * Get the allowed maximum number of RX queues.
10843f7311baSWei Dai  * *pid return the port id which has minimal value of
10853f7311baSWei Dai  * max_rx_queues in all ports.
10863f7311baSWei Dai  */
10873f7311baSWei Dai queueid_t
10883f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
10893f7311baSWei Dai {
10909e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
10916f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
10923f7311baSWei Dai 	portid_t pi;
10933f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
10943f7311baSWei Dai 
10953f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
10966f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
10976f51deb9SIvan Ilchenko 			continue;
10986f51deb9SIvan Ilchenko 
10996f51deb9SIvan Ilchenko 		max_rxq_valid = true;
11003f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
11013f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
11023f7311baSWei Dai 			*pid = pi;
11033f7311baSWei Dai 		}
11043f7311baSWei Dai 	}
11056f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
11063f7311baSWei Dai }
11073f7311baSWei Dai 
11083f7311baSWei Dai /*
11093f7311baSWei Dai  * Check input rxq is valid or not.
11103f7311baSWei Dai  * If input rxq is not greater than any of maximum number
11113f7311baSWei Dai  * of RX queues of all ports, it is valid.
11123f7311baSWei Dai  * if valid, return 0, else return -1
11133f7311baSWei Dai  */
11143f7311baSWei Dai int
11153f7311baSWei Dai check_nb_rxq(queueid_t rxq)
11163f7311baSWei Dai {
11173f7311baSWei Dai 	queueid_t allowed_max_rxq;
11183f7311baSWei Dai 	portid_t pid = 0;
11193f7311baSWei Dai 
11203f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
11213f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
11223f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
11233f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
11243f7311baSWei Dai 		       rxq,
11253f7311baSWei Dai 		       allowed_max_rxq,
11263f7311baSWei Dai 		       pid);
11273f7311baSWei Dai 		return -1;
11283f7311baSWei Dai 	}
11293f7311baSWei Dai 	return 0;
11303f7311baSWei Dai }
11313f7311baSWei Dai 
113236db4f6cSWei Dai /*
113336db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
113436db4f6cSWei Dai  * *pid return the port id which has minimal value of
113536db4f6cSWei Dai  * max_tx_queues in all ports.
113636db4f6cSWei Dai  */
113736db4f6cSWei Dai queueid_t
113836db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
113936db4f6cSWei Dai {
11409e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
11416f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
114236db4f6cSWei Dai 	portid_t pi;
114336db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
114436db4f6cSWei Dai 
114536db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
11466f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
11476f51deb9SIvan Ilchenko 			continue;
11486f51deb9SIvan Ilchenko 
11496f51deb9SIvan Ilchenko 		max_txq_valid = true;
115036db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
115136db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
115236db4f6cSWei Dai 			*pid = pi;
115336db4f6cSWei Dai 		}
115436db4f6cSWei Dai 	}
11556f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
115636db4f6cSWei Dai }
115736db4f6cSWei Dai 
115836db4f6cSWei Dai /*
115936db4f6cSWei Dai  * Check input txq is valid or not.
116036db4f6cSWei Dai  * If input txq is not greater than any of maximum number
116136db4f6cSWei Dai  * of TX queues of all ports, it is valid.
116236db4f6cSWei Dai  * if valid, return 0, else return -1
116336db4f6cSWei Dai  */
116436db4f6cSWei Dai int
116536db4f6cSWei Dai check_nb_txq(queueid_t txq)
116636db4f6cSWei Dai {
116736db4f6cSWei Dai 	queueid_t allowed_max_txq;
116836db4f6cSWei Dai 	portid_t pid = 0;
116936db4f6cSWei Dai 
117036db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
117136db4f6cSWei Dai 	if (txq > allowed_max_txq) {
117236db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
117336db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
117436db4f6cSWei Dai 		       txq,
117536db4f6cSWei Dai 		       allowed_max_txq,
117636db4f6cSWei Dai 		       pid);
117736db4f6cSWei Dai 		return -1;
117836db4f6cSWei Dai 	}
117936db4f6cSWei Dai 	return 0;
118036db4f6cSWei Dai }
118136db4f6cSWei Dai 
11821c69df45SOri Kam /*
118399e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
118499e040d3SLijun Ou  * *pid return the port id which has minimal value of
118599e040d3SLijun Ou  * max_rxd in all queues of all ports.
118699e040d3SLijun Ou  */
118799e040d3SLijun Ou static uint16_t
118899e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
118999e040d3SLijun Ou {
119099e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
119199e040d3SLijun Ou 	portid_t pi;
119299e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
119399e040d3SLijun Ou 
119499e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
119599e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
119699e040d3SLijun Ou 			continue;
119799e040d3SLijun Ou 
119899e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
119999e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
120099e040d3SLijun Ou 			*pid = pi;
120199e040d3SLijun Ou 		}
120299e040d3SLijun Ou 	}
120399e040d3SLijun Ou 	return allowed_max_rxd;
120499e040d3SLijun Ou }
120599e040d3SLijun Ou 
120699e040d3SLijun Ou /*
120799e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
120899e040d3SLijun Ou  * *pid return the port id which has minimal value of
120999e040d3SLijun Ou  * min_rxd in all queues of all ports.
121099e040d3SLijun Ou  */
121199e040d3SLijun Ou static uint16_t
121299e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
121399e040d3SLijun Ou {
121499e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
121599e040d3SLijun Ou 	portid_t pi;
121699e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
121799e040d3SLijun Ou 
121899e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
121999e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
122099e040d3SLijun Ou 			continue;
122199e040d3SLijun Ou 
122299e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
122399e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
122499e040d3SLijun Ou 			*pid = pi;
122599e040d3SLijun Ou 		}
122699e040d3SLijun Ou 	}
122799e040d3SLijun Ou 
122899e040d3SLijun Ou 	return allowed_min_rxd;
122999e040d3SLijun Ou }
123099e040d3SLijun Ou 
123199e040d3SLijun Ou /*
123299e040d3SLijun Ou  * Check input rxd is valid or not.
123399e040d3SLijun Ou  * If input rxd is not greater than any of maximum number
123499e040d3SLijun Ou  * of RXDs of every Rx queues and is not less than any of
123599e040d3SLijun Ou  * minimal number of RXDs of every Rx queues, it is valid.
123699e040d3SLijun Ou  * if valid, return 0, else return -1
123799e040d3SLijun Ou  */
123899e040d3SLijun Ou int
123999e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
124099e040d3SLijun Ou {
124199e040d3SLijun Ou 	uint16_t allowed_max_rxd;
124299e040d3SLijun Ou 	uint16_t allowed_min_rxd;
124399e040d3SLijun Ou 	portid_t pid = 0;
124499e040d3SLijun Ou 
124599e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
124699e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
124799e040d3SLijun Ou 		printf("Fail: input rxd (%u) can't be greater "
124899e040d3SLijun Ou 		       "than max_rxds (%u) of port %u\n",
124999e040d3SLijun Ou 		       rxd,
125099e040d3SLijun Ou 		       allowed_max_rxd,
125199e040d3SLijun Ou 		       pid);
125299e040d3SLijun Ou 		return -1;
125399e040d3SLijun Ou 	}
125499e040d3SLijun Ou 
125599e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
125699e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
125799e040d3SLijun Ou 		printf("Fail: input rxd (%u) can't be less "
125899e040d3SLijun Ou 		       "than min_rxds (%u) of port %u\n",
125999e040d3SLijun Ou 		       rxd,
126099e040d3SLijun Ou 		       allowed_min_rxd,
126199e040d3SLijun Ou 		       pid);
126299e040d3SLijun Ou 		return -1;
126399e040d3SLijun Ou 	}
126499e040d3SLijun Ou 
126599e040d3SLijun Ou 	return 0;
126699e040d3SLijun Ou }
126799e040d3SLijun Ou 
126899e040d3SLijun Ou /*
126999e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every rx queues.
127099e040d3SLijun Ou  * *pid return the port id which has minimal value of
127199e040d3SLijun Ou  * max_txd in every tx queue.
127299e040d3SLijun Ou  */
127399e040d3SLijun Ou static uint16_t
127499e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
127599e040d3SLijun Ou {
127699e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
127799e040d3SLijun Ou 	portid_t pi;
127899e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
127999e040d3SLijun Ou 
128099e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
128199e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
128299e040d3SLijun Ou 			continue;
128399e040d3SLijun Ou 
128499e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
128599e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
128699e040d3SLijun Ou 			*pid = pi;
128799e040d3SLijun Ou 		}
128899e040d3SLijun Ou 	}
128999e040d3SLijun Ou 	return allowed_max_txd;
129099e040d3SLijun Ou }
129199e040d3SLijun Ou 
/*
 * Get the allowed minimum number of TXDs of every tx queue.
 * *pid return the port id which has maximal value of
 * min_txd in every tx queue.
 */
129799e040d3SLijun Ou static uint16_t
129899e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
129999e040d3SLijun Ou {
130099e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
130199e040d3SLijun Ou 	portid_t pi;
130299e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
130399e040d3SLijun Ou 
130499e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
130599e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
130699e040d3SLijun Ou 			continue;
130799e040d3SLijun Ou 
130899e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
130999e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
131099e040d3SLijun Ou 			*pid = pi;
131199e040d3SLijun Ou 		}
131299e040d3SLijun Ou 	}
131399e040d3SLijun Ou 
131499e040d3SLijun Ou 	return allowed_min_txd;
131599e040d3SLijun Ou }
131699e040d3SLijun Ou 
/*
 * Check input txd is valid or not.
 * If input txd is not greater than any of maximum number
 * of TXDs of every Tx queues, it is valid.
 * if valid, return 0, else return -1
 */
132399e040d3SLijun Ou int
132499e040d3SLijun Ou check_nb_txd(queueid_t txd)
132599e040d3SLijun Ou {
132699e040d3SLijun Ou 	uint16_t allowed_max_txd;
132799e040d3SLijun Ou 	uint16_t allowed_min_txd;
132899e040d3SLijun Ou 	portid_t pid = 0;
132999e040d3SLijun Ou 
133099e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
133199e040d3SLijun Ou 	if (txd > allowed_max_txd) {
133299e040d3SLijun Ou 		printf("Fail: input txd (%u) can't be greater "
133399e040d3SLijun Ou 		       "than max_txds (%u) of port %u\n",
133499e040d3SLijun Ou 		       txd,
133599e040d3SLijun Ou 		       allowed_max_txd,
133699e040d3SLijun Ou 		       pid);
133799e040d3SLijun Ou 		return -1;
133899e040d3SLijun Ou 	}
133999e040d3SLijun Ou 
134099e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
134199e040d3SLijun Ou 	if (txd < allowed_min_txd) {
134299e040d3SLijun Ou 		printf("Fail: input txd (%u) can't be less "
134399e040d3SLijun Ou 		       "than min_txds (%u) of port %u\n",
134499e040d3SLijun Ou 		       txd,
134599e040d3SLijun Ou 		       allowed_min_txd,
134699e040d3SLijun Ou 		       pid);
134799e040d3SLijun Ou 		return -1;
134899e040d3SLijun Ou 	}
134999e040d3SLijun Ou 	return 0;
135099e040d3SLijun Ou }
135199e040d3SLijun Ou 
135299e040d3SLijun Ou 
135399e040d3SLijun Ou /*
13541c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
13551c69df45SOri Kam  * *pid return the port id which has minimal value of
13561c69df45SOri Kam  * max_hairpin_queues in all ports.
13571c69df45SOri Kam  */
13581c69df45SOri Kam queueid_t
13591c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
13601c69df45SOri Kam {
13619e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
13621c69df45SOri Kam 	portid_t pi;
13631c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
13641c69df45SOri Kam 
13651c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
13661c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
13671c69df45SOri Kam 			*pid = pi;
13681c69df45SOri Kam 			return 0;
13691c69df45SOri Kam 		}
13701c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
13711c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
13721c69df45SOri Kam 			*pid = pi;
13731c69df45SOri Kam 		}
13741c69df45SOri Kam 	}
13751c69df45SOri Kam 	return allowed_max_hairpinq;
13761c69df45SOri Kam }
13771c69df45SOri Kam 
13781c69df45SOri Kam /*
13791c69df45SOri Kam  * Check input hairpin is valid or not.
13801c69df45SOri Kam  * If input hairpin is not greater than any of maximum number
13811c69df45SOri Kam  * of hairpin queues of all ports, it is valid.
13821c69df45SOri Kam  * if valid, return 0, else return -1
13831c69df45SOri Kam  */
13841c69df45SOri Kam int
13851c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
13861c69df45SOri Kam {
13871c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
13881c69df45SOri Kam 	portid_t pid = 0;
13891c69df45SOri Kam 
13901c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
13911c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
13921c69df45SOri Kam 		printf("Fail: input hairpin (%u) can't be greater "
13931c69df45SOri Kam 		       "than max_hairpin_queues (%u) of port %u\n",
13941c69df45SOri Kam 		       hairpinq, allowed_max_hairpinq, pid);
13951c69df45SOri Kam 		return -1;
13961c69df45SOri Kam 	}
13971c69df45SOri Kam 	return 0;
13981c69df45SOri Kam }
13991c69df45SOri Kam 
/*
 * One-time initialization of testpmd run-time state:
 * - allocate the per-lcore fwd_lcore structures,
 * - apply the default Rx/Tx configuration to every ethdev port,
 * - create the mbuf pool(s) (one per socket when NUMA support is on),
 * - initialize per-lcore GSO and GRO contexts,
 * - allocate forwarding streams and compute the forwarding config.
 * Exits the process (rte_exit) on any allocation or device-query failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;
	int ret;

	/* Element count equals byte count: port_per_socket is uint8_t[]. */
	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		/* Drop fast-free offload if this port cannot honor it. */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			/* Count ports per socket (explicit config wins). */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU. Accordingly
		 * update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
				port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			/* Grow the global mbuf size if this port needs more. */
			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size) {
				mbuf_data_size = data_size +
						 RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
			    mbuf_data_size);

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mempools[i] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool,
						       socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mempools[0] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool, 0);
		else
			mempools[socket_num] = mbuf_pool_create
							(mbuf_data_size,
							 nb_mbuf_per_pool,
							 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool if none exists locally. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
1582ce8d5614SIntel 
15832950a769SDeclan Doherty 
15842950a769SDeclan Doherty void
1585a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
15862950a769SDeclan Doherty {
15872950a769SDeclan Doherty 	struct rte_port *port;
15886f51deb9SIvan Ilchenko 	int ret;
15892950a769SDeclan Doherty 
15902950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
15912950a769SDeclan Doherty 	port = &ports[new_port_id];
15926f51deb9SIvan Ilchenko 
15936f51deb9SIvan Ilchenko 	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
15946f51deb9SIvan Ilchenko 	if (ret != 0)
15956f51deb9SIvan Ilchenko 		return;
15962950a769SDeclan Doherty 
15972950a769SDeclan Doherty 	/* set flag to initialize port/queue */
15982950a769SDeclan Doherty 	port->need_reconfig = 1;
15992950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1600a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
16012950a769SDeclan Doherty 
16022950a769SDeclan Doherty 	init_port_config();
16032950a769SDeclan Doherty }
16042950a769SDeclan Doherty 
16052950a769SDeclan Doherty 
/*
 * (Re)allocate the global forwarding-stream array.
 * Validates nb_rxq/nb_txq against each port's limits, assigns each port a
 * socket id (from per-port config, NUMA discovery, or the UMA setting),
 * then resizes fwd_streams to nb_ports * max(nb_rxq, nb_txq) entries.
 * Returns 0 on success (including "size unchanged"), -1 on invalid config;
 * exits the process on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit per-port NUMA config takes precedence. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* One stream per queue, sized by the larger of rxq/txq counts. */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1694af75078fSIntel 
/*
 * Print a one-line histogram summary of Rx or Tx burst sizes.
 * Shows the share of empty (0-packet) bursts plus the two most frequent
 * non-zero burst sizes; everything else is lumped into "other".
 * rx_tx is the "RX"/"TX" label; pbs holds the per-size burst counters.
 * Prints nothing when no bursts were recorded at all.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		/* Keep slots 1 and 2 as the two largest counts seen. */
		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	/* No bursts recorded: nothing to display. */
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		/* Fourth slot: whatever percentage remains is "other". */
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		/* Stop early once the tracked slots cover every burst. */
		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
				100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
			burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
1761af75078fSIntel 
/*
 * Print the statistics of one forwarding stream: Rx/Tx packet counts,
 * drops, checksum-error counters (csum engine only), and optional
 * burst histograms. Streams with no traffic and no drops are skipped.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Skip idle streams entirely to keep the report compact. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       "  Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
			fs->rx_bad_outer_l4_csum);
	} else {
		printf("\n");
	}

	/* Burst histograms only when --record-burst-stats is enabled. */
	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
	}
}
1796af75078fSIntel 
179753324971SDavid Marchand void
179853324971SDavid Marchand fwd_stats_display(void)
179953324971SDavid Marchand {
180053324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
180153324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
180253324971SDavid Marchand 	struct {
180353324971SDavid Marchand 		struct fwd_stream *rx_stream;
180453324971SDavid Marchand 		struct fwd_stream *tx_stream;
180553324971SDavid Marchand 		uint64_t tx_dropped;
180653324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
180753324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
180853324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
180953324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
181053324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
181153324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
181253324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
181353324971SDavid Marchand 	struct rte_eth_stats stats;
181453324971SDavid Marchand 	uint64_t fwd_cycles = 0;
181553324971SDavid Marchand 	uint64_t total_recv = 0;
181653324971SDavid Marchand 	uint64_t total_xmit = 0;
181753324971SDavid Marchand 	struct rte_port *port;
181853324971SDavid Marchand 	streamid_t sm_id;
181953324971SDavid Marchand 	portid_t pt_id;
182053324971SDavid Marchand 	int i;
182153324971SDavid Marchand 
182253324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
182353324971SDavid Marchand 
182453324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
182553324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
182653324971SDavid Marchand 
182753324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
182853324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
182953324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
183053324971SDavid Marchand 		} else {
183153324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
183253324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
183353324971SDavid Marchand 		}
183453324971SDavid Marchand 
183553324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
183653324971SDavid Marchand 
183753324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
183853324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
183953324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
184053324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
184153324971SDavid Marchand 
1842bc700b67SDharmik Thakkar 		if (record_core_cycles)
184353324971SDavid Marchand 			fwd_cycles += fs->core_cycles;
184453324971SDavid Marchand 	}
184553324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
184653324971SDavid Marchand 		uint8_t j;
184753324971SDavid Marchand 
184853324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
184953324971SDavid Marchand 		port = &ports[pt_id];
185053324971SDavid Marchand 
185153324971SDavid Marchand 		rte_eth_stats_get(pt_id, &stats);
185253324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
185353324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
185453324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
185553324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
185653324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
185753324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
185853324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
185953324971SDavid Marchand 
186053324971SDavid Marchand 		total_recv += stats.ipackets;
186153324971SDavid Marchand 		total_xmit += stats.opackets;
186253324971SDavid Marchand 		total_rx_dropped += stats.imissed;
186353324971SDavid Marchand 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
186453324971SDavid Marchand 		total_tx_dropped += stats.oerrors;
186553324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
186653324971SDavid Marchand 
186753324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
186853324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
186953324971SDavid Marchand 
187053324971SDavid Marchand 		if (!port->rx_queue_stats_mapping_enabled &&
187153324971SDavid Marchand 		    !port->tx_queue_stats_mapping_enabled) {
187253324971SDavid Marchand 			printf("  RX-packets: %-14"PRIu64
187353324971SDavid Marchand 			       " RX-dropped: %-14"PRIu64
187453324971SDavid Marchand 			       "RX-total: %-"PRIu64"\n",
187553324971SDavid Marchand 			       stats.ipackets, stats.imissed,
187653324971SDavid Marchand 			       stats.ipackets + stats.imissed);
187753324971SDavid Marchand 
187853324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
187953324971SDavid Marchand 				printf("  Bad-ipcsum: %-14"PRIu64
188053324971SDavid Marchand 				       " Bad-l4csum: %-14"PRIu64
188153324971SDavid Marchand 				       "Bad-outer-l4csum: %-14"PRIu64"\n",
188253324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
188353324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
188453324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
188553324971SDavid Marchand 			if (stats.ierrors + stats.rx_nombuf > 0) {
188653324971SDavid Marchand 				printf("  RX-error: %-"PRIu64"\n",
188753324971SDavid Marchand 				       stats.ierrors);
188853324971SDavid Marchand 				printf("  RX-nombufs: %-14"PRIu64"\n",
188953324971SDavid Marchand 				       stats.rx_nombuf);
189053324971SDavid Marchand 			}
189153324971SDavid Marchand 
189253324971SDavid Marchand 			printf("  TX-packets: %-14"PRIu64
189353324971SDavid Marchand 			       " TX-dropped: %-14"PRIu64
189453324971SDavid Marchand 			       "TX-total: %-"PRIu64"\n",
189553324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
189653324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
189753324971SDavid Marchand 		} else {
189853324971SDavid Marchand 			printf("  RX-packets:             %14"PRIu64
189953324971SDavid Marchand 			       "    RX-dropped:%14"PRIu64
190053324971SDavid Marchand 			       "    RX-total:%14"PRIu64"\n",
190153324971SDavid Marchand 			       stats.ipackets, stats.imissed,
190253324971SDavid Marchand 			       stats.ipackets + stats.imissed);
190353324971SDavid Marchand 
190453324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
190553324971SDavid Marchand 				printf("  Bad-ipcsum:%14"PRIu64
190653324971SDavid Marchand 				       "    Bad-l4csum:%14"PRIu64
190753324971SDavid Marchand 				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
190853324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
190953324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
191053324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
191153324971SDavid Marchand 			if ((stats.ierrors + stats.rx_nombuf) > 0) {
191253324971SDavid Marchand 				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
191353324971SDavid Marchand 				printf("  RX-nombufs:             %14"PRIu64"\n",
191453324971SDavid Marchand 				       stats.rx_nombuf);
191553324971SDavid Marchand 			}
191653324971SDavid Marchand 
191753324971SDavid Marchand 			printf("  TX-packets:             %14"PRIu64
191853324971SDavid Marchand 			       "    TX-dropped:%14"PRIu64
191953324971SDavid Marchand 			       "    TX-total:%14"PRIu64"\n",
192053324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
192153324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
192253324971SDavid Marchand 		}
192353324971SDavid Marchand 
19240e4b1963SDharmik Thakkar 		if (record_burst_stats) {
192553324971SDavid Marchand 			if (ports_stats[pt_id].rx_stream)
192653324971SDavid Marchand 				pkt_burst_stats_display("RX",
192753324971SDavid Marchand 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
192853324971SDavid Marchand 			if (ports_stats[pt_id].tx_stream)
192953324971SDavid Marchand 				pkt_burst_stats_display("TX",
193053324971SDavid Marchand 					&ports_stats[pt_id].tx_stream->tx_burst_stats);
19310e4b1963SDharmik Thakkar 		}
193253324971SDavid Marchand 
193353324971SDavid Marchand 		if (port->rx_queue_stats_mapping_enabled) {
193453324971SDavid Marchand 			printf("\n");
193553324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
193653324971SDavid Marchand 				printf("  Stats reg %2d RX-packets:%14"PRIu64
193753324971SDavid Marchand 				       "     RX-errors:%14"PRIu64
193853324971SDavid Marchand 				       "    RX-bytes:%14"PRIu64"\n",
193953324971SDavid Marchand 				       j, stats.q_ipackets[j],
194053324971SDavid Marchand 				       stats.q_errors[j], stats.q_ibytes[j]);
194153324971SDavid Marchand 			}
194253324971SDavid Marchand 			printf("\n");
194353324971SDavid Marchand 		}
194453324971SDavid Marchand 		if (port->tx_queue_stats_mapping_enabled) {
194553324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
194653324971SDavid Marchand 				printf("  Stats reg %2d TX-packets:%14"PRIu64
194753324971SDavid Marchand 				       "                                 TX-bytes:%14"
194853324971SDavid Marchand 				       PRIu64"\n",
194953324971SDavid Marchand 				       j, stats.q_opackets[j],
195053324971SDavid Marchand 				       stats.q_obytes[j]);
195153324971SDavid Marchand 			}
195253324971SDavid Marchand 		}
195353324971SDavid Marchand 
195453324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
195553324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
195653324971SDavid Marchand 	}
195753324971SDavid Marchand 
195853324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
195953324971SDavid Marchand 	       "%s\n",
196053324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
196153324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
196253324971SDavid Marchand 	       "%-"PRIu64"\n"
196353324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
196453324971SDavid Marchand 	       "%-"PRIu64"\n",
196553324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
196653324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
196753324971SDavid Marchand 	if (total_rx_nombuf > 0)
196853324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
196953324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
197053324971SDavid Marchand 	       "%s\n",
197153324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
1972bc700b67SDharmik Thakkar 	if (record_core_cycles) {
19734c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6
19743a164e00SPhil Yang 		if (total_recv > 0 || total_xmit > 0) {
19753a164e00SPhil Yang 			uint64_t total_pkts = 0;
19763a164e00SPhil Yang 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
19773a164e00SPhil Yang 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
19783a164e00SPhil Yang 				total_pkts = total_xmit;
19793a164e00SPhil Yang 			else
19803a164e00SPhil Yang 				total_pkts = total_recv;
19813a164e00SPhil Yang 
19821920832aSDharmik Thakkar 			printf("\n  CPU cycles/packet=%.2F (total cycles="
19833a164e00SPhil Yang 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
19844c0497b1SDharmik Thakkar 			       " MHz Clock\n",
19853a164e00SPhil Yang 			       (double) fwd_cycles / total_pkts,
19863a164e00SPhil Yang 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
19874c0497b1SDharmik Thakkar 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
19883a164e00SPhil Yang 		}
1989bc700b67SDharmik Thakkar 	}
199053324971SDavid Marchand }
199153324971SDavid Marchand 
199253324971SDavid Marchand void
199353324971SDavid Marchand fwd_stats_reset(void)
199453324971SDavid Marchand {
199553324971SDavid Marchand 	streamid_t sm_id;
199653324971SDavid Marchand 	portid_t pt_id;
199753324971SDavid Marchand 	int i;
199853324971SDavid Marchand 
199953324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
200053324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
200153324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
200253324971SDavid Marchand 	}
200353324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
200453324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
200553324971SDavid Marchand 
200653324971SDavid Marchand 		fs->rx_packets = 0;
200753324971SDavid Marchand 		fs->tx_packets = 0;
200853324971SDavid Marchand 		fs->fwd_dropped = 0;
200953324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
201053324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
201153324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
201253324971SDavid Marchand 
201353324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
201453324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
201553324971SDavid Marchand 		fs->core_cycles = 0;
201653324971SDavid Marchand 	}
201753324971SDavid Marchand }
201853324971SDavid Marchand 
2019af75078fSIntel static void
20207741e4cfSIntel flush_fwd_rx_queues(void)
2021af75078fSIntel {
2022af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2023af75078fSIntel 	portid_t  rxp;
20247741e4cfSIntel 	portid_t port_id;
2025af75078fSIntel 	queueid_t rxq;
2026af75078fSIntel 	uint16_t  nb_rx;
2027af75078fSIntel 	uint16_t  i;
2028af75078fSIntel 	uint8_t   j;
2029f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2030594302c7SJames Poole 	uint64_t timer_period;
2031f487715fSReshma Pattan 
2032f487715fSReshma Pattan 	/* convert to number of cycles */
2033594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2034af75078fSIntel 
2035af75078fSIntel 	for (j = 0; j < 2; j++) {
20367741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2037af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
20387741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
2039f487715fSReshma Pattan 				/**
2040f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
2041f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
2042f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
2043f487715fSReshma Pattan 				* after 1sec timer expiry.
2044f487715fSReshma Pattan 				*/
2045f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2046af75078fSIntel 				do {
20477741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2048013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2049af75078fSIntel 					for (i = 0; i < nb_rx; i++)
2050af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
2051f487715fSReshma Pattan 
2052f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2053f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2054f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2055f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2056f487715fSReshma Pattan 					(timer_tsc < timer_period));
2057f487715fSReshma Pattan 				timer_tsc = 0;
2058af75078fSIntel 			}
2059af75078fSIntel 		}
2060af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2061af75078fSIntel 	}
2062af75078fSIntel }
2063af75078fSIntel 
2064af75078fSIntel static void
2065af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2066af75078fSIntel {
2067af75078fSIntel 	struct fwd_stream **fsm;
2068af75078fSIntel 	streamid_t nb_fs;
2069af75078fSIntel 	streamid_t sm_id;
207054f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
20717e4441c8SRemy Horton 	uint64_t tics_per_1sec;
20727e4441c8SRemy Horton 	uint64_t tics_datum;
20737e4441c8SRemy Horton 	uint64_t tics_current;
20744918a357SXiaoyun Li 	uint16_t i, cnt_ports;
2075af75078fSIntel 
20764918a357SXiaoyun Li 	cnt_ports = nb_ports;
20777e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
20787e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
20797e4441c8SRemy Horton #endif
2080af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
2081af75078fSIntel 	nb_fs = fc->stream_nb;
2082af75078fSIntel 	do {
2083af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
2084af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
208554f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
2086e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
2087e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
20887e4441c8SRemy Horton 			tics_current = rte_rdtsc();
20897e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
20907e4441c8SRemy Horton 				/* Periodic bitrate calculation */
20914918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
2092e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
20934918a357SXiaoyun Li 						ports_ids[i]);
20947e4441c8SRemy Horton 				tics_datum = tics_current;
20957e4441c8SRemy Horton 			}
2096e25e6c70SRemy Horton 		}
20977e4441c8SRemy Horton #endif
209862d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
209965eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
210065eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
210162d3216dSReshma Pattan 			rte_latencystats_update();
210262d3216dSReshma Pattan #endif
210362d3216dSReshma Pattan 
2104af75078fSIntel 	} while (! fc->stopped);
2105af75078fSIntel }
2106af75078fSIntel 
2107af75078fSIntel static int
2108af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2109af75078fSIntel {
2110af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2111af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2112af75078fSIntel 	return 0;
2113af75078fSIntel }
2114af75078fSIntel 
2115af75078fSIntel /*
2116af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2117af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2118af75078fSIntel  */
2119af75078fSIntel static int
2120af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2121af75078fSIntel {
2122af75078fSIntel 	struct fwd_lcore *fwd_lc;
2123af75078fSIntel 	struct fwd_lcore tmp_lcore;
2124af75078fSIntel 
2125af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2126af75078fSIntel 	tmp_lcore = *fwd_lc;
2127af75078fSIntel 	tmp_lcore.stopped = 1;
2128af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2129af75078fSIntel 	return 0;
2130af75078fSIntel }
2131af75078fSIntel 
2132af75078fSIntel /*
2133af75078fSIntel  * Launch packet forwarding:
2134af75078fSIntel  *     - Setup per-port forwarding context.
2135af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2136af75078fSIntel  */
2137af75078fSIntel static void
2138af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2139af75078fSIntel {
2140af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2141af75078fSIntel 	unsigned int i;
2142af75078fSIntel 	unsigned int lc_id;
2143af75078fSIntel 	int diag;
2144af75078fSIntel 
2145af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2146af75078fSIntel 	if (port_fwd_begin != NULL) {
2147af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2148af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
2149af75078fSIntel 	}
2150af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2151af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2152af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2153af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2154af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2155af75078fSIntel 						     fwd_lcores[i], lc_id);
2156af75078fSIntel 			if (diag != 0)
2157af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
2158af75078fSIntel 				       lc_id, diag);
2159af75078fSIntel 		}
2160af75078fSIntel 	}
2161af75078fSIntel }
2162af75078fSIntel 
2163af75078fSIntel /*
2164af75078fSIntel  * Launch packet forwarding configuration.
2165af75078fSIntel  */
2166af75078fSIntel void
2167af75078fSIntel start_packet_forwarding(int with_tx_first)
2168af75078fSIntel {
2169af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2170af75078fSIntel 	port_fwd_end_t  port_fwd_end;
2171af75078fSIntel 	struct rte_port *port;
2172af75078fSIntel 	unsigned int i;
2173af75078fSIntel 	portid_t   pt_id;
2174af75078fSIntel 
21755a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
21765a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
21775a8fb55cSReshma Pattan 
21785a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
21795a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
21805a8fb55cSReshma Pattan 
21815a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
21825a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
21835a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
21845a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
21855a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
21865a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
21875a8fb55cSReshma Pattan 
2188ce8d5614SIntel 	if (all_ports_started() == 0) {
2189ce8d5614SIntel 		printf("Not all ports were started\n");
2190ce8d5614SIntel 		return;
2191ce8d5614SIntel 	}
2192af75078fSIntel 	if (test_done == 0) {
2193af75078fSIntel 		printf("Packet forwarding already started\n");
2194af75078fSIntel 		return;
2195af75078fSIntel 	}
2196edf87b4aSBernard Iremonger 
2197edf87b4aSBernard Iremonger 
21987741e4cfSIntel 	if(dcb_test) {
21997741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
22007741e4cfSIntel 			pt_id = fwd_ports_ids[i];
22017741e4cfSIntel 			port = &ports[pt_id];
22027741e4cfSIntel 			if (!port->dcb_flag) {
22037741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
22047741e4cfSIntel                                        "be configured in this mode.\n");
2205013af9b6SIntel 				return;
2206013af9b6SIntel 			}
22077741e4cfSIntel 		}
22087741e4cfSIntel 		if (nb_fwd_lcores == 1) {
22097741e4cfSIntel 			printf("In DCB mode,the nb forwarding cores "
22107741e4cfSIntel                                "should be larger than 1.\n");
22117741e4cfSIntel 			return;
22127741e4cfSIntel 		}
22137741e4cfSIntel 	}
2214af75078fSIntel 	test_done = 0;
22157741e4cfSIntel 
221647a767b2SMatan Azrad 	fwd_config_setup();
221747a767b2SMatan Azrad 
22187741e4cfSIntel 	if(!no_flush_rx)
22197741e4cfSIntel 		flush_fwd_rx_queues();
22207741e4cfSIntel 
2221933617d8SZhihong Wang 	pkt_fwd_config_display(&cur_fwd_config);
2222af75078fSIntel 	rxtx_config_display();
2223af75078fSIntel 
222453324971SDavid Marchand 	fwd_stats_reset();
2225af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2226af75078fSIntel 		pt_id = fwd_ports_ids[i];
2227af75078fSIntel 		port = &ports[pt_id];
2228013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
2229af75078fSIntel 	}
2230af75078fSIntel 	if (with_tx_first) {
2231af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2232af75078fSIntel 		if (port_fwd_begin != NULL) {
2233af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2234af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
2235af75078fSIntel 		}
2236acbf77a6SZhihong Wang 		while (with_tx_first--) {
2237acbf77a6SZhihong Wang 			launch_packet_forwarding(
2238acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2239af75078fSIntel 			rte_eal_mp_wait_lcore();
2240acbf77a6SZhihong Wang 		}
2241af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2242af75078fSIntel 		if (port_fwd_end != NULL) {
2243af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2244af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2245af75078fSIntel 		}
2246af75078fSIntel 	}
2247af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2248af75078fSIntel }
2249af75078fSIntel 
2250af75078fSIntel void
2251af75078fSIntel stop_packet_forwarding(void)
2252af75078fSIntel {
2253af75078fSIntel 	port_fwd_end_t port_fwd_end;
2254af75078fSIntel 	lcoreid_t lc_id;
225553324971SDavid Marchand 	portid_t pt_id;
225653324971SDavid Marchand 	int i;
2257af75078fSIntel 
2258af75078fSIntel 	if (test_done) {
2259af75078fSIntel 		printf("Packet forwarding not started\n");
2260af75078fSIntel 		return;
2261af75078fSIntel 	}
2262af75078fSIntel 	printf("Telling cores to stop...");
2263af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2264af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2265af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2266af75078fSIntel 	rte_eal_mp_wait_lcore();
2267af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2268af75078fSIntel 	if (port_fwd_end != NULL) {
2269af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2270af75078fSIntel 			pt_id = fwd_ports_ids[i];
2271af75078fSIntel 			(*port_fwd_end)(pt_id);
2272af75078fSIntel 		}
2273af75078fSIntel 	}
2274c185d42cSDavid Marchand 
227553324971SDavid Marchand 	fwd_stats_display();
227658d475b7SJerin Jacob 
2277af75078fSIntel 	printf("\nDone.\n");
2278af75078fSIntel 	test_done = 1;
2279af75078fSIntel }
2280af75078fSIntel 
2281cfae07fdSOuyang Changchun void
2282cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2283cfae07fdSOuyang Changchun {
2284492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
2285cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
2286cfae07fdSOuyang Changchun }
2287cfae07fdSOuyang Changchun 
2288cfae07fdSOuyang Changchun void
2289cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2290cfae07fdSOuyang Changchun {
2291492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
2292cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
2293cfae07fdSOuyang Changchun }
2294cfae07fdSOuyang Changchun 
2295ce8d5614SIntel static int
2296ce8d5614SIntel all_ports_started(void)
2297ce8d5614SIntel {
2298ce8d5614SIntel 	portid_t pi;
2299ce8d5614SIntel 	struct rte_port *port;
2300ce8d5614SIntel 
23017d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2302ce8d5614SIntel 		port = &ports[pi];
2303ce8d5614SIntel 		/* Check if there is a port which is not started */
230441b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
230541b05095SBernard Iremonger 			(port->slave_flag == 0))
2306ce8d5614SIntel 			return 0;
2307ce8d5614SIntel 	}
2308ce8d5614SIntel 
2309ce8d5614SIntel 	/* No port is not started */
2310ce8d5614SIntel 	return 1;
2311ce8d5614SIntel }
2312ce8d5614SIntel 
2313148f963fSBruce Richardson int
23146018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
23156018eb8cSShahaf Shuler {
23166018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
23176018eb8cSShahaf Shuler 
23186018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
23196018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
23206018eb8cSShahaf Shuler 		return 0;
23216018eb8cSShahaf Shuler 	return 1;
23226018eb8cSShahaf Shuler }
23236018eb8cSShahaf Shuler 
23246018eb8cSShahaf Shuler int
2325edab33b1STetsuya Mukawa all_ports_stopped(void)
2326edab33b1STetsuya Mukawa {
2327edab33b1STetsuya Mukawa 	portid_t pi;
2328edab33b1STetsuya Mukawa 
23297d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
23306018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2331edab33b1STetsuya Mukawa 			return 0;
2332edab33b1STetsuya Mukawa 	}
2333edab33b1STetsuya Mukawa 
2334edab33b1STetsuya Mukawa 	return 1;
2335edab33b1STetsuya Mukawa }
2336edab33b1STetsuya Mukawa 
2337edab33b1STetsuya Mukawa int
2338edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2339edab33b1STetsuya Mukawa {
2340edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2341edab33b1STetsuya Mukawa 		return 0;
2342edab33b1STetsuya Mukawa 
2343edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2344edab33b1STetsuya Mukawa 		return 0;
2345edab33b1STetsuya Mukawa 
2346edab33b1STetsuya Mukawa 	return 1;
2347edab33b1STetsuya Mukawa }
2348edab33b1STetsuya Mukawa 
23491c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
23501c69df45SOri Kam static int
235101817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
23521c69df45SOri Kam {
23531c69df45SOri Kam 	queueid_t qi;
23541c69df45SOri Kam 	struct rte_eth_hairpin_conf hairpin_conf = {
23551c69df45SOri Kam 		.peer_count = 1,
23561c69df45SOri Kam 	};
23571c69df45SOri Kam 	int i;
23581c69df45SOri Kam 	int diag;
23591c69df45SOri Kam 	struct rte_port *port = &ports[pi];
236001817b10SBing Zhao 	uint16_t peer_rx_port = pi;
236101817b10SBing Zhao 	uint16_t peer_tx_port = pi;
236201817b10SBing Zhao 	uint32_t manual = 1;
236301817b10SBing Zhao 	uint32_t tx_exp = hairpin_mode & 0x10;
236401817b10SBing Zhao 
236501817b10SBing Zhao 	if (!(hairpin_mode & 0xf)) {
236601817b10SBing Zhao 		peer_rx_port = pi;
236701817b10SBing Zhao 		peer_tx_port = pi;
236801817b10SBing Zhao 		manual = 0;
236901817b10SBing Zhao 	} else if (hairpin_mode & 0x1) {
237001817b10SBing Zhao 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
237101817b10SBing Zhao 						       RTE_ETH_DEV_NO_OWNER);
237201817b10SBing Zhao 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
237301817b10SBing Zhao 			peer_tx_port = rte_eth_find_next_owned_by(0,
237401817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
237501817b10SBing Zhao 		if (p_pi != RTE_MAX_ETHPORTS) {
237601817b10SBing Zhao 			peer_rx_port = p_pi;
237701817b10SBing Zhao 		} else {
237801817b10SBing Zhao 			uint16_t next_pi;
237901817b10SBing Zhao 
238001817b10SBing Zhao 			/* Last port will be the peer RX port of the first. */
238101817b10SBing Zhao 			RTE_ETH_FOREACH_DEV(next_pi)
238201817b10SBing Zhao 				peer_rx_port = next_pi;
238301817b10SBing Zhao 		}
238401817b10SBing Zhao 		manual = 1;
238501817b10SBing Zhao 	} else if (hairpin_mode & 0x2) {
238601817b10SBing Zhao 		if (cnt_pi & 0x1) {
238701817b10SBing Zhao 			peer_rx_port = p_pi;
238801817b10SBing Zhao 		} else {
238901817b10SBing Zhao 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
239001817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
239101817b10SBing Zhao 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
239201817b10SBing Zhao 				peer_rx_port = pi;
239301817b10SBing Zhao 		}
239401817b10SBing Zhao 		peer_tx_port = peer_rx_port;
239501817b10SBing Zhao 		manual = 1;
239601817b10SBing Zhao 	}
23971c69df45SOri Kam 
23981c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
239901817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_rx_port;
24001c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
240101817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
240201817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
24031c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
24041c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
24051c69df45SOri Kam 		i++;
24061c69df45SOri Kam 		if (diag == 0)
24071c69df45SOri Kam 			continue;
24081c69df45SOri Kam 
24091c69df45SOri Kam 		/* Fail to setup rx queue, return */
24101c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
24111c69df45SOri Kam 					RTE_PORT_HANDLING,
24121c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
24131c69df45SOri Kam 			printf("Port %d can not be set back "
24141c69df45SOri Kam 					"to stopped\n", pi);
24151c69df45SOri Kam 		printf("Fail to configure port %d hairpin "
24161c69df45SOri Kam 				"queues\n", pi);
24171c69df45SOri Kam 		/* try to reconfigure queues next time */
24181c69df45SOri Kam 		port->need_reconfig_queues = 1;
24191c69df45SOri Kam 		return -1;
24201c69df45SOri Kam 	}
24211c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
242201817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_tx_port;
24231c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
242401817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
242501817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
24261c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
24271c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
24281c69df45SOri Kam 		i++;
24291c69df45SOri Kam 		if (diag == 0)
24301c69df45SOri Kam 			continue;
24311c69df45SOri Kam 
24321c69df45SOri Kam 		/* Fail to setup rx queue, return */
24331c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
24341c69df45SOri Kam 					RTE_PORT_HANDLING,
24351c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
24361c69df45SOri Kam 			printf("Port %d can not be set back "
24371c69df45SOri Kam 					"to stopped\n", pi);
24381c69df45SOri Kam 		printf("Fail to configure port %d hairpin "
24391c69df45SOri Kam 				"queues\n", pi);
24401c69df45SOri Kam 		/* try to reconfigure queues next time */
24411c69df45SOri Kam 		port->need_reconfig_queues = 1;
24421c69df45SOri Kam 		return -1;
24431c69df45SOri Kam 	}
24441c69df45SOri Kam 	return 0;
24451c69df45SOri Kam }
24461c69df45SOri Kam 
2447edab33b1STetsuya Mukawa int
2448ce8d5614SIntel start_port(portid_t pid)
2449ce8d5614SIntel {
245092d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
2451ce8d5614SIntel 	portid_t pi;
245201817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
245301817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
245401817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
245501817b10SBing Zhao 	uint16_t cnt_pi = 0;
245601817b10SBing Zhao 	uint16_t cfg_pi = 0;
245701817b10SBing Zhao 	int peer_pi;
2458ce8d5614SIntel 	queueid_t qi;
2459ce8d5614SIntel 	struct rte_port *port;
24606d13ea8eSOlivier Matz 	struct rte_ether_addr mac_addr;
24611c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
2462ce8d5614SIntel 
24634468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
24644468635fSMichael Qiu 		return 0;
24654468635fSMichael Qiu 
2466ce8d5614SIntel 	if(dcb_config)
2467ce8d5614SIntel 		dcb_test = 1;
24687d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2469edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2470ce8d5614SIntel 			continue;
2471ce8d5614SIntel 
247292d2703eSMichael Qiu 		need_check_link_status = 0;
2473ce8d5614SIntel 		port = &ports[pi];
2474ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2475ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
2476ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2477ce8d5614SIntel 			continue;
2478ce8d5614SIntel 		}
2479ce8d5614SIntel 
2480ce8d5614SIntel 		if (port->need_reconfig > 0) {
2481ce8d5614SIntel 			port->need_reconfig = 0;
2482ce8d5614SIntel 
24837ee3e944SVasily Philipov 			if (flow_isolate_all) {
24847ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
24857ee3e944SVasily Philipov 				if (ret) {
24867ee3e944SVasily Philipov 					printf("Failed to apply isolated"
24877ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
24887ee3e944SVasily Philipov 					return -1;
24897ee3e944SVasily Philipov 				}
24907ee3e944SVasily Philipov 			}
2491b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
24925706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
249320a0286fSLiu Xiaofeng 					port->socket_id);
24941c69df45SOri Kam 			if (nb_hairpinq > 0 &&
24951c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
24961c69df45SOri Kam 				printf("Port %d doesn't support hairpin "
24971c69df45SOri Kam 				       "queues\n", pi);
24981c69df45SOri Kam 				return -1;
24991c69df45SOri Kam 			}
2500ce8d5614SIntel 			/* configure port */
25011c69df45SOri Kam 			diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
25021c69df45SOri Kam 						     nb_txq + nb_hairpinq,
2503ce8d5614SIntel 						     &(port->dev_conf));
2504ce8d5614SIntel 			if (diag != 0) {
2505ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2506ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2507ce8d5614SIntel 					printf("Port %d can not be set back "
2508ce8d5614SIntel 							"to stopped\n", pi);
2509ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
2510ce8d5614SIntel 				/* try to reconfigure port next time */
2511ce8d5614SIntel 				port->need_reconfig = 1;
2512148f963fSBruce Richardson 				return -1;
2513ce8d5614SIntel 			}
2514ce8d5614SIntel 		}
2515ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
2516ce8d5614SIntel 			port->need_reconfig_queues = 0;
2517ce8d5614SIntel 			/* setup tx queues */
2518ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
2519b6ea6408SIntel 				if ((numa_support) &&
2520b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2521b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2522d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2523d44f8a48SQi Zhang 						txring_numa[pi],
2524d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2525b6ea6408SIntel 				else
2526b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2527d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2528d44f8a48SQi Zhang 						port->socket_id,
2529d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2530b6ea6408SIntel 
2531ce8d5614SIntel 				if (diag == 0)
2532ce8d5614SIntel 					continue;
2533ce8d5614SIntel 
2534ce8d5614SIntel 				/* Fail to setup tx queue, return */
2535ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2536ce8d5614SIntel 							RTE_PORT_HANDLING,
2537ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2538ce8d5614SIntel 					printf("Port %d can not be set back "
2539ce8d5614SIntel 							"to stopped\n", pi);
2540d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
2541d44f8a48SQi Zhang 				       pi);
2542ce8d5614SIntel 				/* try to reconfigure queues next time */
2543ce8d5614SIntel 				port->need_reconfig_queues = 1;
2544148f963fSBruce Richardson 				return -1;
2545ce8d5614SIntel 			}
2546ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2547d44f8a48SQi Zhang 				/* setup rx queues */
2548b6ea6408SIntel 				if ((numa_support) &&
2549b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2550b6ea6408SIntel 					struct rte_mempool * mp =
2551b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
2552b6ea6408SIntel 					if (mp == NULL) {
2553b6ea6408SIntel 						printf("Failed to setup RX queue:"
2554b6ea6408SIntel 							"No mempool allocation"
2555b6ea6408SIntel 							" on the socket %d\n",
2556b6ea6408SIntel 							rxring_numa[pi]);
2557148f963fSBruce Richardson 						return -1;
2558b6ea6408SIntel 					}
2559b6ea6408SIntel 
2560b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2561d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2562d44f8a48SQi Zhang 					     rxring_numa[pi],
2563d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2564d44f8a48SQi Zhang 					     mp);
25651e1d6bddSBernard Iremonger 				} else {
25661e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
25671e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
25681e1d6bddSBernard Iremonger 					if (mp == NULL) {
25691e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
25701e1d6bddSBernard Iremonger 							"No mempool allocation"
25711e1d6bddSBernard Iremonger 							" on the socket %d\n",
25721e1d6bddSBernard Iremonger 							port->socket_id);
25731e1d6bddSBernard Iremonger 						return -1;
2574b6ea6408SIntel 					}
2575b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2576d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2577d44f8a48SQi Zhang 					     port->socket_id,
2578d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2579d44f8a48SQi Zhang 					     mp);
25801e1d6bddSBernard Iremonger 				}
2581ce8d5614SIntel 				if (diag == 0)
2582ce8d5614SIntel 					continue;
2583ce8d5614SIntel 
2584ce8d5614SIntel 				/* Fail to setup rx queue, return */
2585ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2586ce8d5614SIntel 							RTE_PORT_HANDLING,
2587ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2588ce8d5614SIntel 					printf("Port %d can not be set back "
2589ce8d5614SIntel 							"to stopped\n", pi);
2590d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2591d44f8a48SQi Zhang 				       pi);
2592ce8d5614SIntel 				/* try to reconfigure queues next time */
2593ce8d5614SIntel 				port->need_reconfig_queues = 1;
2594148f963fSBruce Richardson 				return -1;
2595ce8d5614SIntel 			}
25961c69df45SOri Kam 			/* setup hairpin queues */
259701817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
25981c69df45SOri Kam 				return -1;
2599ce8d5614SIntel 		}
2600b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
2601b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
2602b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2603b0a9354aSPavan Nikhilesh 					NULL, 0);
2604b0a9354aSPavan Nikhilesh 			if (diag < 0)
2605b0a9354aSPavan Nikhilesh 				printf(
2606b0a9354aSPavan Nikhilesh 				"Port %d: Failed to disable Ptype parsing\n",
2607b0a9354aSPavan Nikhilesh 				pi);
2608b0a9354aSPavan Nikhilesh 		}
2609b0a9354aSPavan Nikhilesh 
261001817b10SBing Zhao 		p_pi = pi;
261101817b10SBing Zhao 		cnt_pi++;
261201817b10SBing Zhao 
2613ce8d5614SIntel 		/* start port */
2614ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2615ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2616ce8d5614SIntel 
2617ce8d5614SIntel 			/* Fail to setup rx queue, return */
2618ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2619ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2620ce8d5614SIntel 				printf("Port %d can not be set back to "
2621ce8d5614SIntel 							"stopped\n", pi);
2622ce8d5614SIntel 			continue;
2623ce8d5614SIntel 		}
2624ce8d5614SIntel 
2625ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2626ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2627ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2628ce8d5614SIntel 
2629a5279d25SIgor Romanov 		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2630d8c89163SZijie Pan 			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
26312950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
26322950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
26332950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2634d8c89163SZijie Pan 
2635ce8d5614SIntel 		/* at least one port started, need checking link status */
2636ce8d5614SIntel 		need_check_link_status = 1;
263701817b10SBing Zhao 
263801817b10SBing Zhao 		pl[cfg_pi++] = pi;
2639ce8d5614SIntel 	}
2640ce8d5614SIntel 
264192d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2642edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
264392d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2644ce8d5614SIntel 		printf("Please stop the ports first\n");
2645ce8d5614SIntel 
264601817b10SBing Zhao 	if (hairpin_mode & 0xf) {
264701817b10SBing Zhao 		uint16_t i;
264801817b10SBing Zhao 		int j;
264901817b10SBing Zhao 
265001817b10SBing Zhao 		/* bind all started hairpin ports */
265101817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
265201817b10SBing Zhao 			pi = pl[i];
265301817b10SBing Zhao 			/* bind current Tx to all peer Rx */
265401817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
265501817b10SBing Zhao 							RTE_MAX_ETHPORTS, 1);
265601817b10SBing Zhao 			if (peer_pi < 0)
265701817b10SBing Zhao 				return peer_pi;
265801817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
265901817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
266001817b10SBing Zhao 					continue;
266101817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
266201817b10SBing Zhao 				if (diag < 0) {
266301817b10SBing Zhao 					printf("Error during binding hairpin"
266401817b10SBing Zhao 					       " Tx port %u to %u: %s\n",
266501817b10SBing Zhao 					       pi, peer_pl[j],
266601817b10SBing Zhao 					       rte_strerror(-diag));
266701817b10SBing Zhao 					return -1;
266801817b10SBing Zhao 				}
266901817b10SBing Zhao 			}
267001817b10SBing Zhao 			/* bind all peer Tx to current Rx */
267101817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
267201817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
267301817b10SBing Zhao 			if (peer_pi < 0)
267401817b10SBing Zhao 				return peer_pi;
267501817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
267601817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
267701817b10SBing Zhao 					continue;
267801817b10SBing Zhao 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
267901817b10SBing Zhao 				if (diag < 0) {
268001817b10SBing Zhao 					printf("Error during binding hairpin"
268101817b10SBing Zhao 					       " Tx port %u to %u: %s\n",
268201817b10SBing Zhao 					       peer_pl[j], pi,
268301817b10SBing Zhao 					       rte_strerror(-diag));
268401817b10SBing Zhao 					return -1;
268501817b10SBing Zhao 				}
268601817b10SBing Zhao 			}
268701817b10SBing Zhao 		}
268801817b10SBing Zhao 	}
268901817b10SBing Zhao 
2690ce8d5614SIntel 	printf("Done\n");
2691148f963fSBruce Richardson 	return 0;
2692ce8d5614SIntel }
2693ce8d5614SIntel 
/*
 * Stop the given port, or every port when pid == RTE_PORT_ALL.
 *
 * A port is skipped when it is still part of the forwarding configuration,
 * when it is a bonding slave, or when its state cannot be atomically moved
 * from STARTED to HANDLING (i.e. it was not started, or another thread is
 * already handling it). Hairpin peer ports are unbound before the device
 * itself is stopped.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	int peer_pi;

	/* Leaving DCB test mode: clear both test and config flags. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		/* Refuse to stop a port that is actively forwarding. */
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		/*
		 * Claim the port: STARTED -> HANDLING. Failure means the port
		 * was not started (or is being handled elsewhere), so skip it.
		 */
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		if (hairpin_mode & 0xf) {
			int j;

			/* Unbind all Tx hairpin paths originating from pi. */
			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
			/* unbind all peer Tx from current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				continue;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				rte_eth_hairpin_unbind(peer_pl[j], pi);
			}
		}

		rte_eth_dev_stop(pi);

		/* Release the port: HANDLING -> STOPPED. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
2760ce8d5614SIntel 
2761ce6959bfSWisam Jaddo static void
27624f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2763ce6959bfSWisam Jaddo {
27644f1de450SThomas Monjalon 	portid_t i;
27654f1de450SThomas Monjalon 	portid_t new_total = 0;
2766ce6959bfSWisam Jaddo 
27674f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
27684f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
27694f1de450SThomas Monjalon 			array[new_total] = array[i];
27704f1de450SThomas Monjalon 			new_total++;
2771ce6959bfSWisam Jaddo 		}
27724f1de450SThomas Monjalon 	*total = new_total;
27734f1de450SThomas Monjalon }
27744f1de450SThomas Monjalon 
/*
 * Drop all port ids that have become invalid (e.g. after a device was
 * detached) from both the global port list and the forwarding list,
 * then keep the configured-port count in sync with the forwarding count.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
2782ce6959bfSWisam Jaddo 
2783ce8d5614SIntel void
2784ce8d5614SIntel close_port(portid_t pid)
2785ce8d5614SIntel {
2786ce8d5614SIntel 	portid_t pi;
2787ce8d5614SIntel 	struct rte_port *port;
2788ce8d5614SIntel 
27894468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
27904468635fSMichael Qiu 		return;
27914468635fSMichael Qiu 
2792ce8d5614SIntel 	printf("Closing ports...\n");
2793ce8d5614SIntel 
27947d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
27954468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2796ce8d5614SIntel 			continue;
2797ce8d5614SIntel 
2798a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2799a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2800a8ef3e3aSBernard Iremonger 			continue;
2801a8ef3e3aSBernard Iremonger 		}
2802a8ef3e3aSBernard Iremonger 
28030e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
28040e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
28050e545d30SBernard Iremonger 			continue;
28060e545d30SBernard Iremonger 		}
28070e545d30SBernard Iremonger 
2808ce8d5614SIntel 		port = &ports[pi];
2809ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2810d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2811d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
2812d4e8ad64SMichael Qiu 			continue;
2813d4e8ad64SMichael Qiu 		}
2814d4e8ad64SMichael Qiu 
2815938a184aSAdrien Mazarguil 		port_flow_flush(pi);
2816ce8d5614SIntel 		rte_eth_dev_close(pi);
2817ce8d5614SIntel 	}
2818ce8d5614SIntel 
281985c6571cSThomas Monjalon 	remove_invalid_ports();
2820ce8d5614SIntel 	printf("Done\n");
2821ce8d5614SIntel }
2822ce8d5614SIntel 
2823edab33b1STetsuya Mukawa void
282497f1e196SWei Dai reset_port(portid_t pid)
282597f1e196SWei Dai {
282697f1e196SWei Dai 	int diag;
282797f1e196SWei Dai 	portid_t pi;
282897f1e196SWei Dai 	struct rte_port *port;
282997f1e196SWei Dai 
283097f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
283197f1e196SWei Dai 		return;
283297f1e196SWei Dai 
28331cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
28341cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
28351cde1b9aSShougang Wang 		printf("Can not reset port(s), please stop port(s) first.\n");
28361cde1b9aSShougang Wang 		return;
28371cde1b9aSShougang Wang 	}
28381cde1b9aSShougang Wang 
283997f1e196SWei Dai 	printf("Resetting ports...\n");
284097f1e196SWei Dai 
284197f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
284297f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
284397f1e196SWei Dai 			continue;
284497f1e196SWei Dai 
284597f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
284697f1e196SWei Dai 			printf("Please remove port %d from forwarding "
284797f1e196SWei Dai 			       "configuration.\n", pi);
284897f1e196SWei Dai 			continue;
284997f1e196SWei Dai 		}
285097f1e196SWei Dai 
285197f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
285297f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
285397f1e196SWei Dai 			       pi);
285497f1e196SWei Dai 			continue;
285597f1e196SWei Dai 		}
285697f1e196SWei Dai 
285797f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
285897f1e196SWei Dai 		if (diag == 0) {
285997f1e196SWei Dai 			port = &ports[pi];
286097f1e196SWei Dai 			port->need_reconfig = 1;
286197f1e196SWei Dai 			port->need_reconfig_queues = 1;
286297f1e196SWei Dai 		} else {
286397f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
286497f1e196SWei Dai 		}
286597f1e196SWei Dai 	}
286697f1e196SWei Dai 
286797f1e196SWei Dai 	printf("Done\n");
286897f1e196SWei Dai }
286997f1e196SWei Dai 
287097f1e196SWei Dai void
2871edab33b1STetsuya Mukawa attach_port(char *identifier)
2872ce8d5614SIntel {
28734f1ed78eSThomas Monjalon 	portid_t pi;
2874c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2875ce8d5614SIntel 
2876edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2877edab33b1STetsuya Mukawa 
2878edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2879edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2880edab33b1STetsuya Mukawa 		return;
2881ce8d5614SIntel 	}
2882ce8d5614SIntel 
288375b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
2884c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2885edab33b1STetsuya Mukawa 		return;
2886c9cce428SThomas Monjalon 	}
2887c9cce428SThomas Monjalon 
28884f1ed78eSThomas Monjalon 	/* first attach mode: event */
28894f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
28904f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
28914f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
28924f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
28934f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
28944f1ed78eSThomas Monjalon 				setup_attached_port(pi);
28954f1ed78eSThomas Monjalon 		return;
28964f1ed78eSThomas Monjalon 	}
28974f1ed78eSThomas Monjalon 
28984f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
289986fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
29004f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
290186fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
290286fa5de1SThomas Monjalon 			continue; /* port was already attached before */
2903c9cce428SThomas Monjalon 		setup_attached_port(pi);
2904c9cce428SThomas Monjalon 	}
290586fa5de1SThomas Monjalon }
2906c9cce428SThomas Monjalon 
2907c9cce428SThomas Monjalon static void
2908c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
2909c9cce428SThomas Monjalon {
2910c9cce428SThomas Monjalon 	unsigned int socket_id;
291134fc1051SIvan Ilchenko 	int ret;
2912edab33b1STetsuya Mukawa 
2913931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
291429841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
2915931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
291629841336SPhil Yang 		socket_id = socket_ids[0];
2917931126baSBernard Iremonger 	reconfig(pi, socket_id);
291834fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
291934fc1051SIvan Ilchenko 	if (ret != 0)
292034fc1051SIvan Ilchenko 		printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
292134fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
2922edab33b1STetsuya Mukawa 
29234f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
29244f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
29254f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
29264f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
2927edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2928edab33b1STetsuya Mukawa 
2929edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2930edab33b1STetsuya Mukawa 	printf("Done\n");
2931edab33b1STetsuya Mukawa }
2932edab33b1STetsuya Mukawa 
29330654d4a8SThomas Monjalon static void
29340654d4a8SThomas Monjalon detach_device(struct rte_device *dev)
29355f4ec54fSChen Jing D(Mark) {
2936f8e5baa2SThomas Monjalon 	portid_t sibling;
2937f8e5baa2SThomas Monjalon 
2938f8e5baa2SThomas Monjalon 	if (dev == NULL) {
2939f8e5baa2SThomas Monjalon 		printf("Device already removed\n");
2940f8e5baa2SThomas Monjalon 		return;
2941f8e5baa2SThomas Monjalon 	}
2942f8e5baa2SThomas Monjalon 
29430654d4a8SThomas Monjalon 	printf("Removing a device...\n");
2944938a184aSAdrien Mazarguil 
29452a449871SThomas Monjalon 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
29462a449871SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
29472a449871SThomas Monjalon 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
29482a449871SThomas Monjalon 				printf("Port %u not stopped\n", sibling);
29492a449871SThomas Monjalon 				return;
29502a449871SThomas Monjalon 			}
29512a449871SThomas Monjalon 			port_flow_flush(sibling);
29522a449871SThomas Monjalon 		}
29532a449871SThomas Monjalon 	}
29542a449871SThomas Monjalon 
295575b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
2956f8e5baa2SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2957edab33b1STetsuya Mukawa 		return;
29583070419eSGaetan Rivet 	}
29594f1de450SThomas Monjalon 	remove_invalid_ports();
296003ce2c53SMatan Azrad 
29610654d4a8SThomas Monjalon 	printf("Device is detached\n");
2962f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
2963edab33b1STetsuya Mukawa 	printf("Done\n");
2964edab33b1STetsuya Mukawa 	return;
29655f4ec54fSChen Jing D(Mark) }
29665f4ec54fSChen Jing D(Mark) 
/*
 * Detach the rte_device backing the given port.
 *
 * A started port is refused; a port that is stopped but not closed is
 * detached anyway with a warning (the nested check falls through on
 * purpose). The actual removal is delegated to detach_device(), which
 * re-validates all sibling ports of the device.
 */
void
detach_port_device(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		/* Stopped but not closed: warn and proceed. */
		printf("Port was not closed\n");
	}

	detach_device(rte_eth_devices[port_id].device);
}
29830654d4a8SThomas Monjalon 
29840654d4a8SThomas Monjalon void
29855edee5f6SThomas Monjalon detach_devargs(char *identifier)
298655e51c96SNithin Dabilpuram {
298755e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
298855e51c96SNithin Dabilpuram 	struct rte_devargs da;
298955e51c96SNithin Dabilpuram 	portid_t port_id;
299055e51c96SNithin Dabilpuram 
299155e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
299255e51c96SNithin Dabilpuram 
299355e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
299455e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
299555e51c96SNithin Dabilpuram 		printf("cannot parse identifier\n");
299655e51c96SNithin Dabilpuram 		if (da.args)
299755e51c96SNithin Dabilpuram 			free(da.args);
299855e51c96SNithin Dabilpuram 		return;
299955e51c96SNithin Dabilpuram 	}
300055e51c96SNithin Dabilpuram 
300155e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
300255e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
300355e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
300455e51c96SNithin Dabilpuram 				printf("Port %u not stopped\n", port_id);
3005149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
300655e51c96SNithin Dabilpuram 				return;
300755e51c96SNithin Dabilpuram 			}
300855e51c96SNithin Dabilpuram 			port_flow_flush(port_id);
300955e51c96SNithin Dabilpuram 		}
301055e51c96SNithin Dabilpuram 	}
301155e51c96SNithin Dabilpuram 
301255e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
301355e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
301455e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
301555e51c96SNithin Dabilpuram 		return;
301655e51c96SNithin Dabilpuram 	}
301755e51c96SNithin Dabilpuram 
301855e51c96SNithin Dabilpuram 	remove_invalid_ports();
301955e51c96SNithin Dabilpuram 
302055e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
302155e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
302255e51c96SNithin Dabilpuram 	printf("Done\n");
302355e51c96SNithin Dabilpuram }
302455e51c96SNithin Dabilpuram 
/*
 * Orderly testpmd shutdown: stop forwarding, DMA-unmap anonymous mempool
 * memory, stop then close every port, tear down hot-plug monitoring if it
 * was enabled, and finally free the per-socket mempools.
 *
 * Note the ordering: all ports are stopped before any is closed, and
 * mempools are freed only after every port has been closed.
 */
void
pmd_test_exit(void)
{
	portid_t pt_id;
	int ret;
	int i;

	if (test_done == 0)
		stop_packet_forwarding();

	/* Undo DMA mappings of anonymous-memory pools before teardown. */
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
	if (ports != NULL) {
		/* Skip link-status polling while shutting down. */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		/* Tear down device-event handling in the reverse order of
		 * setup: monitor, callback, then hotplug handle.
		 */
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}
	/* All ports are closed: the mempools can now be released. */
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i])
			rte_mempool_free(mempools[i]);
	}

	printf("\nBye...\n");
}
3086af75078fSIntel 
/* Handler type for a named interactive test command (no args, no result). */
typedef void (*cmd_func_t)(void);
/* Maps a command name to the function that implements it. */
struct pmd_test_command {
	const char *cmd_name;	/* command string typed by the user */
	cmd_func_t cmd_func;	/* handler invoked for that command */
};
3092af75078fSIntel 
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	/*
	 * Poll until every masked port reports link up or the timeout
	 * expires; print_flag marks the final pass, in which per-port
	 * statuses are printed instead of polled.
	 */
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}

		/* LSC interrupts will report link changes; one pass is enough. */
		if (lsc_interrupt)
			break;
	}
}
3152af75078fSIntel 
/*
 * Deferred handler for RTE_ETH_EVENT_INTR_RMV, run from an EAL alarm:
 * pause forwarding if the removed port was part of it, stop and close the
 * port with link checking suppressed, detach the underlying device, then
 * resume forwarding if it had been paused.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_device *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* The device is gone; polling its link would only stall. */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	/* Save rte_device pointer before closing ethdev port */
	dev = rte_eth_devices[port_id].device;
	close_port(port_id);
	detach_device(dev); /* might be already removed or have more ports */

	if (need_to_start)
		start_packet_forwarding(0);
}
3179284c908cSGaetan Rivet 
318076ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3181d6af1a13SBernard Iremonger static int
3182f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3183d6af1a13SBernard Iremonger 		  void *ret_param)
318476ad4a2dSGaetan Rivet {
318576ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3186d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
318776ad4a2dSGaetan Rivet 
318876ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
3189f431e010SHerakliusz Lipiec 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
319076ad4a2dSGaetan Rivet 			port_id, __func__, type);
319176ad4a2dSGaetan Rivet 		fflush(stderr);
31923af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3193f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
319497b5d8b5SThomas Monjalon 			eth_event_desc[type]);
319576ad4a2dSGaetan Rivet 		fflush(stdout);
319676ad4a2dSGaetan Rivet 	}
3197284c908cSGaetan Rivet 
3198284c908cSGaetan Rivet 	switch (type) {
31994f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
32004f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
32014f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
32024f1ed78eSThomas Monjalon 		break;
3203284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
32044f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
32054f1ed78eSThomas Monjalon 			break;
3206284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3207cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3208284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
3209284c908cSGaetan Rivet 		break;
321085c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
321185c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
321285c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
321385c6571cSThomas Monjalon 		break;
3214284c908cSGaetan Rivet 	default:
3215284c908cSGaetan Rivet 		break;
3216284c908cSGaetan Rivet 	}
3217d6af1a13SBernard Iremonger 	return 0;
321876ad4a2dSGaetan Rivet }
321976ad4a2dSGaetan Rivet 
322097b5d8b5SThomas Monjalon static int
322197b5d8b5SThomas Monjalon register_eth_event_callback(void)
322297b5d8b5SThomas Monjalon {
322397b5d8b5SThomas Monjalon 	int ret;
322497b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
322597b5d8b5SThomas Monjalon 
322697b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
322797b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
322897b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
322997b5d8b5SThomas Monjalon 				event,
323097b5d8b5SThomas Monjalon 				eth_event_callback,
323197b5d8b5SThomas Monjalon 				NULL);
323297b5d8b5SThomas Monjalon 		if (ret != 0) {
323397b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
323497b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
323597b5d8b5SThomas Monjalon 			return -1;
323697b5d8b5SThomas Monjalon 		}
323797b5d8b5SThomas Monjalon 	}
323897b5d8b5SThomas Monjalon 
323997b5d8b5SThomas Monjalon 	return 0;
324097b5d8b5SThomas Monjalon }
324197b5d8b5SThomas Monjalon 
/*
 * Device (hotplug) event callback, invoked from the EAL interrupt thread.
 *
 * @param device_name  name of the device the event refers to
 * @param type         RTE_DEV_EVENT_ADD or RTE_DEV_EVENT_REMOVE
 * @param arg          unused user argument
 */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
			     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	/* Unknown event types are reported but still fall through to the
	 * switch, whose default case ignores them.
	 */
	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		/* Map the device name back to its ethdev port id. */
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because this user callback runs inside the EAL interrupt
		 * callback, the interrupt callback must finish before it can
		 * be unregistered during device detach. So return quickly
		 * here and detach the device via a deferred (alarm-based)
		 * removal instead. This is a workaround; once device
		 * detaching moves into the EAL, the deferred removal can be
		 * deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}
3291fb73e096SJeff Guo 
3292013af9b6SIntel static int
329328caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3294af75078fSIntel {
3295013af9b6SIntel 	uint16_t i;
3296af75078fSIntel 	int diag;
3297013af9b6SIntel 	uint8_t mapping_found = 0;
3298af75078fSIntel 
3299013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3300013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3301013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
3302013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3303013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
3304013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
3305013af9b6SIntel 			if (diag != 0)
3306013af9b6SIntel 				return diag;
3307013af9b6SIntel 			mapping_found = 1;
3308af75078fSIntel 		}
3309013af9b6SIntel 	}
3310013af9b6SIntel 	if (mapping_found)
3311013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
3312013af9b6SIntel 	return 0;
3313013af9b6SIntel }
3314013af9b6SIntel 
3315013af9b6SIntel static int
331628caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3317013af9b6SIntel {
3318013af9b6SIntel 	uint16_t i;
3319013af9b6SIntel 	int diag;
3320013af9b6SIntel 	uint8_t mapping_found = 0;
3321013af9b6SIntel 
3322013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3323013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3324013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
3325013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3326013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
3327013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
3328013af9b6SIntel 			if (diag != 0)
3329013af9b6SIntel 				return diag;
3330013af9b6SIntel 			mapping_found = 1;
3331013af9b6SIntel 		}
3332013af9b6SIntel 	}
3333013af9b6SIntel 	if (mapping_found)
3334013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
3335013af9b6SIntel 	return 0;
3336013af9b6SIntel }
3337013af9b6SIntel 
3338013af9b6SIntel static void
333928caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3340013af9b6SIntel {
3341013af9b6SIntel 	int diag = 0;
3342013af9b6SIntel 
3343013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
3344af75078fSIntel 	if (diag != 0) {
3345013af9b6SIntel 		if (diag == -ENOTSUP) {
3346013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
3347013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
3348013af9b6SIntel 		}
3349013af9b6SIntel 		else
3350013af9b6SIntel 			rte_exit(EXIT_FAILURE,
3351013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
3352013af9b6SIntel 					"failed for port id=%d diag=%d\n",
3353af75078fSIntel 					pi, diag);
3354af75078fSIntel 	}
3355013af9b6SIntel 
3356013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
3357af75078fSIntel 	if (diag != 0) {
3358013af9b6SIntel 		if (diag == -ENOTSUP) {
3359013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
3360013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
3361013af9b6SIntel 		}
3362013af9b6SIntel 		else
3363013af9b6SIntel 			rte_exit(EXIT_FAILURE,
3364013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
3365013af9b6SIntel 					"failed for port id=%d diag=%d\n",
3366af75078fSIntel 					pi, diag);
3367af75078fSIntel 	}
3368af75078fSIntel }
3369af75078fSIntel 
3370f2c5125aSPablo de Lara static void
3371f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
3372f2c5125aSPablo de Lara {
3373d44f8a48SQi Zhang 	uint16_t qid;
33745e91aeefSWei Zhao 	uint64_t offloads;
3375f2c5125aSPablo de Lara 
3376d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
33775e91aeefSWei Zhao 		offloads = port->rx_conf[qid].offloads;
3378d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
3379575e0fd1SWei Zhao 		if (offloads != 0)
3380575e0fd1SWei Zhao 			port->rx_conf[qid].offloads = offloads;
3381d44f8a48SQi Zhang 
3382d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3383f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3384d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3385f2c5125aSPablo de Lara 
3386f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3387d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3388f2c5125aSPablo de Lara 
3389f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3390d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3391f2c5125aSPablo de Lara 
3392f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3393d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3394f2c5125aSPablo de Lara 
3395f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3396d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
3397f2c5125aSPablo de Lara 
3398d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3399d44f8a48SQi Zhang 	}
3400d44f8a48SQi Zhang 
3401d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
34025e91aeefSWei Zhao 		offloads = port->tx_conf[qid].offloads;
3403d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
3404575e0fd1SWei Zhao 		if (offloads != 0)
3405575e0fd1SWei Zhao 			port->tx_conf[qid].offloads = offloads;
3406d44f8a48SQi Zhang 
3407d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3408f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3409d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3410f2c5125aSPablo de Lara 
3411f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3412d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3413f2c5125aSPablo de Lara 
3414f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3415d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3416f2c5125aSPablo de Lara 
3417f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3418d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3419f2c5125aSPablo de Lara 
3420f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3421d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3422d44f8a48SQi Zhang 
3423d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
3424d44f8a48SQi Zhang 	}
3425f2c5125aSPablo de Lara }
3426f2c5125aSPablo de Lara 
/*
 * Build the default rte_eth_conf for every probed port: flow director,
 * RSS (when multiple Rx queues are used), per-queue Rx/Tx config, MAC
 * address retrieval, queue-stats mappings, and LSC/RMV interrupt flags.
 * Returns early (silently) if device info or MAC address cannot be read.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		/* Enable RSS only when more than one Rx queue is configured,
		 * and restrict the hash functions to what the PMD supports.
		 */
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* Do not override the multi-queue mode when DCB is active. */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
						(rx_mq_mode & ETH_MQ_RX_RSS);
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		/* Request link-status-change / removal interrupts only when
		 * both the user asked for them and the device supports them.
		 */
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
3481013af9b6SIntel 
348241b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
348341b05095SBernard Iremonger {
348441b05095SBernard Iremonger 	struct rte_port *port;
348541b05095SBernard Iremonger 
348641b05095SBernard Iremonger 	port = &ports[slave_pid];
348741b05095SBernard Iremonger 	port->slave_flag = 1;
348841b05095SBernard Iremonger }
348941b05095SBernard Iremonger 
349041b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
349141b05095SBernard Iremonger {
349241b05095SBernard Iremonger 	struct rte_port *port;
349341b05095SBernard Iremonger 
349441b05095SBernard Iremonger 	port = &ports[slave_pid];
349541b05095SBernard Iremonger 	port->slave_flag = 0;
349641b05095SBernard Iremonger }
349741b05095SBernard Iremonger 
34980e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
34990e545d30SBernard Iremonger {
35000e545d30SBernard Iremonger 	struct rte_port *port;
35010e545d30SBernard Iremonger 
35020e545d30SBernard Iremonger 	port = &ports[slave_pid];
3503b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
3504b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3505b8b8b344SMatan Azrad 		return 1;
3506b8b8b344SMatan Azrad 	return 0;
35070e545d30SBernard Iremonger }
35080e545d30SBernard Iremonger 
/* VLAN IDs used to populate the VMDq+DCB pool map in get_eth_dcb_conf(). */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
3515013af9b6SIntel 
/*
 * Fill @eth_conf with a DCB configuration for port @pid.
 *
 * In DCB_VT_ENABLED mode a VMDq+DCB layout is built from the vlan_tags[]
 * table above; otherwise a plain DCB(+RSS) layout is built, reusing the
 * port's current RSS hash configuration.
 *
 * @param pid      port whose RSS configuration is queried (non-VT mode)
 * @param eth_conf configuration structure to fill
 * @param dcb_mode DCB_VT_ENABLED or DCB_ENABLED
 * @param num_tcs  number of traffic classes (ETH_4_TCS or ETH_8_TCS)
 * @param pfc_en   non-zero to advertise priority flow control support
 * @return 0 on success, or the error from rte_eth_dev_rss_hash_conf_get()
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs leave room for 32 pools; 8 TCs only for 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* Map one VLAN tag to each pool, round-robin. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Spread the 8 user priorities evenly over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		/* Preserve the port's current RSS hash settings. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Spread the 8 user priorities evenly over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
3595013af9b6SIntel 
3596013af9b6SIntel int
35971a572499SJingjing Wu init_port_dcb_config(portid_t pid,
35981a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
35991a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
36001a572499SJingjing Wu 		     uint8_t pfc_en)
3601013af9b6SIntel {
3602013af9b6SIntel 	struct rte_eth_conf port_conf;
3603013af9b6SIntel 	struct rte_port *rte_port;
3604013af9b6SIntel 	int retval;
3605013af9b6SIntel 	uint16_t i;
3606013af9b6SIntel 
36072a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3608013af9b6SIntel 
3609013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3610013af9b6SIntel 	/* Enter DCB configuration status */
3611013af9b6SIntel 	dcb_config = 1;
3612013af9b6SIntel 
3613d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3614d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
3615d5354e89SYanglong Wu 
3616013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
3617ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3618013af9b6SIntel 	if (retval < 0)
3619013af9b6SIntel 		return retval;
36200074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3621013af9b6SIntel 
36222f203d44SQi Zhang 	/* re-configure the device . */
36232b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
36242b0e0ebaSChenbo Xia 	if (retval < 0)
36252b0e0ebaSChenbo Xia 		return retval;
36266f51deb9SIvan Ilchenko 
36276f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
36286f51deb9SIvan Ilchenko 	if (retval != 0)
36296f51deb9SIvan Ilchenko 		return retval;
36302a977b89SWenzhuo Lu 
36312a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
36322a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
36332a977b89SWenzhuo Lu 	 */
36342a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
36352a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
36362a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
36372a977b89SWenzhuo Lu 			" for port %d.", pid);
36382a977b89SWenzhuo Lu 		return -1;
36392a977b89SWenzhuo Lu 	}
36402a977b89SWenzhuo Lu 
36412a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
36422a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
36432a977b89SWenzhuo Lu 	 */
36442a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
364586ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
364686ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
364786ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
364886ef65eeSBernard Iremonger 		} else {
36492a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
36502a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
365186ef65eeSBernard Iremonger 		}
36522a977b89SWenzhuo Lu 	} else {
36532a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
36542a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
36552a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
36562a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
36572a977b89SWenzhuo Lu 		} else {
36582a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
36592a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
36602a977b89SWenzhuo Lu 
36612a977b89SWenzhuo Lu 		}
36622a977b89SWenzhuo Lu 	}
36632a977b89SWenzhuo Lu 	rx_free_thresh = 64;
36642a977b89SWenzhuo Lu 
3665013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3666013af9b6SIntel 
3667f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3668013af9b6SIntel 	/* VLAN filter */
36690074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
36701a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3671013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3672013af9b6SIntel 
3673a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3674a5279d25SIgor Romanov 	if (retval != 0)
3675a5279d25SIgor Romanov 		return retval;
3676a5279d25SIgor Romanov 
3677013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
3678013af9b6SIntel 
36797741e4cfSIntel 	rte_port->dcb_flag = 1;
36807741e4cfSIntel 
3681013af9b6SIntel 	return 0;
3682af75078fSIntel }
3683af75078fSIntel 
3684ffc468ffSTetsuya Mukawa static void
3685ffc468ffSTetsuya Mukawa init_port(void)
3686ffc468ffSTetsuya Mukawa {
3687*1b9f2746SGregory Etelson 	int i;
3688*1b9f2746SGregory Etelson 
3689ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3690ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3691ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3692ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3693ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3694ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3695ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3696ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3697ffc468ffSTetsuya Mukawa 	}
3698*1b9f2746SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3699*1b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
370029841336SPhil Yang 	/* Initialize ports NUMA structures */
370129841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
370229841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
370329841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3704ffc468ffSTetsuya Mukawa }
3705ffc468ffSTetsuya Mukawa 
/* Tear down all ports, then terminate the interactive prompt. */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
3712d3a274ceSZhihong Wang 
3713d3a274ceSZhihong Wang static void
3714cfea1f30SPablo de Lara print_stats(void)
3715cfea1f30SPablo de Lara {
3716cfea1f30SPablo de Lara 	uint8_t i;
3717cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3718cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3719cfea1f30SPablo de Lara 
3720cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3721cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3722cfea1f30SPablo de Lara 
3723cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3724cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3725cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3726683d1e82SIgor Romanov 
3727683d1e82SIgor Romanov 	fflush(stdout);
3728cfea1f30SPablo de Lara }
3729cfea1f30SPablo de Lara 
3730cfea1f30SPablo de Lara static void
3731d3a274ceSZhihong Wang signal_handler(int signum)
3732d3a274ceSZhihong Wang {
3733d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
3734d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
3735d3a274ceSZhihong Wang 				signum);
3736102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
3737102b7329SReshma Pattan 		/* uninitialize packet capture framework */
3738102b7329SReshma Pattan 		rte_pdump_uninit();
3739102b7329SReshma Pattan #endif
374062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
37418b36297dSAmit Gupta 		if (latencystats_enabled != 0)
374262d3216dSReshma Pattan 			rte_latencystats_uninit();
374362d3216dSReshma Pattan #endif
3744d3a274ceSZhihong Wang 		force_quit();
3745d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
3746d9a191a0SPhil Yang 		f_quit = 1;
3747d3a274ceSZhihong Wang 		/* exit with the expected status */
3748d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
3749d3a274ceSZhihong Wang 		kill(getpid(), signum);
3750d3a274ceSZhihong Wang 	}
3751d3a274ceSZhihong Wang }
3752d3a274ceSZhihong Wang 
3753af75078fSIntel int
3754af75078fSIntel main(int argc, char** argv)
3755af75078fSIntel {
3756af75078fSIntel 	int diag;
3757f8244c63SZhiyong Yang 	portid_t port_id;
37584918a357SXiaoyun Li 	uint16_t count;
3759fb73e096SJeff Guo 	int ret;
3760af75078fSIntel 
3761d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
3762d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
3763d3a274ceSZhihong Wang 
3764285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
3765285fd101SOlivier Matz 	if (testpmd_logtype < 0)
376616267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type");
3767285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3768285fd101SOlivier Matz 
37699201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
37709201806eSStephen Hemminger 	if (diag < 0)
377116267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
377216267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
37739201806eSStephen Hemminger 
3774a87ab9f7SStephen Hemminger 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
377516267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE,
377616267ceeSStephen Hemminger 			 "Secondary process type not supported.\n");
3777a87ab9f7SStephen Hemminger 
377897b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
377997b5d8b5SThomas Monjalon 	if (ret != 0)
378016267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
378197b5d8b5SThomas Monjalon 
37824aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP
37834aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
3784e9436f54STiwei Bie 	rte_pdump_init();
37854aa0d012SAnatoly Burakov #endif
37864aa0d012SAnatoly Burakov 
37874918a357SXiaoyun Li 	count = 0;
37884918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
37894918a357SXiaoyun Li 		ports_ids[count] = port_id;
37904918a357SXiaoyun Li 		count++;
37914918a357SXiaoyun Li 	}
37924918a357SXiaoyun Li 	nb_ports = (portid_t) count;
37934aa0d012SAnatoly Burakov 	if (nb_ports == 0)
37944aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
37954aa0d012SAnatoly Burakov 
37964aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
37974aa0d012SAnatoly Burakov 	init_port();
37984aa0d012SAnatoly Burakov 
37994aa0d012SAnatoly Burakov 	set_def_fwd_config();
38004aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
380116267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
380216267ceeSStephen Hemminger 			 "Check the core mask argument\n");
38034aa0d012SAnatoly Burakov 
3804e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
380554f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
3806e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
3807e505d84cSAnatoly Burakov #endif
3808e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS
3809e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
3810e505d84cSAnatoly Burakov #endif
3811e505d84cSAnatoly Burakov 
3812fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
38135fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
3814fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
3815fb7b8b32SAnatoly Burakov #else
3816fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
3817fb7b8b32SAnatoly Burakov #endif
3818fb7b8b32SAnatoly Burakov 
3819e505d84cSAnatoly Burakov 	argc -= diag;
3820e505d84cSAnatoly Burakov 	argv += diag;
3821e505d84cSAnatoly Burakov 	if (argc > 1)
3822e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
3823e505d84cSAnatoly Burakov 
3824e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3825285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
38261c036b16SEelco Chaudron 			strerror(errno));
38271c036b16SEelco Chaudron 	}
38281c036b16SEelco Chaudron 
382999cabef0SPablo de Lara 	if (tx_first && interactive)
383099cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
383199cabef0SPablo de Lara 				"interactive mode.\n");
38328820cba4SDavid Hunt 
38338820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
38348820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
38358820cba4SDavid Hunt 				" using tx_first. Disabling.\n");
38368820cba4SDavid Hunt 		lsc_interrupt = 0;
38378820cba4SDavid Hunt 	}
38388820cba4SDavid Hunt 
38395a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
38405a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
38415a8fb55cSReshma Pattan 
38425a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3843af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3844af75078fSIntel 		       "but nb_txq=%d will prevent to fully test it.\n",
3845af75078fSIntel 		       nb_rxq, nb_txq);
3846af75078fSIntel 
3847af75078fSIntel 	init_config();
3848fb73e096SJeff Guo 
3849fb73e096SJeff Guo 	if (hot_plug) {
38502049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
3851fb73e096SJeff Guo 		if (ret) {
38522049c511SJeff Guo 			RTE_LOG(ERR, EAL,
38532049c511SJeff Guo 				"fail to enable hotplug handling.");
3854fb73e096SJeff Guo 			return -1;
3855fb73e096SJeff Guo 		}
3856fb73e096SJeff Guo 
38572049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
38582049c511SJeff Guo 		if (ret) {
38592049c511SJeff Guo 			RTE_LOG(ERR, EAL,
38602049c511SJeff Guo 				"fail to start device event monitoring.");
38612049c511SJeff Guo 			return -1;
38622049c511SJeff Guo 		}
38632049c511SJeff Guo 
38642049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
3865cc1bf307SJeff Guo 			dev_event_callback, NULL);
38662049c511SJeff Guo 		if (ret) {
38672049c511SJeff Guo 			RTE_LOG(ERR, EAL,
38682049c511SJeff Guo 				"fail  to register device event callback\n");
38692049c511SJeff Guo 			return -1;
38702049c511SJeff Guo 		}
3871fb73e096SJeff Guo 	}
3872fb73e096SJeff Guo 
38736937d210SStephen Hemminger 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3874148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3875af75078fSIntel 
3876ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
387734fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
387834fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
387934fc1051SIvan Ilchenko 		if (ret != 0)
388034fc1051SIvan Ilchenko 			printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
388134fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
388234fc1051SIvan Ilchenko 	}
3883af75078fSIntel 
38847e4441c8SRemy Horton 	/* Init metrics library */
38857e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
38867e4441c8SRemy Horton 
388762d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
388862d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
388962d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
389062d3216dSReshma Pattan 		if (ret)
389162d3216dSReshma Pattan 			printf("Warning: latencystats init()"
389262d3216dSReshma Pattan 				" returned error %d\n",	ret);
389362d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
389462d3216dSReshma Pattan 			latencystats_lcore_id);
389562d3216dSReshma Pattan 	}
389662d3216dSReshma Pattan #endif
389762d3216dSReshma Pattan 
38987e4441c8SRemy Horton 	/* Setup bitrate stats */
389954f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
3900e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
39017e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
39027e4441c8SRemy Horton 		if (bitrate_data == NULL)
3903e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
3904e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
39057e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
3906e25e6c70SRemy Horton 	}
39077e4441c8SRemy Horton #endif
39087e4441c8SRemy Horton 
39090d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
391081ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
391181ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
391281ef862bSAllain Legacy 
3913ca7feb22SCyril Chemparathy 	if (interactive == 1) {
3914ca7feb22SCyril Chemparathy 		if (auto_start) {
3915ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
3916ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
3917ca7feb22SCyril Chemparathy 		}
3918af75078fSIntel 		prompt();
39190de738cfSJiayu Hu 		pmd_test_exit();
3920ca7feb22SCyril Chemparathy 	} else
39210d56cb81SThomas Monjalon #endif
39220d56cb81SThomas Monjalon 	{
3923af75078fSIntel 		char c;
3924af75078fSIntel 		int rc;
3925af75078fSIntel 
3926d9a191a0SPhil Yang 		f_quit = 0;
3927d9a191a0SPhil Yang 
3928af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
392999cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
3930cfea1f30SPablo de Lara 		if (stats_period != 0) {
3931cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3932cfea1f30SPablo de Lara 			uint64_t timer_period;
3933cfea1f30SPablo de Lara 
3934cfea1f30SPablo de Lara 			/* Convert to number of cycles */
3935cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
3936cfea1f30SPablo de Lara 
3937d9a191a0SPhil Yang 			while (f_quit == 0) {
3938cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
3939cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
3940cfea1f30SPablo de Lara 
3941cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
3942cfea1f30SPablo de Lara 					print_stats();
3943cfea1f30SPablo de Lara 					/* Reset the timer */
3944cfea1f30SPablo de Lara 					diff_time = 0;
3945cfea1f30SPablo de Lara 				}
3946cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
3947cfea1f30SPablo de Lara 				prev_time = cur_time;
3948cfea1f30SPablo de Lara 				sleep(1);
3949cfea1f30SPablo de Lara 			}
3950cfea1f30SPablo de Lara 		}
3951cfea1f30SPablo de Lara 
3952af75078fSIntel 		printf("Press enter to exit\n");
3953af75078fSIntel 		rc = read(0, &c, 1);
3954d3a274ceSZhihong Wang 		pmd_test_exit();
3955af75078fSIntel 		if (rc < 0)
3956af75078fSIntel 			return 1;
3957af75078fSIntel 	}
3958af75078fSIntel 
39595e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
39605e516c89SStephen Hemminger 	if (ret != 0)
39615e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
39625e516c89SStephen Hemminger 			 "EAL cleanup failed: %s\n", strerror(-ret));
39635e516c89SStephen Hemminger 
39645e516c89SStephen Hemminger 	return EXIT_SUCCESS;
3965af75078fSIntel }
3966