xref: /dpdk/app/test-pmd/testpmd.c (revision e62c5a12d9f40f3969d6e0d8c49bd575ea6f01dc)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30af75078fSIntel #include <rte_memory.h>
31af75078fSIntel #include <rte_memcpy.h>
32af75078fSIntel #include <rte_launch.h>
33af75078fSIntel #include <rte_eal.h>
34284c908cSGaetan Rivet #include <rte_alarm.h>
35af75078fSIntel #include <rte_per_lcore.h>
36af75078fSIntel #include <rte_lcore.h>
37af75078fSIntel #include <rte_atomic.h>
38af75078fSIntel #include <rte_branch_prediction.h>
39af75078fSIntel #include <rte_mempool.h>
40af75078fSIntel #include <rte_malloc.h>
41af75078fSIntel #include <rte_mbuf.h>
420e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
43af75078fSIntel #include <rte_interrupts.h>
44af75078fSIntel #include <rte_pci.h>
45af75078fSIntel #include <rte_ether.h>
46af75078fSIntel #include <rte_ethdev.h>
47edab33b1STetsuya Mukawa #include <rte_dev.h>
48af75078fSIntel #include <rte_string_fns.h>
49e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
50e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
51e261265eSRadu Nicolau #endif
52102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
53102b7329SReshma Pattan #include <rte_pdump.h>
54102b7329SReshma Pattan #endif
55938a184aSAdrien Mazarguil #include <rte_flow.h>
567e4441c8SRemy Horton #include <rte_metrics.h>
5754f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
587e4441c8SRemy Horton #include <rte_bitrate.h>
597e4441c8SRemy Horton #endif
6062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6162d3216dSReshma Pattan #include <rte_latencystats.h>
6262d3216dSReshma Pattan #endif
63af75078fSIntel 
64af75078fSIntel #include "testpmd.h"
65af75078fSIntel 
66c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
67c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
69c7f5dba7SAnatoly Burakov #else
70c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
71c7f5dba7SAnatoly Burakov #endif
72c7f5dba7SAnatoly Burakov 
73c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
74c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
75c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
76c7f5dba7SAnatoly Burakov #else
77c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
78c7f5dba7SAnatoly Burakov #endif
79c7f5dba7SAnatoly Burakov 
80c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
8172512e18SViacheslav Ovsiienko #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
1406d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
169af75078fSIntel /*
170af75078fSIntel  * Forwarding engines.
171af75078fSIntel  */
172af75078fSIntel struct fwd_engine * fwd_engines[] = {
173af75078fSIntel 	&io_fwd_engine,
174af75078fSIntel 	&mac_fwd_engine,
175d47388f1SCyril Chemparathy 	&mac_swap_engine,
176e9e23a61SCyril Chemparathy 	&flow_gen_engine,
177af75078fSIntel 	&rx_only_engine,
178af75078fSIntel 	&tx_only_engine,
179af75078fSIntel 	&csum_fwd_engine,
180168dfa61SIvan Boule 	&icmp_echo_engine,
1813c156061SJens Freimann 	&noisy_vnf_engine,
1822564abdaSShiri Kuzin 	&five_tuple_swap_fwd_engine,
183af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
184af75078fSIntel 	&ieee1588_fwd_engine,
185af75078fSIntel #endif
186af75078fSIntel 	NULL,
187af75078fSIntel };
188af75078fSIntel 
18926cbb419SViacheslav Ovsiienko struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
19059fcf854SShahaf Shuler uint16_t mempool_flags;
191401b744dSShahaf Shuler 
192af75078fSIntel struct fwd_config cur_fwd_config;
193af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
194bf56fce1SZhihong Wang uint32_t retry_enabled;
195bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
196bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197af75078fSIntel 
19826cbb419SViacheslav Ovsiienko uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
19926cbb419SViacheslav Ovsiienko uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
20026cbb419SViacheslav Ovsiienko 	DEFAULT_MBUF_DATA_SIZE
20126cbb419SViacheslav Ovsiienko }; /**< Mbuf data space size. */
202c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
203c8798818SIntel                                       * specified on command-line. */
204cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
205d9a191a0SPhil Yang 
206d9a191a0SPhil Yang /*
207d9a191a0SPhil Yang  * In container, it cannot terminate the process which running with 'stats-period'
208d9a191a0SPhil Yang  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
209d9a191a0SPhil Yang  */
210d9a191a0SPhil Yang uint8_t f_quit;
211d9a191a0SPhil Yang 
212af75078fSIntel /*
2130f2096d7SViacheslav Ovsiienko  * Configuration of packet segments used to scatter received packets
2140f2096d7SViacheslav Ovsiienko  * if some of split features is configured.
2150f2096d7SViacheslav Ovsiienko  */
2160f2096d7SViacheslav Ovsiienko uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
2170f2096d7SViacheslav Ovsiienko uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
21891c78e09SViacheslav Ovsiienko uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
21991c78e09SViacheslav Ovsiienko uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
2200f2096d7SViacheslav Ovsiienko 
2210f2096d7SViacheslav Ovsiienko /*
222af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
223af75078fSIntel  */
224af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
225af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
226af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
227af75078fSIntel };
228af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
229af75078fSIntel 
23079bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
23179bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
23279bec05bSKonstantin Ananyev 
23382010ef5SYongseok Koh uint8_t txonly_multi_flow;
23482010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
23582010ef5SYongseok Koh 
2364940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter;
2374940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */
2384940344dSViacheslav Ovsiienko 
2394940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra;
2404940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */
2414940344dSViacheslav Ovsiienko 
242af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
243e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
244af75078fSIntel 
245900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
246900550deSIntel uint8_t dcb_config = 0;
247900550deSIntel 
248900550deSIntel /* Whether the dcb is in testing status */
249900550deSIntel uint8_t dcb_test = 0;
250900550deSIntel 
251af75078fSIntel /*
252af75078fSIntel  * Configurable number of RX/TX queues.
253af75078fSIntel  */
2541c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
255af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
256af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
257af75078fSIntel 
258af75078fSIntel /*
259af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2608599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
261af75078fSIntel  */
2628599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2638599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
264af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
265af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
266af75078fSIntel 
267f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
268af75078fSIntel /*
269af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
270af75078fSIntel  */
271af75078fSIntel 
272f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
273f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
274f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
275af75078fSIntel 
276f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
277f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
278f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
279af75078fSIntel 
280af75078fSIntel /*
281af75078fSIntel  * Configurable value of RX free threshold.
282af75078fSIntel  */
283f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
284af75078fSIntel 
285af75078fSIntel /*
286ce8d5614SIntel  * Configurable value of RX drop enable.
287ce8d5614SIntel  */
288f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
289ce8d5614SIntel 
290ce8d5614SIntel /*
291af75078fSIntel  * Configurable value of TX free threshold.
292af75078fSIntel  */
293f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
294af75078fSIntel 
295af75078fSIntel /*
296af75078fSIntel  * Configurable value of TX RS bit threshold.
297af75078fSIntel  */
298f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
299af75078fSIntel 
300af75078fSIntel /*
3013c156061SJens Freimann  * Configurable value of buffered packets before sending.
3023c156061SJens Freimann  */
3033c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
3043c156061SJens Freimann 
3053c156061SJens Freimann /*
3063c156061SJens Freimann  * Configurable value of packet buffer timeout.
3073c156061SJens Freimann  */
3083c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
3093c156061SJens Freimann 
3103c156061SJens Freimann /*
3113c156061SJens Freimann  * Configurable value for size of VNF internal memory area
3123c156061SJens Freimann  * used for simulating noisy neighbour behaviour
3133c156061SJens Freimann  */
3143c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
3153c156061SJens Freimann 
3163c156061SJens Freimann /*
3173c156061SJens Freimann  * Configurable value of number of random writes done in
3183c156061SJens Freimann  * VNF simulation memory area.
3193c156061SJens Freimann  */
3203c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3213c156061SJens Freimann 
3223c156061SJens Freimann /*
3233c156061SJens Freimann  * Configurable value of number of random reads done in
3243c156061SJens Freimann  * VNF simulation memory area.
3253c156061SJens Freimann  */
3263c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3273c156061SJens Freimann 
3283c156061SJens Freimann /*
3293c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3303c156061SJens Freimann  * VNF simulation memory area.
3313c156061SJens Freimann  */
3323c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3333c156061SJens Freimann 
3343c156061SJens Freimann /*
335af75078fSIntel  * Receive Side Scaling (RSS) configuration.
336af75078fSIntel  */
3378a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
338af75078fSIntel 
339af75078fSIntel /*
340af75078fSIntel  * Port topology configuration
341af75078fSIntel  */
342af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
343af75078fSIntel 
3447741e4cfSIntel /*
3457741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3467741e4cfSIntel  */
3477741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3487741e4cfSIntel 
349af75078fSIntel /*
3507ee3e944SVasily Philipov  * Flow API isolated mode.
3517ee3e944SVasily Philipov  */
3527ee3e944SVasily Philipov uint8_t flow_isolate_all;
3537ee3e944SVasily Philipov 
3547ee3e944SVasily Philipov /*
355bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
356bc202406SDavid Marchand  */
357bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
358bc202406SDavid Marchand 
359bc202406SDavid Marchand /*
3606937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3616937d210SStephen Hemminger  */
3626937d210SStephen Hemminger uint8_t no_device_start = 0;
3636937d210SStephen Hemminger 
3646937d210SStephen Hemminger /*
3658ea656f8SGaetan Rivet  * Enable link status change notification
3668ea656f8SGaetan Rivet  */
3678ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3688ea656f8SGaetan Rivet 
3698ea656f8SGaetan Rivet /*
370284c908cSGaetan Rivet  * Enable device removal notification.
371284c908cSGaetan Rivet  */
372284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
373284c908cSGaetan Rivet 
374fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
375fb73e096SJeff Guo 
3764f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3774f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3784f1ed78eSThomas Monjalon 
379b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
380b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
381b0a9354aSPavan Nikhilesh 
38201817b10SBing Zhao /* Hairpin ports configuration mode. */
38301817b10SBing Zhao uint16_t hairpin_mode;
38401817b10SBing Zhao 
38597b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
38697b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
38797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
38897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
38997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
39097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
39197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
39297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
39397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
39497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
39597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
39697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
3970e459ffaSDong Zhou 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
39897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
39997b5d8b5SThomas Monjalon };
40097b5d8b5SThomas Monjalon 
401284c908cSGaetan Rivet /*
4023af72783SGaetan Rivet  * Display or mask ether events
4033af72783SGaetan Rivet  * Default to all events except VF_MBOX
4043af72783SGaetan Rivet  */
4053af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
4063af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
4073af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
4083af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
409badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
4103af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
4110e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
4120e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
413e505d84cSAnatoly Burakov /*
414e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
415e505d84cSAnatoly Burakov  */
416e505d84cSAnatoly Burakov int do_mlockall = 0;
4173af72783SGaetan Rivet 
4183af72783SGaetan Rivet /*
4197b7e5ba7SIntel  * NIC bypass mode configuration options.
4207b7e5ba7SIntel  */
4217b7e5ba7SIntel 
42250c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
4237b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
424e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4257b7e5ba7SIntel #endif
4267b7e5ba7SIntel 
427e261265eSRadu Nicolau 
42862d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
42962d3216dSReshma Pattan 
43062d3216dSReshma Pattan /*
43162d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
43262d3216dSReshma Pattan  */
43362d3216dSReshma Pattan uint8_t latencystats_enabled;
43462d3216dSReshma Pattan 
43562d3216dSReshma Pattan /*
43662d3216dSReshma Pattan  * Lcore ID to serive latency statistics.
43762d3216dSReshma Pattan  */
43862d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
43962d3216dSReshma Pattan 
44062d3216dSReshma Pattan #endif
44162d3216dSReshma Pattan 
4427b7e5ba7SIntel /*
443af75078fSIntel  * Ethernet device configuration.
444af75078fSIntel  */
445af75078fSIntel struct rte_eth_rxmode rx_mode = {
44635b2d13fSOlivier Matz 	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
44735b2d13fSOlivier Matz 		/**< Default maximum frame length. */
448af75078fSIntel };
449af75078fSIntel 
45007e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
45107e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
45207e5f7bdSShahaf Shuler };
453fd8c20aaSShahaf Shuler 
454af75078fSIntel struct rte_fdir_conf fdir_conf = {
455af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
456af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
457af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
458d9d5e6f2SJingjing Wu 	.mask = {
45926f579aaSWei Zhao 		.vlan_tci_mask = 0xFFEF,
460d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
461d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
462d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
463d9d5e6f2SJingjing Wu 		},
464d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
465d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
466d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
467d9d5e6f2SJingjing Wu 		},
468d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
469d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
47047b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
47147b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
47247b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
473d9d5e6f2SJingjing Wu 	},
474af75078fSIntel 	.drop_queue = 127,
475af75078fSIntel };
476af75078fSIntel 
4772950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
478af75078fSIntel 
479ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
480ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
481ed30d9b6SIntel 
482ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
483ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
484ed30d9b6SIntel 
485ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
486ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
487ed30d9b6SIntel 
488a4fd5eeeSElza Mathew /*
489a4fd5eeeSElza Mathew  * Display zero values by default for xstats
490a4fd5eeeSElza Mathew  */
491a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
492a4fd5eeeSElza Mathew 
493bc700b67SDharmik Thakkar /*
494bc700b67SDharmik Thakkar  * Measure of CPU cycles disabled by default
495bc700b67SDharmik Thakkar  */
496bc700b67SDharmik Thakkar uint8_t record_core_cycles;
497bc700b67SDharmik Thakkar 
4980e4b1963SDharmik Thakkar /*
4990e4b1963SDharmik Thakkar  * Display of RX and TX bursts disabled by default
5000e4b1963SDharmik Thakkar  */
5010e4b1963SDharmik Thakkar uint8_t record_burst_stats;
5020e4b1963SDharmik Thakkar 
503c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
504c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
5057acf894dSStephen Hurd 
50654f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
5077e4441c8SRemy Horton /* Bitrate statistics */
5087e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
509e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
510e25e6c70SRemy Horton uint8_t bitrate_enabled;
511e25e6c70SRemy Horton #endif
5127e4441c8SRemy Horton 
513b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
514b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
515b40f8d78SJiayu Hu 
516f9295aa2SXiaoyu Min /*
517f9295aa2SXiaoyu Min  * hexadecimal bitmask of RX mq mode can be enabled.
518f9295aa2SXiaoyu Min  */
519f9295aa2SXiaoyu Min enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
520f9295aa2SXiaoyu Min 
521ed30d9b6SIntel /* Forward function declarations */
522c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
52328caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
52428caa76aSZhiyong Yang 						   struct rte_port *port);
525edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
526f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
52776ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
528d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
529cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
530fb73e096SJeff Guo 				enum rte_dev_event_type type,
531fb73e096SJeff Guo 				void *param);
532ce8d5614SIntel 
533ce8d5614SIntel /*
534ce8d5614SIntel  * Check if all the ports are started.
535ce8d5614SIntel  * If yes, return positive value. If not, return zero.
536ce8d5614SIntel  */
537ce8d5614SIntel static int all_ports_started(void);
538ed30d9b6SIntel 
53952f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
54035b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
54152f38a20SJiayu Hu 
542b57b66a9SOri Kam /* Holds the registered mbuf dynamic flags names. */
543b57b66a9SOri Kam char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
544b57b66a9SOri Kam 
545af75078fSIntel /*
54698a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
547c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
548c9cafcc8SShahaf Shuler  */
549c9cafcc8SShahaf Shuler int
550c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
551c9cafcc8SShahaf Shuler {
552c9cafcc8SShahaf Shuler 	unsigned int i;
553c9cafcc8SShahaf Shuler 
554c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
555c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
556c9cafcc8SShahaf Shuler 			return 0;
557c9cafcc8SShahaf Shuler 	}
558c9cafcc8SShahaf Shuler 	return 1;
559c9cafcc8SShahaf Shuler }
560c9cafcc8SShahaf Shuler 
561c9cafcc8SShahaf Shuler /*
562af75078fSIntel  * Setup default configuration.
563af75078fSIntel  */
564af75078fSIntel static void
565af75078fSIntel set_default_fwd_lcores_config(void)
566af75078fSIntel {
567af75078fSIntel 	unsigned int i;
568af75078fSIntel 	unsigned int nb_lc;
5697acf894dSStephen Hurd 	unsigned int sock_num;
570af75078fSIntel 
571af75078fSIntel 	nb_lc = 0;
572af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
573dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
574dbfb8ec7SPhil Yang 			continue;
575c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
576c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
577c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
578c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
579c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
580c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
581c9cafcc8SShahaf Shuler 			}
582c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5837acf894dSStephen Hurd 		}
584f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
585f54fe5eeSStephen Hurd 			continue;
586f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
587af75078fSIntel 	}
588af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
589af75078fSIntel 	nb_cfg_lcores = nb_lcores;
590af75078fSIntel 	nb_fwd_lcores = 1;
591af75078fSIntel }
592af75078fSIntel 
593af75078fSIntel static void
594af75078fSIntel set_def_peer_eth_addrs(void)
595af75078fSIntel {
596af75078fSIntel 	portid_t i;
597af75078fSIntel 
598af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
59935b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
600af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
601af75078fSIntel 	}
602af75078fSIntel }
603af75078fSIntel 
604af75078fSIntel static void
605af75078fSIntel set_default_fwd_ports_config(void)
606af75078fSIntel {
607af75078fSIntel 	portid_t pt_id;
60865a7360cSMatan Azrad 	int i = 0;
609af75078fSIntel 
610effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
61165a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
612af75078fSIntel 
613effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
614effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
615effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
616effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
617effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
618effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
619effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
620effdb8bbSPhil Yang 			}
621effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
622effdb8bbSPhil Yang 		}
623effdb8bbSPhil Yang 	}
624effdb8bbSPhil Yang 
625af75078fSIntel 	nb_cfg_ports = nb_ports;
626af75078fSIntel 	nb_fwd_ports = nb_ports;
627af75078fSIntel }
628af75078fSIntel 
629af75078fSIntel void
630af75078fSIntel set_def_fwd_config(void)
631af75078fSIntel {
632af75078fSIntel 	set_default_fwd_lcores_config();
633af75078fSIntel 	set_def_peer_eth_addrs();
634af75078fSIntel 	set_default_fwd_ports_config();
635af75078fSIntel }
636af75078fSIntel 
637c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
638c7f5dba7SAnatoly Burakov static int
639c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
640c7f5dba7SAnatoly Burakov {
641c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
642c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
643c7f5dba7SAnatoly Burakov 
644c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
645c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
646c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
647c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
648c7f5dba7SAnatoly Burakov 	 */
649c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
650c7f5dba7SAnatoly Burakov 
651c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
652c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
653c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
654c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
655c7f5dba7SAnatoly Burakov 		return -1;
656c7f5dba7SAnatoly Burakov 	}
657c7f5dba7SAnatoly Burakov 
658c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
659c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
660c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
661c7f5dba7SAnatoly Burakov 
662c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
663c7f5dba7SAnatoly Burakov 
664c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
665c7f5dba7SAnatoly Burakov 
666c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
667c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
668c7f5dba7SAnatoly Burakov 		return -1;
669c7f5dba7SAnatoly Burakov 	}
670c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
671c7f5dba7SAnatoly Burakov 
672c7f5dba7SAnatoly Burakov 	return 0;
673c7f5dba7SAnatoly Burakov }
674c7f5dba7SAnatoly Burakov 
675c7f5dba7SAnatoly Burakov static int
676c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
677c7f5dba7SAnatoly Burakov {
678c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
679c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
680c7f5dba7SAnatoly Burakov 	 */
6819d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
682c7f5dba7SAnatoly Burakov 
683c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
684c7f5dba7SAnatoly Burakov }
685c7f5dba7SAnatoly Burakov 
686c7f5dba7SAnatoly Burakov static void *
687c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
688c7f5dba7SAnatoly Burakov {
689c7f5dba7SAnatoly Burakov 	void *addr;
690c7f5dba7SAnatoly Burakov 	int flags;
691c7f5dba7SAnatoly Burakov 
692c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
693c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
694c7f5dba7SAnatoly Burakov 	if (huge)
695c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
696c7f5dba7SAnatoly Burakov 
697c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
698c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
699c7f5dba7SAnatoly Burakov 		return NULL;
700c7f5dba7SAnatoly Burakov 
701c7f5dba7SAnatoly Burakov 	return addr;
702c7f5dba7SAnatoly Burakov }
703c7f5dba7SAnatoly Burakov 
704c7f5dba7SAnatoly Burakov struct extmem_param {
705c7f5dba7SAnatoly Burakov 	void *addr;
706c7f5dba7SAnatoly Burakov 	size_t len;
707c7f5dba7SAnatoly Burakov 	size_t pgsz;
708c7f5dba7SAnatoly Burakov 	rte_iova_t *iova_table;
709c7f5dba7SAnatoly Burakov 	unsigned int iova_table_len;
710c7f5dba7SAnatoly Burakov };
711c7f5dba7SAnatoly Burakov 
712c7f5dba7SAnatoly Burakov static int
713c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
714c7f5dba7SAnatoly Burakov 		bool huge)
715c7f5dba7SAnatoly Burakov {
716c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
717c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
718c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
719c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
720c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
721c7f5dba7SAnatoly Burakov 	void *addr;
722c7f5dba7SAnatoly Burakov 	int ret;
723c7f5dba7SAnatoly Burakov 
724c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
725c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
726c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
727c7f5dba7SAnatoly Burakov 			continue;
728c7f5dba7SAnatoly Burakov 
729c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
730c7f5dba7SAnatoly Burakov 
731c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
732c7f5dba7SAnatoly Burakov 		if (!huge)
733c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
734c7f5dba7SAnatoly Burakov 
735c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
736c7f5dba7SAnatoly Burakov 		if (ret < 0) {
737c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
738c7f5dba7SAnatoly Burakov 			return -1;
739c7f5dba7SAnatoly Burakov 		}
740c7f5dba7SAnatoly Burakov 
741c7f5dba7SAnatoly Burakov 		/* allocate our memory */
742c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
743c7f5dba7SAnatoly Burakov 
744c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
745c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
746c7f5dba7SAnatoly Burakov 		 * try another one.
747c7f5dba7SAnatoly Burakov 		 */
748c7f5dba7SAnatoly Burakov 		if (addr == NULL)
749c7f5dba7SAnatoly Burakov 			continue;
750c7f5dba7SAnatoly Burakov 
751c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
752c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
753c7f5dba7SAnatoly Burakov 
754c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
755c7f5dba7SAnatoly Burakov 
756c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
757c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
758c7f5dba7SAnatoly Burakov 			goto fail;
759c7f5dba7SAnatoly Burakov 		}
760c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
761c7f5dba7SAnatoly Burakov 		if (!huge)
762c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
763c7f5dba7SAnatoly Burakov 
764c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
765c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
766c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
767c7f5dba7SAnatoly Burakov 			size_t offset;
768c7f5dba7SAnatoly Burakov 			void *cur;
769c7f5dba7SAnatoly Burakov 
770c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
771c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
772c7f5dba7SAnatoly Burakov 
773c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
774c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
775c7f5dba7SAnatoly Burakov 
776c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
777c7f5dba7SAnatoly Burakov 
778c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
779c7f5dba7SAnatoly Burakov 		}
780c7f5dba7SAnatoly Burakov 
781c7f5dba7SAnatoly Burakov 		break;
782c7f5dba7SAnatoly Burakov 	}
783c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
784c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
785c7f5dba7SAnatoly Burakov 		return -1;
786c7f5dba7SAnatoly Burakov 
787c7f5dba7SAnatoly Burakov 	param->addr = addr;
788c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
789c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
790c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
791c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
792c7f5dba7SAnatoly Burakov 
793c7f5dba7SAnatoly Burakov 	return 0;
794c7f5dba7SAnatoly Burakov fail:
795c7f5dba7SAnatoly Burakov 	if (iovas)
796c7f5dba7SAnatoly Burakov 		free(iovas);
797c7f5dba7SAnatoly Burakov 	if (addr)
798c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
799c7f5dba7SAnatoly Burakov 
800c7f5dba7SAnatoly Burakov 	return -1;
801c7f5dba7SAnatoly Burakov }
802c7f5dba7SAnatoly Burakov 
803c7f5dba7SAnatoly Burakov static int
804c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
805c7f5dba7SAnatoly Burakov {
806c7f5dba7SAnatoly Burakov 	struct extmem_param param;
807c7f5dba7SAnatoly Burakov 	int socket_id, ret;
808c7f5dba7SAnatoly Burakov 
809c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
810c7f5dba7SAnatoly Burakov 
811c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
812c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
813c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
814c7f5dba7SAnatoly Burakov 		/* create our heap */
815c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
816c7f5dba7SAnatoly Burakov 		if (ret < 0) {
817c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
818c7f5dba7SAnatoly Burakov 			return -1;
819c7f5dba7SAnatoly Burakov 		}
820c7f5dba7SAnatoly Burakov 	}
821c7f5dba7SAnatoly Burakov 
822c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
823c7f5dba7SAnatoly Burakov 	if (ret < 0) {
824c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
825c7f5dba7SAnatoly Burakov 		return -1;
826c7f5dba7SAnatoly Burakov 	}
827c7f5dba7SAnatoly Burakov 
828c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
829c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
830c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
831c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
832c7f5dba7SAnatoly Burakov 
833c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
834c7f5dba7SAnatoly Burakov 
835c7f5dba7SAnatoly Burakov 	/* not needed any more */
836c7f5dba7SAnatoly Burakov 	free(param.iova_table);
837c7f5dba7SAnatoly Burakov 
838c7f5dba7SAnatoly Burakov 	if (ret < 0) {
839c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
840c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
841c7f5dba7SAnatoly Burakov 		return -1;
842c7f5dba7SAnatoly Burakov 	}
843c7f5dba7SAnatoly Burakov 
844c7f5dba7SAnatoly Burakov 	/* success */
845c7f5dba7SAnatoly Burakov 
846c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
847c7f5dba7SAnatoly Burakov 			param.len >> 20);
848c7f5dba7SAnatoly Burakov 
849c7f5dba7SAnatoly Burakov 	return 0;
850c7f5dba7SAnatoly Burakov }
8513a0968c8SShahaf Shuler static void
8523a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8533a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8543a0968c8SShahaf Shuler {
8553a0968c8SShahaf Shuler 	uint16_t pid = 0;
8563a0968c8SShahaf Shuler 	int ret;
8573a0968c8SShahaf Shuler 
8583a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8593a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8603a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8613a0968c8SShahaf Shuler 
8623a0968c8SShahaf Shuler 		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
8633a0968c8SShahaf Shuler 					memhdr->len);
8643a0968c8SShahaf Shuler 		if (ret) {
8653a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8663a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
8673a0968c8SShahaf Shuler 				    "for device %s\n",
8683a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8693a0968c8SShahaf Shuler 		}
8703a0968c8SShahaf Shuler 	}
8713a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
8723a0968c8SShahaf Shuler 	if (ret) {
8733a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8743a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
8753a0968c8SShahaf Shuler 	}
8763a0968c8SShahaf Shuler }
8773a0968c8SShahaf Shuler 
8783a0968c8SShahaf Shuler static void
8793a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8803a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8813a0968c8SShahaf Shuler {
8823a0968c8SShahaf Shuler 	uint16_t pid = 0;
8833a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
8843a0968c8SShahaf Shuler 	int ret;
8853a0968c8SShahaf Shuler 
8863a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
8873a0968c8SShahaf Shuler 				  page_size);
8883a0968c8SShahaf Shuler 	if (ret) {
8893a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8903a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
8913a0968c8SShahaf Shuler 		return;
8923a0968c8SShahaf Shuler 	}
8933a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8943a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8953a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8963a0968c8SShahaf Shuler 
8973a0968c8SShahaf Shuler 		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
8983a0968c8SShahaf Shuler 				      memhdr->len);
8993a0968c8SShahaf Shuler 		if (ret) {
9003a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
9013a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
9023a0968c8SShahaf Shuler 				    "for device %s\n",
9033a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
9043a0968c8SShahaf Shuler 		}
9053a0968c8SShahaf Shuler 	}
9063a0968c8SShahaf Shuler }
907c7f5dba7SAnatoly Burakov 
90872512e18SViacheslav Ovsiienko static unsigned int
90972512e18SViacheslav Ovsiienko setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
91072512e18SViacheslav Ovsiienko 	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
91172512e18SViacheslav Ovsiienko {
91272512e18SViacheslav Ovsiienko 	struct rte_pktmbuf_extmem *xmem;
91372512e18SViacheslav Ovsiienko 	unsigned int ext_num, zone_num, elt_num;
91472512e18SViacheslav Ovsiienko 	uint16_t elt_size;
91572512e18SViacheslav Ovsiienko 
91672512e18SViacheslav Ovsiienko 	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
91772512e18SViacheslav Ovsiienko 	elt_num = EXTBUF_ZONE_SIZE / elt_size;
91872512e18SViacheslav Ovsiienko 	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
91972512e18SViacheslav Ovsiienko 
92072512e18SViacheslav Ovsiienko 	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
92172512e18SViacheslav Ovsiienko 	if (xmem == NULL) {
92272512e18SViacheslav Ovsiienko 		TESTPMD_LOG(ERR, "Cannot allocate memory for "
92372512e18SViacheslav Ovsiienko 				 "external buffer descriptors\n");
92472512e18SViacheslav Ovsiienko 		*ext_mem = NULL;
92572512e18SViacheslav Ovsiienko 		return 0;
92672512e18SViacheslav Ovsiienko 	}
92772512e18SViacheslav Ovsiienko 	for (ext_num = 0; ext_num < zone_num; ext_num++) {
92872512e18SViacheslav Ovsiienko 		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
92972512e18SViacheslav Ovsiienko 		const struct rte_memzone *mz;
93072512e18SViacheslav Ovsiienko 		char mz_name[RTE_MEMZONE_NAMESIZE];
93172512e18SViacheslav Ovsiienko 		int ret;
93272512e18SViacheslav Ovsiienko 
93372512e18SViacheslav Ovsiienko 		ret = snprintf(mz_name, sizeof(mz_name),
93472512e18SViacheslav Ovsiienko 			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
93572512e18SViacheslav Ovsiienko 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
93672512e18SViacheslav Ovsiienko 			errno = ENAMETOOLONG;
93772512e18SViacheslav Ovsiienko 			ext_num = 0;
93872512e18SViacheslav Ovsiienko 			break;
93972512e18SViacheslav Ovsiienko 		}
94072512e18SViacheslav Ovsiienko 		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
94172512e18SViacheslav Ovsiienko 						 socket_id,
94272512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_IOVA_CONTIG |
94372512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_1GB |
94472512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_SIZE_HINT_ONLY,
94572512e18SViacheslav Ovsiienko 						 EXTBUF_ZONE_SIZE);
94672512e18SViacheslav Ovsiienko 		if (mz == NULL) {
94772512e18SViacheslav Ovsiienko 			/*
94872512e18SViacheslav Ovsiienko 			 * The caller exits on external buffer creation
94972512e18SViacheslav Ovsiienko 			 * error, so there is no need to free memzones.
95072512e18SViacheslav Ovsiienko 			 */
95172512e18SViacheslav Ovsiienko 			errno = ENOMEM;
95272512e18SViacheslav Ovsiienko 			ext_num = 0;
95372512e18SViacheslav Ovsiienko 			break;
95472512e18SViacheslav Ovsiienko 		}
95572512e18SViacheslav Ovsiienko 		xseg->buf_ptr = mz->addr;
95672512e18SViacheslav Ovsiienko 		xseg->buf_iova = mz->iova;
95772512e18SViacheslav Ovsiienko 		xseg->buf_len = EXTBUF_ZONE_SIZE;
95872512e18SViacheslav Ovsiienko 		xseg->elt_size = elt_size;
95972512e18SViacheslav Ovsiienko 	}
96072512e18SViacheslav Ovsiienko 	if (ext_num == 0 && xmem != NULL) {
96172512e18SViacheslav Ovsiienko 		free(xmem);
96272512e18SViacheslav Ovsiienko 		xmem = NULL;
96372512e18SViacheslav Ovsiienko 	}
96472512e18SViacheslav Ovsiienko 	*ext_mem = xmem;
96572512e18SViacheslav Ovsiienko 	return ext_num;
96672512e18SViacheslav Ovsiienko }
96772512e18SViacheslav Ovsiienko 
968af75078fSIntel /*
969af75078fSIntel  * Configuration initialisation done once at init time.
970af75078fSIntel  */
971401b744dSShahaf Shuler static struct rte_mempool *
972af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
97326cbb419SViacheslav Ovsiienko 		 unsigned int socket_id, uint16_t size_idx)
974af75078fSIntel {
975af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
976bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
977af75078fSIntel 	uint32_t mb_size;
978af75078fSIntel 
979dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
98026cbb419SViacheslav Ovsiienko 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
981148f963fSBruce Richardson 
982285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
983d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
984d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
985d1eb542eSOlivier Matz 
986c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
987c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
988c7f5dba7SAnatoly Burakov 		{
989c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
990c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
991c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
992c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
993c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
994c7f5dba7SAnatoly Burakov 			break;
995c7f5dba7SAnatoly Burakov 		}
996c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
997c7f5dba7SAnatoly Burakov 		{
998b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
999c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
1000148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
100159fcf854SShahaf Shuler 				socket_id, mempool_flags);
100224427bb9SOlivier Matz 			if (rte_mp == NULL)
100324427bb9SOlivier Matz 				goto err;
1004b19a0c75SOlivier Matz 
1005b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
1006b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
1007b19a0c75SOlivier Matz 				rte_mp = NULL;
100824427bb9SOlivier Matz 				goto err;
1009b19a0c75SOlivier Matz 			}
1010b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
1011b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
10123a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1013c7f5dba7SAnatoly Burakov 			break;
1014c7f5dba7SAnatoly Burakov 		}
1015c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
1016c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
1017c7f5dba7SAnatoly Burakov 		{
1018c7f5dba7SAnatoly Burakov 			int heap_socket;
1019c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1020c7f5dba7SAnatoly Burakov 
1021c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1022c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1023c7f5dba7SAnatoly Burakov 
1024c7f5dba7SAnatoly Burakov 			heap_socket =
1025c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1026c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
1027c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1028c7f5dba7SAnatoly Burakov 
10290e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
10300e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
1031ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1032c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
1033c7f5dba7SAnatoly Burakov 					heap_socket);
1034c7f5dba7SAnatoly Burakov 			break;
1035c7f5dba7SAnatoly Burakov 		}
103672512e18SViacheslav Ovsiienko 	case MP_ALLOC_XBUF:
103772512e18SViacheslav Ovsiienko 		{
103872512e18SViacheslav Ovsiienko 			struct rte_pktmbuf_extmem *ext_mem;
103972512e18SViacheslav Ovsiienko 			unsigned int ext_num;
104072512e18SViacheslav Ovsiienko 
104172512e18SViacheslav Ovsiienko 			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
104272512e18SViacheslav Ovsiienko 					       socket_id, pool_name, &ext_mem);
104372512e18SViacheslav Ovsiienko 			if (ext_num == 0)
104472512e18SViacheslav Ovsiienko 				rte_exit(EXIT_FAILURE,
104572512e18SViacheslav Ovsiienko 					 "Can't create pinned data buffers\n");
104672512e18SViacheslav Ovsiienko 
104772512e18SViacheslav Ovsiienko 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
104872512e18SViacheslav Ovsiienko 					rte_mbuf_best_mempool_ops());
104972512e18SViacheslav Ovsiienko 			rte_mp = rte_pktmbuf_pool_create_extbuf
105072512e18SViacheslav Ovsiienko 					(pool_name, nb_mbuf, mb_mempool_cache,
105172512e18SViacheslav Ovsiienko 					 0, mbuf_seg_size, socket_id,
105272512e18SViacheslav Ovsiienko 					 ext_mem, ext_num);
105372512e18SViacheslav Ovsiienko 			free(ext_mem);
105472512e18SViacheslav Ovsiienko 			break;
105572512e18SViacheslav Ovsiienko 		}
1056c7f5dba7SAnatoly Burakov 	default:
1057c7f5dba7SAnatoly Burakov 		{
1058c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1059c7f5dba7SAnatoly Burakov 		}
1060bece7b6cSChristian Ehrhardt 	}
1061148f963fSBruce Richardson 
106224427bb9SOlivier Matz err:
1063af75078fSIntel 	if (rte_mp == NULL) {
1064d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
1065d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
1066d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
1067148f963fSBruce Richardson 	} else if (verbose_level > 0) {
1068591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
1069af75078fSIntel 	}
1070401b744dSShahaf Shuler 	return rte_mp;
1071af75078fSIntel }
1072af75078fSIntel 
107320a0286fSLiu Xiaofeng /*
107420a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
107520a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
107620a0286fSLiu Xiaofeng  */
107720a0286fSLiu Xiaofeng static int
107820a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
107920a0286fSLiu Xiaofeng {
108020a0286fSLiu Xiaofeng 	static int warning_once = 0;
108120a0286fSLiu Xiaofeng 
1082c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
108320a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
108420a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
108520a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
108620a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
108720a0286fSLiu Xiaofeng 			       " --numa.\n");
108820a0286fSLiu Xiaofeng 		warning_once = 1;
108920a0286fSLiu Xiaofeng 		return -1;
109020a0286fSLiu Xiaofeng 	}
109120a0286fSLiu Xiaofeng 	return 0;
109220a0286fSLiu Xiaofeng }
109320a0286fSLiu Xiaofeng 
10943f7311baSWei Dai /*
10953f7311baSWei Dai  * Get the allowed maximum number of RX queues.
10963f7311baSWei Dai  * *pid return the port id which has minimal value of
10973f7311baSWei Dai  * max_rx_queues in all ports.
10983f7311baSWei Dai  */
10993f7311baSWei Dai queueid_t
11003f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
11013f7311baSWei Dai {
11029e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
11036f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
11043f7311baSWei Dai 	portid_t pi;
11053f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
11063f7311baSWei Dai 
11073f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
11086f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
11096f51deb9SIvan Ilchenko 			continue;
11106f51deb9SIvan Ilchenko 
11116f51deb9SIvan Ilchenko 		max_rxq_valid = true;
11123f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
11133f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
11143f7311baSWei Dai 			*pid = pi;
11153f7311baSWei Dai 		}
11163f7311baSWei Dai 	}
11176f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
11183f7311baSWei Dai }
11193f7311baSWei Dai 
11203f7311baSWei Dai /*
11213f7311baSWei Dai  * Check input rxq is valid or not.
11223f7311baSWei Dai  * If input rxq is not greater than any of maximum number
11233f7311baSWei Dai  * of RX queues of all ports, it is valid.
11243f7311baSWei Dai  * if valid, return 0, else return -1
11253f7311baSWei Dai  */
11263f7311baSWei Dai int
11273f7311baSWei Dai check_nb_rxq(queueid_t rxq)
11283f7311baSWei Dai {
11293f7311baSWei Dai 	queueid_t allowed_max_rxq;
11303f7311baSWei Dai 	portid_t pid = 0;
11313f7311baSWei Dai 
11323f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
11333f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
11343f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
11353f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
11363f7311baSWei Dai 		       rxq,
11373f7311baSWei Dai 		       allowed_max_rxq,
11383f7311baSWei Dai 		       pid);
11393f7311baSWei Dai 		return -1;
11403f7311baSWei Dai 	}
11413f7311baSWei Dai 	return 0;
11423f7311baSWei Dai }
11433f7311baSWei Dai 
114436db4f6cSWei Dai /*
114536db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
114636db4f6cSWei Dai  * *pid return the port id which has minimal value of
114736db4f6cSWei Dai  * max_tx_queues in all ports.
114836db4f6cSWei Dai  */
114936db4f6cSWei Dai queueid_t
115036db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
115136db4f6cSWei Dai {
11529e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
11536f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
115436db4f6cSWei Dai 	portid_t pi;
115536db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
115636db4f6cSWei Dai 
115736db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
11586f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
11596f51deb9SIvan Ilchenko 			continue;
11606f51deb9SIvan Ilchenko 
11616f51deb9SIvan Ilchenko 		max_txq_valid = true;
116236db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
116336db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
116436db4f6cSWei Dai 			*pid = pi;
116536db4f6cSWei Dai 		}
116636db4f6cSWei Dai 	}
11676f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
116836db4f6cSWei Dai }
116936db4f6cSWei Dai 
117036db4f6cSWei Dai /*
117136db4f6cSWei Dai  * Check input txq is valid or not.
117236db4f6cSWei Dai  * If input txq is not greater than any of maximum number
117336db4f6cSWei Dai  * of TX queues of all ports, it is valid.
117436db4f6cSWei Dai  * if valid, return 0, else return -1
117536db4f6cSWei Dai  */
117636db4f6cSWei Dai int
117736db4f6cSWei Dai check_nb_txq(queueid_t txq)
117836db4f6cSWei Dai {
117936db4f6cSWei Dai 	queueid_t allowed_max_txq;
118036db4f6cSWei Dai 	portid_t pid = 0;
118136db4f6cSWei Dai 
118236db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
118336db4f6cSWei Dai 	if (txq > allowed_max_txq) {
118436db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
118536db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
118636db4f6cSWei Dai 		       txq,
118736db4f6cSWei Dai 		       allowed_max_txq,
118836db4f6cSWei Dai 		       pid);
118936db4f6cSWei Dai 		return -1;
119036db4f6cSWei Dai 	}
119136db4f6cSWei Dai 	return 0;
119236db4f6cSWei Dai }
119336db4f6cSWei Dai 
11941c69df45SOri Kam /*
119599e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
119699e040d3SLijun Ou  * *pid return the port id which has minimal value of
119799e040d3SLijun Ou  * max_rxd in all queues of all ports.
119899e040d3SLijun Ou  */
119999e040d3SLijun Ou static uint16_t
120099e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
120199e040d3SLijun Ou {
120299e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
120399e040d3SLijun Ou 	portid_t pi;
120499e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
120599e040d3SLijun Ou 
120699e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
120799e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
120899e040d3SLijun Ou 			continue;
120999e040d3SLijun Ou 
121099e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
121199e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
121299e040d3SLijun Ou 			*pid = pi;
121399e040d3SLijun Ou 		}
121499e040d3SLijun Ou 	}
121599e040d3SLijun Ou 	return allowed_max_rxd;
121699e040d3SLijun Ou }
121799e040d3SLijun Ou 
121899e040d3SLijun Ou /*
121999e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
122099e040d3SLijun Ou  * *pid return the port id which has minimal value of
122199e040d3SLijun Ou  * min_rxd in all queues of all ports.
122299e040d3SLijun Ou  */
122399e040d3SLijun Ou static uint16_t
122499e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
122599e040d3SLijun Ou {
122699e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
122799e040d3SLijun Ou 	portid_t pi;
122899e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
122999e040d3SLijun Ou 
123099e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
123199e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
123299e040d3SLijun Ou 			continue;
123399e040d3SLijun Ou 
123499e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
123599e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
123699e040d3SLijun Ou 			*pid = pi;
123799e040d3SLijun Ou 		}
123899e040d3SLijun Ou 	}
123999e040d3SLijun Ou 
124099e040d3SLijun Ou 	return allowed_min_rxd;
124199e040d3SLijun Ou }
124299e040d3SLijun Ou 
124399e040d3SLijun Ou /*
124499e040d3SLijun Ou  * Check input rxd is valid or not.
124599e040d3SLijun Ou  * If input rxd is not greater than any of maximum number
124699e040d3SLijun Ou  * of RXDs of every Rx queues and is not less than any of
124799e040d3SLijun Ou  * minimal number of RXDs of every Rx queues, it is valid.
124899e040d3SLijun Ou  * if valid, return 0, else return -1
124999e040d3SLijun Ou  */
125099e040d3SLijun Ou int
125199e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
125299e040d3SLijun Ou {
125399e040d3SLijun Ou 	uint16_t allowed_max_rxd;
125499e040d3SLijun Ou 	uint16_t allowed_min_rxd;
125599e040d3SLijun Ou 	portid_t pid = 0;
125699e040d3SLijun Ou 
125799e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
125899e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
125999e040d3SLijun Ou 		printf("Fail: input rxd (%u) can't be greater "
126099e040d3SLijun Ou 		       "than max_rxds (%u) of port %u\n",
126199e040d3SLijun Ou 		       rxd,
126299e040d3SLijun Ou 		       allowed_max_rxd,
126399e040d3SLijun Ou 		       pid);
126499e040d3SLijun Ou 		return -1;
126599e040d3SLijun Ou 	}
126699e040d3SLijun Ou 
126799e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
126899e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
126999e040d3SLijun Ou 		printf("Fail: input rxd (%u) can't be less "
127099e040d3SLijun Ou 		       "than min_rxds (%u) of port %u\n",
127199e040d3SLijun Ou 		       rxd,
127299e040d3SLijun Ou 		       allowed_min_rxd,
127399e040d3SLijun Ou 		       pid);
127499e040d3SLijun Ou 		return -1;
127599e040d3SLijun Ou 	}
127699e040d3SLijun Ou 
127799e040d3SLijun Ou 	return 0;
127899e040d3SLijun Ou }
127999e040d3SLijun Ou 
128099e040d3SLijun Ou /*
128199e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every rx queues.
128299e040d3SLijun Ou  * *pid return the port id which has minimal value of
128399e040d3SLijun Ou  * max_txd in every tx queue.
128499e040d3SLijun Ou  */
128599e040d3SLijun Ou static uint16_t
128699e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
128799e040d3SLijun Ou {
128899e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
128999e040d3SLijun Ou 	portid_t pi;
129099e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
129199e040d3SLijun Ou 
129299e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
129399e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
129499e040d3SLijun Ou 			continue;
129599e040d3SLijun Ou 
129699e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
129799e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
129899e040d3SLijun Ou 			*pid = pi;
129999e040d3SLijun Ou 		}
130099e040d3SLijun Ou 	}
130199e040d3SLijun Ou 	return allowed_max_txd;
130299e040d3SLijun Ou }
130399e040d3SLijun Ou 
130499e040d3SLijun Ou /*
130599e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queues.
130699e040d3SLijun Ou  * *pid return the port id which has minimal value of
130799e040d3SLijun Ou  * min_txd in every tx queue.
130899e040d3SLijun Ou  */
130999e040d3SLijun Ou static uint16_t
131099e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
131199e040d3SLijun Ou {
131299e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
131399e040d3SLijun Ou 	portid_t pi;
131499e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
131599e040d3SLijun Ou 
131699e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
131799e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
131899e040d3SLijun Ou 			continue;
131999e040d3SLijun Ou 
132099e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
132199e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
132299e040d3SLijun Ou 			*pid = pi;
132399e040d3SLijun Ou 		}
132499e040d3SLijun Ou 	}
132599e040d3SLijun Ou 
132699e040d3SLijun Ou 	return allowed_min_txd;
132799e040d3SLijun Ou }
132899e040d3SLijun Ou 
132999e040d3SLijun Ou /*
133099e040d3SLijun Ou  * Check input txd is valid or not.
133199e040d3SLijun Ou  * If input txd is not greater than any of maximum number
133299e040d3SLijun Ou  * of TXDs of every Rx queues, it is valid.
133399e040d3SLijun Ou  * if valid, return 0, else return -1
133499e040d3SLijun Ou  */
133599e040d3SLijun Ou int
133699e040d3SLijun Ou check_nb_txd(queueid_t txd)
133799e040d3SLijun Ou {
133899e040d3SLijun Ou 	uint16_t allowed_max_txd;
133999e040d3SLijun Ou 	uint16_t allowed_min_txd;
134099e040d3SLijun Ou 	portid_t pid = 0;
134199e040d3SLijun Ou 
134299e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
134399e040d3SLijun Ou 	if (txd > allowed_max_txd) {
134499e040d3SLijun Ou 		printf("Fail: input txd (%u) can't be greater "
134599e040d3SLijun Ou 		       "than max_txds (%u) of port %u\n",
134699e040d3SLijun Ou 		       txd,
134799e040d3SLijun Ou 		       allowed_max_txd,
134899e040d3SLijun Ou 		       pid);
134999e040d3SLijun Ou 		return -1;
135099e040d3SLijun Ou 	}
135199e040d3SLijun Ou 
135299e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
135399e040d3SLijun Ou 	if (txd < allowed_min_txd) {
135499e040d3SLijun Ou 		printf("Fail: input txd (%u) can't be less "
135599e040d3SLijun Ou 		       "than min_txds (%u) of port %u\n",
135699e040d3SLijun Ou 		       txd,
135799e040d3SLijun Ou 		       allowed_min_txd,
135899e040d3SLijun Ou 		       pid);
135999e040d3SLijun Ou 		return -1;
136099e040d3SLijun Ou 	}
136199e040d3SLijun Ou 	return 0;
136299e040d3SLijun Ou }
136399e040d3SLijun Ou 
136499e040d3SLijun Ou 
136599e040d3SLijun Ou /*
13661c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
13671c69df45SOri Kam  * *pid return the port id which has minimal value of
13681c69df45SOri Kam  * max_hairpin_queues in all ports.
13691c69df45SOri Kam  */
13701c69df45SOri Kam queueid_t
13711c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
13721c69df45SOri Kam {
13739e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
13741c69df45SOri Kam 	portid_t pi;
13751c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
13761c69df45SOri Kam 
13771c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
13781c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
13791c69df45SOri Kam 			*pid = pi;
13801c69df45SOri Kam 			return 0;
13811c69df45SOri Kam 		}
13821c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
13831c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
13841c69df45SOri Kam 			*pid = pi;
13851c69df45SOri Kam 		}
13861c69df45SOri Kam 	}
13871c69df45SOri Kam 	return allowed_max_hairpinq;
13881c69df45SOri Kam }
13891c69df45SOri Kam 
13901c69df45SOri Kam /*
13911c69df45SOri Kam  * Check input hairpin is valid or not.
13921c69df45SOri Kam  * If input hairpin is not greater than any of maximum number
13931c69df45SOri Kam  * of hairpin queues of all ports, it is valid.
13941c69df45SOri Kam  * if valid, return 0, else return -1
13951c69df45SOri Kam  */
13961c69df45SOri Kam int
13971c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
13981c69df45SOri Kam {
13991c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
14001c69df45SOri Kam 	portid_t pid = 0;
14011c69df45SOri Kam 
14021c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
14031c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
14041c69df45SOri Kam 		printf("Fail: input hairpin (%u) can't be greater "
14051c69df45SOri Kam 		       "than max_hairpin_queues (%u) of port %u\n",
14061c69df45SOri Kam 		       hairpinq, allowed_max_hairpinq, pid);
14071c69df45SOri Kam 		return -1;
14081c69df45SOri Kam 	}
14091c69df45SOri Kam 	return 0;
14101c69df45SOri Kam }
14111c69df45SOri Kam 
1412af75078fSIntel static void
1413af75078fSIntel init_config(void)
1414af75078fSIntel {
1415ce8d5614SIntel 	portid_t pid;
1416af75078fSIntel 	struct rte_port *port;
1417af75078fSIntel 	struct rte_mempool *mbp;
1418af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1419af75078fSIntel 	lcoreid_t  lc_id;
14207acf894dSStephen Hurd 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1421b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
142252f38a20SJiayu Hu 	uint32_t gso_types;
142333f9630fSSunil Kumar Kori 	uint16_t data_size;
142433f9630fSSunil Kumar Kori 	bool warning = 0;
1425c73a9071SWei Dai 	int k;
14266f51deb9SIvan Ilchenko 	int ret;
1427af75078fSIntel 
14287acf894dSStephen Hurd 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1429487f9a59SYulong Pei 
1430af75078fSIntel 	/* Configuration of logical cores. */
1431af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1432af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1433fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1434af75078fSIntel 	if (fwd_lcores == NULL) {
1435ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1436ce8d5614SIntel 							"failed\n", nb_lcores);
1437af75078fSIntel 	}
1438af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1439af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1440af75078fSIntel 					       sizeof(struct fwd_lcore),
1441fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1442af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1443ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1444ce8d5614SIntel 								"failed\n");
1445af75078fSIntel 		}
1446af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1447af75078fSIntel 	}
1448af75078fSIntel 
14497d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1450ce8d5614SIntel 		port = &ports[pid];
14518b9bd0efSMoti Haimovsky 		/* Apply default TxRx configuration for all ports */
1452fd8c20aaSShahaf Shuler 		port->dev_conf.txmode = tx_mode;
1453384161e0SShahaf Shuler 		port->dev_conf.rxmode = rx_mode;
14546f51deb9SIvan Ilchenko 
14556f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
14566f51deb9SIvan Ilchenko 		if (ret != 0)
14576f51deb9SIvan Ilchenko 			rte_exit(EXIT_FAILURE,
14586f51deb9SIvan Ilchenko 				 "rte_eth_dev_info_get() failed\n");
14597c45f6c0SFerruh Yigit 
146007e5f7bdSShahaf Shuler 		if (!(port->dev_info.tx_offload_capa &
146107e5f7bdSShahaf Shuler 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
146207e5f7bdSShahaf Shuler 			port->dev_conf.txmode.offloads &=
146307e5f7bdSShahaf Shuler 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1464b6ea6408SIntel 		if (numa_support) {
1465b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
1466b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
1467b6ea6408SIntel 			else {
1468b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
146920a0286fSLiu Xiaofeng 
147029841336SPhil Yang 				/*
147129841336SPhil Yang 				 * if socket_id is invalid,
147229841336SPhil Yang 				 * set to the first available socket.
147329841336SPhil Yang 				 */
147420a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
147529841336SPhil Yang 					socket_id = socket_ids[0];
1476b6ea6408SIntel 				port_per_socket[socket_id]++;
1477b6ea6408SIntel 			}
1478b6ea6408SIntel 		}
1479b6ea6408SIntel 
1480c73a9071SWei Dai 		/* Apply Rx offloads configuration */
1481c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
1482c73a9071SWei Dai 			port->rx_conf[k].offloads =
1483c73a9071SWei Dai 				port->dev_conf.rxmode.offloads;
1484c73a9071SWei Dai 		/* Apply Tx offloads configuration */
1485c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
1486c73a9071SWei Dai 			port->tx_conf[k].offloads =
1487c73a9071SWei Dai 				port->dev_conf.txmode.offloads;
1488c73a9071SWei Dai 
1489ce8d5614SIntel 		/* set flag to initialize port/queue */
1490ce8d5614SIntel 		port->need_reconfig = 1;
1491ce8d5614SIntel 		port->need_reconfig_queues = 1;
1492c18feafaSDekel Peled 		port->tx_metadata = 0;
149333f9630fSSunil Kumar Kori 
149433f9630fSSunil Kumar Kori 		/* Check for maximum number of segments per MTU. Accordingly
149533f9630fSSunil Kumar Kori 		 * update the mbuf data size.
149633f9630fSSunil Kumar Kori 		 */
1497163fbaafSFerruh Yigit 		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1498163fbaafSFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
149933f9630fSSunil Kumar Kori 			data_size = rx_mode.max_rx_pkt_len /
150033f9630fSSunil Kumar Kori 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
150133f9630fSSunil Kumar Kori 
150233f9630fSSunil Kumar Kori 			if ((data_size + RTE_PKTMBUF_HEADROOM) >
150326cbb419SViacheslav Ovsiienko 							mbuf_data_size[0]) {
150426cbb419SViacheslav Ovsiienko 				mbuf_data_size[0] = data_size +
150533f9630fSSunil Kumar Kori 						 RTE_PKTMBUF_HEADROOM;
150633f9630fSSunil Kumar Kori 				warning = 1;
1507ce8d5614SIntel 			}
150833f9630fSSunil Kumar Kori 		}
150933f9630fSSunil Kumar Kori 	}
151033f9630fSSunil Kumar Kori 
151133f9630fSSunil Kumar Kori 	if (warning)
151226cbb419SViacheslav Ovsiienko 		TESTPMD_LOG(WARNING,
151326cbb419SViacheslav Ovsiienko 			    "Configured mbuf size of the first segment %hu\n",
151426cbb419SViacheslav Ovsiienko 			    mbuf_data_size[0]);
15153ab64341SOlivier Matz 	/*
15163ab64341SOlivier Matz 	 * Create pools of mbuf.
15173ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbuf in
15183ab64341SOlivier Matz 	 * socket 0 memory by default.
15193ab64341SOlivier Matz 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
15203ab64341SOlivier Matz 	 *
15213ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
15223ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
15233ab64341SOlivier Matz 	 */
15243ab64341SOlivier Matz 	if (param_total_num_mbufs)
15253ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
15263ab64341SOlivier Matz 	else {
15273ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
15283ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
15293ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
15303ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
15313ab64341SOlivier Matz 	}
15323ab64341SOlivier Matz 
1533b6ea6408SIntel 	if (numa_support) {
153426cbb419SViacheslav Ovsiienko 		uint8_t i, j;
1535ce8d5614SIntel 
1536c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
153726cbb419SViacheslav Ovsiienko 			for (j = 0; j < mbuf_data_size_n; j++)
153826cbb419SViacheslav Ovsiienko 				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
153926cbb419SViacheslav Ovsiienko 					mbuf_pool_create(mbuf_data_size[j],
1540401b744dSShahaf Shuler 							  nb_mbuf_per_pool,
154126cbb419SViacheslav Ovsiienko 							  socket_ids[i], j);
15423ab64341SOlivier Matz 	} else {
154326cbb419SViacheslav Ovsiienko 		uint8_t i;
154426cbb419SViacheslav Ovsiienko 
154526cbb419SViacheslav Ovsiienko 		for (i = 0; i < mbuf_data_size_n; i++)
154626cbb419SViacheslav Ovsiienko 			mempools[i] = mbuf_pool_create
154726cbb419SViacheslav Ovsiienko 					(mbuf_data_size[i],
1548401b744dSShahaf Shuler 					 nb_mbuf_per_pool,
154926cbb419SViacheslav Ovsiienko 					 socket_num == UMA_NO_CONFIG ?
155026cbb419SViacheslav Ovsiienko 					 0 : socket_num, i);
15513ab64341SOlivier Matz 	}
1552b6ea6408SIntel 
1553b6ea6408SIntel 	init_port_config();
15545886ae07SAdrien Mazarguil 
155552f38a20SJiayu Hu 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1556aaacd052SJiayu Hu 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
15575886ae07SAdrien Mazarguil 	/*
15585886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
15595886ae07SAdrien Mazarguil 	 */
15605886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
15618fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
156226cbb419SViacheslav Ovsiienko 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
15638fd8bebcSAdrien Mazarguil 
15645886ae07SAdrien Mazarguil 		if (mbp == NULL)
156526cbb419SViacheslav Ovsiienko 			mbp = mbuf_pool_find(0, 0);
15665886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
156752f38a20SJiayu Hu 		/* initialize GSO context */
156852f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
156952f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
157052f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
157135b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
157235b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
157352f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
15745886ae07SAdrien Mazarguil 	}
15755886ae07SAdrien Mazarguil 
1576ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
1577ce8d5614SIntel 	if (init_fwd_streams() < 0)
1578ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
15790c0db76fSBernard Iremonger 
15800c0db76fSBernard Iremonger 	fwd_config_setup();
1581b7091f1dSJiayu Hu 
1582b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1583b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1584b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1585b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1586b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1587b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1588b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1589b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1590b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1591b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1592b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1593b7091f1dSJiayu Hu 		}
1594b7091f1dSJiayu Hu 	}
1595ce8d5614SIntel }
1596ce8d5614SIntel 
15972950a769SDeclan Doherty 
15982950a769SDeclan Doherty void
1599a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
16002950a769SDeclan Doherty {
16012950a769SDeclan Doherty 	struct rte_port *port;
16026f51deb9SIvan Ilchenko 	int ret;
16032950a769SDeclan Doherty 
16042950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
16052950a769SDeclan Doherty 	port = &ports[new_port_id];
16066f51deb9SIvan Ilchenko 
16076f51deb9SIvan Ilchenko 	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
16086f51deb9SIvan Ilchenko 	if (ret != 0)
16096f51deb9SIvan Ilchenko 		return;
16102950a769SDeclan Doherty 
16112950a769SDeclan Doherty 	/* set flag to initialize port/queue */
16122950a769SDeclan Doherty 	port->need_reconfig = 1;
16132950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1614a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
16152950a769SDeclan Doherty 
16162950a769SDeclan Doherty 	init_port_config();
16172950a769SDeclan Doherty }
16182950a769SDeclan Doherty 
16192950a769SDeclan Doherty 
1620ce8d5614SIntel int
1621ce8d5614SIntel init_fwd_streams(void)
1622ce8d5614SIntel {
1623ce8d5614SIntel 	portid_t pid;
1624ce8d5614SIntel 	struct rte_port *port;
1625ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
16265a8fb55cSReshma Pattan 	queueid_t q;
1627ce8d5614SIntel 
1628ce8d5614SIntel 	/* set socket id according to numa or not */
16297d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1630ce8d5614SIntel 		port = &ports[pid];
1631ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
1632ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
1633ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
1634ce8d5614SIntel 				port->dev_info.max_rx_queues);
1635ce8d5614SIntel 			return -1;
1636ce8d5614SIntel 		}
1637ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
1638ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
1639ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
1640ce8d5614SIntel 				port->dev_info.max_tx_queues);
1641ce8d5614SIntel 			return -1;
1642ce8d5614SIntel 		}
164320a0286fSLiu Xiaofeng 		if (numa_support) {
164420a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
164520a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
164620a0286fSLiu Xiaofeng 			else {
1647b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
164820a0286fSLiu Xiaofeng 
164929841336SPhil Yang 				/*
165029841336SPhil Yang 				 * if socket_id is invalid,
165129841336SPhil Yang 				 * set to the first available socket.
165229841336SPhil Yang 				 */
165320a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
165429841336SPhil Yang 					port->socket_id = socket_ids[0];
165520a0286fSLiu Xiaofeng 			}
165620a0286fSLiu Xiaofeng 		}
1657b6ea6408SIntel 		else {
1658b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1659af75078fSIntel 				port->socket_id = 0;
1660b6ea6408SIntel 			else
1661b6ea6408SIntel 				port->socket_id = socket_num;
1662b6ea6408SIntel 		}
1663af75078fSIntel 	}
1664af75078fSIntel 
16655a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
16665a8fb55cSReshma Pattan 	if (q == 0) {
16675a8fb55cSReshma Pattan 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
16685a8fb55cSReshma Pattan 		return -1;
16695a8fb55cSReshma Pattan 	}
16705a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1671ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1672ce8d5614SIntel 		return 0;
1673ce8d5614SIntel 	/* clear the old */
1674ce8d5614SIntel 	if (fwd_streams != NULL) {
1675ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1676ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1677ce8d5614SIntel 				continue;
1678ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1679ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1680af75078fSIntel 		}
1681ce8d5614SIntel 		rte_free(fwd_streams);
1682ce8d5614SIntel 		fwd_streams = NULL;
1683ce8d5614SIntel 	}
1684ce8d5614SIntel 
1685ce8d5614SIntel 	/* init new */
1686ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
16871f84c469SMatan Azrad 	if (nb_fwd_streams) {
1688ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
16891f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
16901f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1691ce8d5614SIntel 		if (fwd_streams == NULL)
16921f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
16931f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
16941f84c469SMatan Azrad 				 nb_fwd_streams);
1695ce8d5614SIntel 
1696af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
16971f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
16981f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
16991f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1700ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
17011f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
17021f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
17031f84c469SMatan Azrad 		}
1704af75078fSIntel 	}
1705ce8d5614SIntel 
1706ce8d5614SIntel 	return 0;
1707af75078fSIntel }
1708af75078fSIntel 
1709af75078fSIntel static void
1710af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1711af75078fSIntel {
17127569b8c1SHonnappa Nagarahalli 	uint64_t total_burst, sburst;
171385de481aSHonnappa Nagarahalli 	uint64_t nb_burst;
17147569b8c1SHonnappa Nagarahalli 	uint64_t burst_stats[4];
17157569b8c1SHonnappa Nagarahalli 	uint16_t pktnb_stats[4];
1716af75078fSIntel 	uint16_t nb_pkt;
17177569b8c1SHonnappa Nagarahalli 	int burst_percent[4], sburstp;
17187569b8c1SHonnappa Nagarahalli 	int i;
1719af75078fSIntel 
1720af75078fSIntel 	/*
1721af75078fSIntel 	 * First compute the total number of packet bursts and the
1722af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1723af75078fSIntel 	 */
17247569b8c1SHonnappa Nagarahalli 	memset(&burst_stats, 0x0, sizeof(burst_stats));
17257569b8c1SHonnappa Nagarahalli 	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
17267569b8c1SHonnappa Nagarahalli 
17277569b8c1SHonnappa Nagarahalli 	/* Show stats for 0 burst size always */
17287569b8c1SHonnappa Nagarahalli 	total_burst = pbs->pkt_burst_spread[0];
17297569b8c1SHonnappa Nagarahalli 	burst_stats[0] = pbs->pkt_burst_spread[0];
17307569b8c1SHonnappa Nagarahalli 	pktnb_stats[0] = 0;
17317569b8c1SHonnappa Nagarahalli 
17327569b8c1SHonnappa Nagarahalli 	/* Find the next 2 burst sizes with highest occurrences. */
17337569b8c1SHonnappa Nagarahalli 	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1734af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
17357569b8c1SHonnappa Nagarahalli 
1736af75078fSIntel 		if (nb_burst == 0)
1737af75078fSIntel 			continue;
17387569b8c1SHonnappa Nagarahalli 
1739af75078fSIntel 		total_burst += nb_burst;
17407569b8c1SHonnappa Nagarahalli 
17417569b8c1SHonnappa Nagarahalli 		if (nb_burst > burst_stats[1]) {
17427569b8c1SHonnappa Nagarahalli 			burst_stats[2] = burst_stats[1];
17437569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = pktnb_stats[1];
1744fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1745fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
17467569b8c1SHonnappa Nagarahalli 		} else if (nb_burst > burst_stats[2]) {
17477569b8c1SHonnappa Nagarahalli 			burst_stats[2] = nb_burst;
17487569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = nb_pkt;
1749af75078fSIntel 		}
1750af75078fSIntel 	}
1751af75078fSIntel 	if (total_burst == 0)
1752af75078fSIntel 		return;
17537569b8c1SHonnappa Nagarahalli 
17547569b8c1SHonnappa Nagarahalli 	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
17557569b8c1SHonnappa Nagarahalli 	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
17567569b8c1SHonnappa Nagarahalli 		if (i == 3) {
17577569b8c1SHonnappa Nagarahalli 			printf("%d%% of other]\n", 100 - sburstp);
1758af75078fSIntel 			return;
1759af75078fSIntel 		}
17607569b8c1SHonnappa Nagarahalli 
17617569b8c1SHonnappa Nagarahalli 		sburst += burst_stats[i];
17627569b8c1SHonnappa Nagarahalli 		if (sburst == total_burst) {
17637569b8c1SHonnappa Nagarahalli 			printf("%d%% of %d pkts]\n",
17647569b8c1SHonnappa Nagarahalli 				100 - sburstp, (int) pktnb_stats[i]);
1765af75078fSIntel 			return;
1766af75078fSIntel 		}
17677569b8c1SHonnappa Nagarahalli 
17687569b8c1SHonnappa Nagarahalli 		burst_percent[i] =
17697569b8c1SHonnappa Nagarahalli 			(double)burst_stats[i] / total_burst * 100;
17707569b8c1SHonnappa Nagarahalli 		printf("%d%% of %d pkts + ",
17717569b8c1SHonnappa Nagarahalli 			burst_percent[i], (int) pktnb_stats[i]);
17727569b8c1SHonnappa Nagarahalli 		sburstp += burst_percent[i];
1773af75078fSIntel 	}
1774af75078fSIntel }
1775af75078fSIntel 
1776af75078fSIntel static void
1777af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1778af75078fSIntel {
1779af75078fSIntel 	struct fwd_stream *fs;
1780af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1781af75078fSIntel 
1782af75078fSIntel 	fs = fwd_streams[stream_id];
1783af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1784af75078fSIntel 	    (fs->fwd_dropped == 0))
1785af75078fSIntel 		return;
1786af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1787af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1788af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1789af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1790c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1791c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1792af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1793af75078fSIntel 
1794af75078fSIntel 	/* if checksum mode */
1795af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1796c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1797c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1798c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
179958d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
180058d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
180194d65546SDavid Marchand 	} else {
180294d65546SDavid Marchand 		printf("\n");
1803af75078fSIntel 	}
1804af75078fSIntel 
18050e4b1963SDharmik Thakkar 	if (record_burst_stats) {
1806af75078fSIntel 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1807af75078fSIntel 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
18080e4b1963SDharmik Thakkar 	}
1809af75078fSIntel }
1810af75078fSIntel 
181153324971SDavid Marchand void
181253324971SDavid Marchand fwd_stats_display(void)
181353324971SDavid Marchand {
181453324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
181553324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
181653324971SDavid Marchand 	struct {
181753324971SDavid Marchand 		struct fwd_stream *rx_stream;
181853324971SDavid Marchand 		struct fwd_stream *tx_stream;
181953324971SDavid Marchand 		uint64_t tx_dropped;
182053324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
182153324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
182253324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
182353324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
182453324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
182553324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
182653324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
182753324971SDavid Marchand 	struct rte_eth_stats stats;
182853324971SDavid Marchand 	uint64_t fwd_cycles = 0;
182953324971SDavid Marchand 	uint64_t total_recv = 0;
183053324971SDavid Marchand 	uint64_t total_xmit = 0;
183153324971SDavid Marchand 	struct rte_port *port;
183253324971SDavid Marchand 	streamid_t sm_id;
183353324971SDavid Marchand 	portid_t pt_id;
183453324971SDavid Marchand 	int i;
183553324971SDavid Marchand 
183653324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
183753324971SDavid Marchand 
183853324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
183953324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
184053324971SDavid Marchand 
184153324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
184253324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
184353324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
184453324971SDavid Marchand 		} else {
184553324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
184653324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
184753324971SDavid Marchand 		}
184853324971SDavid Marchand 
184953324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
185053324971SDavid Marchand 
185153324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
185253324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
185353324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
185453324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
185553324971SDavid Marchand 
1856bc700b67SDharmik Thakkar 		if (record_core_cycles)
185753324971SDavid Marchand 			fwd_cycles += fs->core_cycles;
185853324971SDavid Marchand 	}
185953324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
186053324971SDavid Marchand 		uint8_t j;
186153324971SDavid Marchand 
186253324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
186353324971SDavid Marchand 		port = &ports[pt_id];
186453324971SDavid Marchand 
186553324971SDavid Marchand 		rte_eth_stats_get(pt_id, &stats);
186653324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
186753324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
186853324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
186953324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
187053324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
187153324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
187253324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
187353324971SDavid Marchand 
187453324971SDavid Marchand 		total_recv += stats.ipackets;
187553324971SDavid Marchand 		total_xmit += stats.opackets;
187653324971SDavid Marchand 		total_rx_dropped += stats.imissed;
187753324971SDavid Marchand 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
187853324971SDavid Marchand 		total_tx_dropped += stats.oerrors;
187953324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
188053324971SDavid Marchand 
188153324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
188253324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
188353324971SDavid Marchand 
188453324971SDavid Marchand 		if (!port->rx_queue_stats_mapping_enabled &&
188553324971SDavid Marchand 		    !port->tx_queue_stats_mapping_enabled) {
188653324971SDavid Marchand 			printf("  RX-packets: %-14"PRIu64
188753324971SDavid Marchand 			       " RX-dropped: %-14"PRIu64
188853324971SDavid Marchand 			       "RX-total: %-"PRIu64"\n",
188953324971SDavid Marchand 			       stats.ipackets, stats.imissed,
189053324971SDavid Marchand 			       stats.ipackets + stats.imissed);
189153324971SDavid Marchand 
189253324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
189353324971SDavid Marchand 				printf("  Bad-ipcsum: %-14"PRIu64
189453324971SDavid Marchand 				       " Bad-l4csum: %-14"PRIu64
189553324971SDavid Marchand 				       "Bad-outer-l4csum: %-14"PRIu64"\n",
189653324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
189753324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
189853324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
189953324971SDavid Marchand 			if (stats.ierrors + stats.rx_nombuf > 0) {
190053324971SDavid Marchand 				printf("  RX-error: %-"PRIu64"\n",
190153324971SDavid Marchand 				       stats.ierrors);
190253324971SDavid Marchand 				printf("  RX-nombufs: %-14"PRIu64"\n",
190353324971SDavid Marchand 				       stats.rx_nombuf);
190453324971SDavid Marchand 			}
190553324971SDavid Marchand 
190653324971SDavid Marchand 			printf("  TX-packets: %-14"PRIu64
190753324971SDavid Marchand 			       " TX-dropped: %-14"PRIu64
190853324971SDavid Marchand 			       "TX-total: %-"PRIu64"\n",
190953324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
191053324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
191153324971SDavid Marchand 		} else {
191253324971SDavid Marchand 			printf("  RX-packets:             %14"PRIu64
191353324971SDavid Marchand 			       "    RX-dropped:%14"PRIu64
191453324971SDavid Marchand 			       "    RX-total:%14"PRIu64"\n",
191553324971SDavid Marchand 			       stats.ipackets, stats.imissed,
191653324971SDavid Marchand 			       stats.ipackets + stats.imissed);
191753324971SDavid Marchand 
191853324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
191953324971SDavid Marchand 				printf("  Bad-ipcsum:%14"PRIu64
192053324971SDavid Marchand 				       "    Bad-l4csum:%14"PRIu64
192153324971SDavid Marchand 				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
192253324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
192353324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
192453324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
192553324971SDavid Marchand 			if ((stats.ierrors + stats.rx_nombuf) > 0) {
192653324971SDavid Marchand 				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
192753324971SDavid Marchand 				printf("  RX-nombufs:             %14"PRIu64"\n",
192853324971SDavid Marchand 				       stats.rx_nombuf);
192953324971SDavid Marchand 			}
193053324971SDavid Marchand 
193153324971SDavid Marchand 			printf("  TX-packets:             %14"PRIu64
193253324971SDavid Marchand 			       "    TX-dropped:%14"PRIu64
193353324971SDavid Marchand 			       "    TX-total:%14"PRIu64"\n",
193453324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
193553324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
193653324971SDavid Marchand 		}
193753324971SDavid Marchand 
19380e4b1963SDharmik Thakkar 		if (record_burst_stats) {
193953324971SDavid Marchand 			if (ports_stats[pt_id].rx_stream)
194053324971SDavid Marchand 				pkt_burst_stats_display("RX",
194153324971SDavid Marchand 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
194253324971SDavid Marchand 			if (ports_stats[pt_id].tx_stream)
194353324971SDavid Marchand 				pkt_burst_stats_display("TX",
194453324971SDavid Marchand 					&ports_stats[pt_id].tx_stream->tx_burst_stats);
19450e4b1963SDharmik Thakkar 		}
194653324971SDavid Marchand 
194753324971SDavid Marchand 		if (port->rx_queue_stats_mapping_enabled) {
194853324971SDavid Marchand 			printf("\n");
194953324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
195053324971SDavid Marchand 				printf("  Stats reg %2d RX-packets:%14"PRIu64
195153324971SDavid Marchand 				       "     RX-errors:%14"PRIu64
195253324971SDavid Marchand 				       "    RX-bytes:%14"PRIu64"\n",
195353324971SDavid Marchand 				       j, stats.q_ipackets[j],
195453324971SDavid Marchand 				       stats.q_errors[j], stats.q_ibytes[j]);
195553324971SDavid Marchand 			}
195653324971SDavid Marchand 			printf("\n");
195753324971SDavid Marchand 		}
195853324971SDavid Marchand 		if (port->tx_queue_stats_mapping_enabled) {
195953324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
196053324971SDavid Marchand 				printf("  Stats reg %2d TX-packets:%14"PRIu64
196153324971SDavid Marchand 				       "                                 TX-bytes:%14"
196253324971SDavid Marchand 				       PRIu64"\n",
196353324971SDavid Marchand 				       j, stats.q_opackets[j],
196453324971SDavid Marchand 				       stats.q_obytes[j]);
196553324971SDavid Marchand 			}
196653324971SDavid Marchand 		}
196753324971SDavid Marchand 
196853324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
196953324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
197053324971SDavid Marchand 	}
197153324971SDavid Marchand 
197253324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
197353324971SDavid Marchand 	       "%s\n",
197453324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
197553324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
197653324971SDavid Marchand 	       "%-"PRIu64"\n"
197753324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
197853324971SDavid Marchand 	       "%-"PRIu64"\n",
197953324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
198053324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
198153324971SDavid Marchand 	if (total_rx_nombuf > 0)
198253324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
198353324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
198453324971SDavid Marchand 	       "%s\n",
198553324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
1986bc700b67SDharmik Thakkar 	if (record_core_cycles) {
19874c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6
19883a164e00SPhil Yang 		if (total_recv > 0 || total_xmit > 0) {
19893a164e00SPhil Yang 			uint64_t total_pkts = 0;
19903a164e00SPhil Yang 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
19913a164e00SPhil Yang 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
19923a164e00SPhil Yang 				total_pkts = total_xmit;
19933a164e00SPhil Yang 			else
19943a164e00SPhil Yang 				total_pkts = total_recv;
19953a164e00SPhil Yang 
19961920832aSDharmik Thakkar 			printf("\n  CPU cycles/packet=%.2F (total cycles="
19973a164e00SPhil Yang 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
19984c0497b1SDharmik Thakkar 			       " MHz Clock\n",
19993a164e00SPhil Yang 			       (double) fwd_cycles / total_pkts,
20003a164e00SPhil Yang 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
20014c0497b1SDharmik Thakkar 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
20023a164e00SPhil Yang 		}
2003bc700b67SDharmik Thakkar 	}
200453324971SDavid Marchand }
200553324971SDavid Marchand 
200653324971SDavid Marchand void
200753324971SDavid Marchand fwd_stats_reset(void)
200853324971SDavid Marchand {
200953324971SDavid Marchand 	streamid_t sm_id;
201053324971SDavid Marchand 	portid_t pt_id;
201153324971SDavid Marchand 	int i;
201253324971SDavid Marchand 
201353324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
201453324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
201553324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
201653324971SDavid Marchand 	}
201753324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
201853324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
201953324971SDavid Marchand 
202053324971SDavid Marchand 		fs->rx_packets = 0;
202153324971SDavid Marchand 		fs->tx_packets = 0;
202253324971SDavid Marchand 		fs->fwd_dropped = 0;
202353324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
202453324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
202553324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
202653324971SDavid Marchand 
202753324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
202853324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
202953324971SDavid Marchand 		fs->core_cycles = 0;
203053324971SDavid Marchand 	}
203153324971SDavid Marchand }
203253324971SDavid Marchand 
2033af75078fSIntel static void
20347741e4cfSIntel flush_fwd_rx_queues(void)
2035af75078fSIntel {
2036af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2037af75078fSIntel 	portid_t  rxp;
20387741e4cfSIntel 	portid_t port_id;
2039af75078fSIntel 	queueid_t rxq;
2040af75078fSIntel 	uint16_t  nb_rx;
2041af75078fSIntel 	uint16_t  i;
2042af75078fSIntel 	uint8_t   j;
2043f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2044594302c7SJames Poole 	uint64_t timer_period;
2045f487715fSReshma Pattan 
2046f487715fSReshma Pattan 	/* convert to number of cycles */
2047594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2048af75078fSIntel 
2049af75078fSIntel 	for (j = 0; j < 2; j++) {
20507741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2051af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
20527741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
2053f487715fSReshma Pattan 				/**
2054f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
2055f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
2056f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
2057f487715fSReshma Pattan 				* after 1sec timer expiry.
2058f487715fSReshma Pattan 				*/
2059f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2060af75078fSIntel 				do {
20617741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2062013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2063af75078fSIntel 					for (i = 0; i < nb_rx; i++)
2064af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
2065f487715fSReshma Pattan 
2066f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2067f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2068f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2069f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2070f487715fSReshma Pattan 					(timer_tsc < timer_period));
2071f487715fSReshma Pattan 				timer_tsc = 0;
2072af75078fSIntel 			}
2073af75078fSIntel 		}
2074af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2075af75078fSIntel 	}
2076af75078fSIntel }
2077af75078fSIntel 
2078af75078fSIntel static void
2079af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2080af75078fSIntel {
2081af75078fSIntel 	struct fwd_stream **fsm;
2082af75078fSIntel 	streamid_t nb_fs;
2083af75078fSIntel 	streamid_t sm_id;
208454f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
20857e4441c8SRemy Horton 	uint64_t tics_per_1sec;
20867e4441c8SRemy Horton 	uint64_t tics_datum;
20877e4441c8SRemy Horton 	uint64_t tics_current;
20884918a357SXiaoyun Li 	uint16_t i, cnt_ports;
2089af75078fSIntel 
20904918a357SXiaoyun Li 	cnt_ports = nb_ports;
20917e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
20927e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
20937e4441c8SRemy Horton #endif
2094af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
2095af75078fSIntel 	nb_fs = fc->stream_nb;
2096af75078fSIntel 	do {
2097af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
2098af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
209954f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
2100e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
2101e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
21027e4441c8SRemy Horton 			tics_current = rte_rdtsc();
21037e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
21047e4441c8SRemy Horton 				/* Periodic bitrate calculation */
21054918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
2106e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
21074918a357SXiaoyun Li 						ports_ids[i]);
21087e4441c8SRemy Horton 				tics_datum = tics_current;
21097e4441c8SRemy Horton 			}
2110e25e6c70SRemy Horton 		}
21117e4441c8SRemy Horton #endif
211262d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
211365eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
211465eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
211562d3216dSReshma Pattan 			rte_latencystats_update();
211662d3216dSReshma Pattan #endif
211762d3216dSReshma Pattan 
2118af75078fSIntel 	} while (! fc->stopped);
2119af75078fSIntel }
2120af75078fSIntel 
2121af75078fSIntel static int
2122af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2123af75078fSIntel {
2124af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2125af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2126af75078fSIntel 	return 0;
2127af75078fSIntel }
2128af75078fSIntel 
2129af75078fSIntel /*
2130af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2131af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2132af75078fSIntel  */
2133af75078fSIntel static int
2134af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2135af75078fSIntel {
2136af75078fSIntel 	struct fwd_lcore *fwd_lc;
2137af75078fSIntel 	struct fwd_lcore tmp_lcore;
2138af75078fSIntel 
2139af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2140af75078fSIntel 	tmp_lcore = *fwd_lc;
2141af75078fSIntel 	tmp_lcore.stopped = 1;
2142af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2143af75078fSIntel 	return 0;
2144af75078fSIntel }
2145af75078fSIntel 
2146af75078fSIntel /*
2147af75078fSIntel  * Launch packet forwarding:
2148af75078fSIntel  *     - Setup per-port forwarding context.
2149af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2150af75078fSIntel  */
2151af75078fSIntel static void
2152af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2153af75078fSIntel {
2154af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2155af75078fSIntel 	unsigned int i;
2156af75078fSIntel 	unsigned int lc_id;
2157af75078fSIntel 	int diag;
2158af75078fSIntel 
2159af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2160af75078fSIntel 	if (port_fwd_begin != NULL) {
2161af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2162af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
2163af75078fSIntel 	}
2164af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2165af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2166af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2167af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2168af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2169af75078fSIntel 						     fwd_lcores[i], lc_id);
2170af75078fSIntel 			if (diag != 0)
2171af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
2172af75078fSIntel 				       lc_id, diag);
2173af75078fSIntel 		}
2174af75078fSIntel 	}
2175af75078fSIntel }
2176af75078fSIntel 
2177af75078fSIntel /*
2178af75078fSIntel  * Launch packet forwarding configuration.
2179af75078fSIntel  */
2180af75078fSIntel void
2181af75078fSIntel start_packet_forwarding(int with_tx_first)
2182af75078fSIntel {
2183af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2184af75078fSIntel 	port_fwd_end_t  port_fwd_end;
2185af75078fSIntel 	struct rte_port *port;
2186af75078fSIntel 	unsigned int i;
2187af75078fSIntel 	portid_t   pt_id;
2188af75078fSIntel 
21895a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
21905a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
21915a8fb55cSReshma Pattan 
21925a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
21935a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
21945a8fb55cSReshma Pattan 
21955a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
21965a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
21975a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
21985a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
21995a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
22005a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
22015a8fb55cSReshma Pattan 
2202ce8d5614SIntel 	if (all_ports_started() == 0) {
2203ce8d5614SIntel 		printf("Not all ports were started\n");
2204ce8d5614SIntel 		return;
2205ce8d5614SIntel 	}
2206af75078fSIntel 	if (test_done == 0) {
2207af75078fSIntel 		printf("Packet forwarding already started\n");
2208af75078fSIntel 		return;
2209af75078fSIntel 	}
2210edf87b4aSBernard Iremonger 
2211edf87b4aSBernard Iremonger 
22127741e4cfSIntel 	if(dcb_test) {
22137741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
22147741e4cfSIntel 			pt_id = fwd_ports_ids[i];
22157741e4cfSIntel 			port = &ports[pt_id];
22167741e4cfSIntel 			if (!port->dcb_flag) {
22177741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
22187741e4cfSIntel                                        "be configured in this mode.\n");
2219013af9b6SIntel 				return;
2220013af9b6SIntel 			}
22217741e4cfSIntel 		}
22227741e4cfSIntel 		if (nb_fwd_lcores == 1) {
22237741e4cfSIntel 			printf("In DCB mode,the nb forwarding cores "
22247741e4cfSIntel                                "should be larger than 1.\n");
22257741e4cfSIntel 			return;
22267741e4cfSIntel 		}
22277741e4cfSIntel 	}
2228af75078fSIntel 	test_done = 0;
22297741e4cfSIntel 
223047a767b2SMatan Azrad 	fwd_config_setup();
223147a767b2SMatan Azrad 
22327741e4cfSIntel 	if(!no_flush_rx)
22337741e4cfSIntel 		flush_fwd_rx_queues();
22347741e4cfSIntel 
2235933617d8SZhihong Wang 	pkt_fwd_config_display(&cur_fwd_config);
2236af75078fSIntel 	rxtx_config_display();
2237af75078fSIntel 
223853324971SDavid Marchand 	fwd_stats_reset();
2239af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2240af75078fSIntel 		pt_id = fwd_ports_ids[i];
2241af75078fSIntel 		port = &ports[pt_id];
2242013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
2243af75078fSIntel 	}
2244af75078fSIntel 	if (with_tx_first) {
2245af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2246af75078fSIntel 		if (port_fwd_begin != NULL) {
2247af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2248af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
2249af75078fSIntel 		}
2250acbf77a6SZhihong Wang 		while (with_tx_first--) {
2251acbf77a6SZhihong Wang 			launch_packet_forwarding(
2252acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2253af75078fSIntel 			rte_eal_mp_wait_lcore();
2254acbf77a6SZhihong Wang 		}
2255af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2256af75078fSIntel 		if (port_fwd_end != NULL) {
2257af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2258af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2259af75078fSIntel 		}
2260af75078fSIntel 	}
2261af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2262af75078fSIntel }
2263af75078fSIntel 
2264af75078fSIntel void
2265af75078fSIntel stop_packet_forwarding(void)
2266af75078fSIntel {
2267af75078fSIntel 	port_fwd_end_t port_fwd_end;
2268af75078fSIntel 	lcoreid_t lc_id;
226953324971SDavid Marchand 	portid_t pt_id;
227053324971SDavid Marchand 	int i;
2271af75078fSIntel 
2272af75078fSIntel 	if (test_done) {
2273af75078fSIntel 		printf("Packet forwarding not started\n");
2274af75078fSIntel 		return;
2275af75078fSIntel 	}
2276af75078fSIntel 	printf("Telling cores to stop...");
2277af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2278af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2279af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2280af75078fSIntel 	rte_eal_mp_wait_lcore();
2281af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2282af75078fSIntel 	if (port_fwd_end != NULL) {
2283af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2284af75078fSIntel 			pt_id = fwd_ports_ids[i];
2285af75078fSIntel 			(*port_fwd_end)(pt_id);
2286af75078fSIntel 		}
2287af75078fSIntel 	}
2288c185d42cSDavid Marchand 
228953324971SDavid Marchand 	fwd_stats_display();
229058d475b7SJerin Jacob 
2291af75078fSIntel 	printf("\nDone.\n");
2292af75078fSIntel 	test_done = 1;
2293af75078fSIntel }
2294af75078fSIntel 
2295cfae07fdSOuyang Changchun void
2296cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2297cfae07fdSOuyang Changchun {
2298492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
2299cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
2300cfae07fdSOuyang Changchun }
2301cfae07fdSOuyang Changchun 
2302cfae07fdSOuyang Changchun void
2303cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2304cfae07fdSOuyang Changchun {
2305492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
2306cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
2307cfae07fdSOuyang Changchun }
2308cfae07fdSOuyang Changchun 
2309ce8d5614SIntel static int
2310ce8d5614SIntel all_ports_started(void)
2311ce8d5614SIntel {
2312ce8d5614SIntel 	portid_t pi;
2313ce8d5614SIntel 	struct rte_port *port;
2314ce8d5614SIntel 
23157d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2316ce8d5614SIntel 		port = &ports[pi];
2317ce8d5614SIntel 		/* Check if there is a port which is not started */
231841b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
231941b05095SBernard Iremonger 			(port->slave_flag == 0))
2320ce8d5614SIntel 			return 0;
2321ce8d5614SIntel 	}
2322ce8d5614SIntel 
2323ce8d5614SIntel 	/* No port is not started */
2324ce8d5614SIntel 	return 1;
2325ce8d5614SIntel }
2326ce8d5614SIntel 
2327148f963fSBruce Richardson int
23286018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
23296018eb8cSShahaf Shuler {
23306018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
23316018eb8cSShahaf Shuler 
23326018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
23336018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
23346018eb8cSShahaf Shuler 		return 0;
23356018eb8cSShahaf Shuler 	return 1;
23366018eb8cSShahaf Shuler }
23376018eb8cSShahaf Shuler 
23386018eb8cSShahaf Shuler int
2339edab33b1STetsuya Mukawa all_ports_stopped(void)
2340edab33b1STetsuya Mukawa {
2341edab33b1STetsuya Mukawa 	portid_t pi;
2342edab33b1STetsuya Mukawa 
23437d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
23446018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2345edab33b1STetsuya Mukawa 			return 0;
2346edab33b1STetsuya Mukawa 	}
2347edab33b1STetsuya Mukawa 
2348edab33b1STetsuya Mukawa 	return 1;
2349edab33b1STetsuya Mukawa }
2350edab33b1STetsuya Mukawa 
2351edab33b1STetsuya Mukawa int
2352edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2353edab33b1STetsuya Mukawa {
2354edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2355edab33b1STetsuya Mukawa 		return 0;
2356edab33b1STetsuya Mukawa 
2357edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2358edab33b1STetsuya Mukawa 		return 0;
2359edab33b1STetsuya Mukawa 
2360edab33b1STetsuya Mukawa 	return 1;
2361edab33b1STetsuya Mukawa }
2362edab33b1STetsuya Mukawa 
23631c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
23641c69df45SOri Kam static int
236501817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
23661c69df45SOri Kam {
23671c69df45SOri Kam 	queueid_t qi;
23681c69df45SOri Kam 	struct rte_eth_hairpin_conf hairpin_conf = {
23691c69df45SOri Kam 		.peer_count = 1,
23701c69df45SOri Kam 	};
23711c69df45SOri Kam 	int i;
23721c69df45SOri Kam 	int diag;
23731c69df45SOri Kam 	struct rte_port *port = &ports[pi];
237401817b10SBing Zhao 	uint16_t peer_rx_port = pi;
237501817b10SBing Zhao 	uint16_t peer_tx_port = pi;
237601817b10SBing Zhao 	uint32_t manual = 1;
237701817b10SBing Zhao 	uint32_t tx_exp = hairpin_mode & 0x10;
237801817b10SBing Zhao 
237901817b10SBing Zhao 	if (!(hairpin_mode & 0xf)) {
238001817b10SBing Zhao 		peer_rx_port = pi;
238101817b10SBing Zhao 		peer_tx_port = pi;
238201817b10SBing Zhao 		manual = 0;
238301817b10SBing Zhao 	} else if (hairpin_mode & 0x1) {
238401817b10SBing Zhao 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
238501817b10SBing Zhao 						       RTE_ETH_DEV_NO_OWNER);
238601817b10SBing Zhao 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
238701817b10SBing Zhao 			peer_tx_port = rte_eth_find_next_owned_by(0,
238801817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
238901817b10SBing Zhao 		if (p_pi != RTE_MAX_ETHPORTS) {
239001817b10SBing Zhao 			peer_rx_port = p_pi;
239101817b10SBing Zhao 		} else {
239201817b10SBing Zhao 			uint16_t next_pi;
239301817b10SBing Zhao 
239401817b10SBing Zhao 			/* Last port will be the peer RX port of the first. */
239501817b10SBing Zhao 			RTE_ETH_FOREACH_DEV(next_pi)
239601817b10SBing Zhao 				peer_rx_port = next_pi;
239701817b10SBing Zhao 		}
239801817b10SBing Zhao 		manual = 1;
239901817b10SBing Zhao 	} else if (hairpin_mode & 0x2) {
240001817b10SBing Zhao 		if (cnt_pi & 0x1) {
240101817b10SBing Zhao 			peer_rx_port = p_pi;
240201817b10SBing Zhao 		} else {
240301817b10SBing Zhao 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
240401817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
240501817b10SBing Zhao 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
240601817b10SBing Zhao 				peer_rx_port = pi;
240701817b10SBing Zhao 		}
240801817b10SBing Zhao 		peer_tx_port = peer_rx_port;
240901817b10SBing Zhao 		manual = 1;
241001817b10SBing Zhao 	}
24111c69df45SOri Kam 
24121c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
241301817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_rx_port;
24141c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
241501817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
241601817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
24171c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
24181c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
24191c69df45SOri Kam 		i++;
24201c69df45SOri Kam 		if (diag == 0)
24211c69df45SOri Kam 			continue;
24221c69df45SOri Kam 
24231c69df45SOri Kam 		/* Fail to setup rx queue, return */
24241c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
24251c69df45SOri Kam 					RTE_PORT_HANDLING,
24261c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
24271c69df45SOri Kam 			printf("Port %d can not be set back "
24281c69df45SOri Kam 					"to stopped\n", pi);
24291c69df45SOri Kam 		printf("Fail to configure port %d hairpin "
24301c69df45SOri Kam 				"queues\n", pi);
24311c69df45SOri Kam 		/* try to reconfigure queues next time */
24321c69df45SOri Kam 		port->need_reconfig_queues = 1;
24331c69df45SOri Kam 		return -1;
24341c69df45SOri Kam 	}
24351c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
243601817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_tx_port;
24371c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
243801817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
243901817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
24401c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
24411c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
24421c69df45SOri Kam 		i++;
24431c69df45SOri Kam 		if (diag == 0)
24441c69df45SOri Kam 			continue;
24451c69df45SOri Kam 
24461c69df45SOri Kam 		/* Fail to setup rx queue, return */
24471c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
24481c69df45SOri Kam 					RTE_PORT_HANDLING,
24491c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
24501c69df45SOri Kam 			printf("Port %d can not be set back "
24511c69df45SOri Kam 					"to stopped\n", pi);
24521c69df45SOri Kam 		printf("Fail to configure port %d hairpin "
24531c69df45SOri Kam 				"queues\n", pi);
24541c69df45SOri Kam 		/* try to reconfigure queues next time */
24551c69df45SOri Kam 		port->need_reconfig_queues = 1;
24561c69df45SOri Kam 		return -1;
24571c69df45SOri Kam 	}
24581c69df45SOri Kam 	return 0;
24591c69df45SOri Kam }
24601c69df45SOri Kam 
24612befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */
24622befc67fSViacheslav Ovsiienko int
24632befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
24642befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
24652befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
24662befc67fSViacheslav Ovsiienko {
24672befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
24682befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
24692befc67fSViacheslav Ovsiienko 	int ret;
24702befc67fSViacheslav Ovsiienko 
24712befc67fSViacheslav Ovsiienko 	if (rx_pkt_nb_segs <= 1 ||
24722befc67fSViacheslav Ovsiienko 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
24732befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
24742befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
24752befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
24762befc67fSViacheslav Ovsiienko 					     nb_rx_desc, socket_id,
24772befc67fSViacheslav Ovsiienko 					     rx_conf, mp);
24782befc67fSViacheslav Ovsiienko 		return ret;
24792befc67fSViacheslav Ovsiienko 	}
24802befc67fSViacheslav Ovsiienko 	for (i = 0; i < rx_pkt_nb_segs; i++) {
24812befc67fSViacheslav Ovsiienko 		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
24822befc67fSViacheslav Ovsiienko 		struct rte_mempool *mpx;
24832befc67fSViacheslav Ovsiienko 		/*
24842befc67fSViacheslav Ovsiienko 		 * Use last valid pool for the segments with number
24852befc67fSViacheslav Ovsiienko 		 * exceeding the pool index.
24862befc67fSViacheslav Ovsiienko 		 */
24872befc67fSViacheslav Ovsiienko 		mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
24882befc67fSViacheslav Ovsiienko 		mpx = mbuf_pool_find(socket_id, mp_n);
24892befc67fSViacheslav Ovsiienko 		/* Handle zero as mbuf data buffer size. */
24902befc67fSViacheslav Ovsiienko 		rx_seg->length = rx_pkt_seg_lengths[i] ?
24912befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_lengths[i] :
24922befc67fSViacheslav Ovsiienko 				   mbuf_data_size[mp_n];
24932befc67fSViacheslav Ovsiienko 		rx_seg->offset = i < rx_pkt_nb_offs ?
24942befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_offsets[i] : 0;
24952befc67fSViacheslav Ovsiienko 		rx_seg->mp = mpx ? mpx : mp;
24962befc67fSViacheslav Ovsiienko 	}
24972befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = rx_pkt_nb_segs;
24982befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = rx_useg;
24992befc67fSViacheslav Ovsiienko 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
25002befc67fSViacheslav Ovsiienko 				    socket_id, rx_conf, NULL);
25012befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = NULL;
25022befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = 0;
25032befc67fSViacheslav Ovsiienko 	return ret;
25042befc67fSViacheslav Ovsiienko }
25052befc67fSViacheslav Ovsiienko 
2506edab33b1STetsuya Mukawa int
2507ce8d5614SIntel start_port(portid_t pid)
2508ce8d5614SIntel {
250992d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
2510ce8d5614SIntel 	portid_t pi;
251101817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
251201817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
251301817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
251401817b10SBing Zhao 	uint16_t cnt_pi = 0;
251501817b10SBing Zhao 	uint16_t cfg_pi = 0;
251601817b10SBing Zhao 	int peer_pi;
2517ce8d5614SIntel 	queueid_t qi;
2518ce8d5614SIntel 	struct rte_port *port;
25196d13ea8eSOlivier Matz 	struct rte_ether_addr mac_addr;
25201c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
2521ce8d5614SIntel 
25224468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
25234468635fSMichael Qiu 		return 0;
25244468635fSMichael Qiu 
2525ce8d5614SIntel 	if(dcb_config)
2526ce8d5614SIntel 		dcb_test = 1;
25277d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2528edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2529ce8d5614SIntel 			continue;
2530ce8d5614SIntel 
253192d2703eSMichael Qiu 		need_check_link_status = 0;
2532ce8d5614SIntel 		port = &ports[pi];
2533ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2534ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
2535ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2536ce8d5614SIntel 			continue;
2537ce8d5614SIntel 		}
2538ce8d5614SIntel 
2539ce8d5614SIntel 		if (port->need_reconfig > 0) {
2540ce8d5614SIntel 			port->need_reconfig = 0;
2541ce8d5614SIntel 
25427ee3e944SVasily Philipov 			if (flow_isolate_all) {
25437ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
25447ee3e944SVasily Philipov 				if (ret) {
25457ee3e944SVasily Philipov 					printf("Failed to apply isolated"
25467ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
25477ee3e944SVasily Philipov 					return -1;
25487ee3e944SVasily Philipov 				}
25497ee3e944SVasily Philipov 			}
2550b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
25515706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
255220a0286fSLiu Xiaofeng 					port->socket_id);
25531c69df45SOri Kam 			if (nb_hairpinq > 0 &&
25541c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
25551c69df45SOri Kam 				printf("Port %d doesn't support hairpin "
25561c69df45SOri Kam 				       "queues\n", pi);
25571c69df45SOri Kam 				return -1;
25581c69df45SOri Kam 			}
2559ce8d5614SIntel 			/* configure port */
25601c69df45SOri Kam 			diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
25611c69df45SOri Kam 						     nb_txq + nb_hairpinq,
2562ce8d5614SIntel 						     &(port->dev_conf));
2563ce8d5614SIntel 			if (diag != 0) {
2564ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2565ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2566ce8d5614SIntel 					printf("Port %d can not be set back "
2567ce8d5614SIntel 							"to stopped\n", pi);
2568ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
2569ce8d5614SIntel 				/* try to reconfigure port next time */
2570ce8d5614SIntel 				port->need_reconfig = 1;
2571148f963fSBruce Richardson 				return -1;
2572ce8d5614SIntel 			}
2573ce8d5614SIntel 		}
2574ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
2575ce8d5614SIntel 			port->need_reconfig_queues = 0;
2576ce8d5614SIntel 			/* setup tx queues */
2577ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
2578b6ea6408SIntel 				if ((numa_support) &&
2579b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2580b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2581d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2582d44f8a48SQi Zhang 						txring_numa[pi],
2583d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2584b6ea6408SIntel 				else
2585b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2586d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2587d44f8a48SQi Zhang 						port->socket_id,
2588d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2589b6ea6408SIntel 
2590ce8d5614SIntel 				if (diag == 0)
2591ce8d5614SIntel 					continue;
2592ce8d5614SIntel 
2593ce8d5614SIntel 				/* Fail to setup tx queue, return */
2594ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2595ce8d5614SIntel 							RTE_PORT_HANDLING,
2596ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2597ce8d5614SIntel 					printf("Port %d can not be set back "
2598ce8d5614SIntel 							"to stopped\n", pi);
2599d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
2600d44f8a48SQi Zhang 				       pi);
2601ce8d5614SIntel 				/* try to reconfigure queues next time */
2602ce8d5614SIntel 				port->need_reconfig_queues = 1;
2603148f963fSBruce Richardson 				return -1;
2604ce8d5614SIntel 			}
2605ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2606d44f8a48SQi Zhang 				/* setup rx queues */
2607b6ea6408SIntel 				if ((numa_support) &&
2608b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2609b6ea6408SIntel 					struct rte_mempool * mp =
261026cbb419SViacheslav Ovsiienko 						mbuf_pool_find
261126cbb419SViacheslav Ovsiienko 							(rxring_numa[pi], 0);
2612b6ea6408SIntel 					if (mp == NULL) {
2613b6ea6408SIntel 						printf("Failed to setup RX queue:"
2614b6ea6408SIntel 							"No mempool allocation"
2615b6ea6408SIntel 							" on the socket %d\n",
2616b6ea6408SIntel 							rxring_numa[pi]);
2617148f963fSBruce Richardson 						return -1;
2618b6ea6408SIntel 					}
2619b6ea6408SIntel 
26202befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2621d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2622d44f8a48SQi Zhang 					     rxring_numa[pi],
2623d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2624d44f8a48SQi Zhang 					     mp);
26251e1d6bddSBernard Iremonger 				} else {
26261e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
262726cbb419SViacheslav Ovsiienko 						mbuf_pool_find
262826cbb419SViacheslav Ovsiienko 							(port->socket_id, 0);
26291e1d6bddSBernard Iremonger 					if (mp == NULL) {
26301e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
26311e1d6bddSBernard Iremonger 							"No mempool allocation"
26321e1d6bddSBernard Iremonger 							" on the socket %d\n",
26331e1d6bddSBernard Iremonger 							port->socket_id);
26341e1d6bddSBernard Iremonger 						return -1;
2635b6ea6408SIntel 					}
26362befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2637d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2638d44f8a48SQi Zhang 					     port->socket_id,
2639d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2640d44f8a48SQi Zhang 					     mp);
26411e1d6bddSBernard Iremonger 				}
2642ce8d5614SIntel 				if (diag == 0)
2643ce8d5614SIntel 					continue;
2644ce8d5614SIntel 
2645ce8d5614SIntel 				/* Fail to setup rx queue, return */
2646ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2647ce8d5614SIntel 							RTE_PORT_HANDLING,
2648ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2649ce8d5614SIntel 					printf("Port %d can not be set back "
2650ce8d5614SIntel 							"to stopped\n", pi);
2651d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2652d44f8a48SQi Zhang 				       pi);
2653ce8d5614SIntel 				/* try to reconfigure queues next time */
2654ce8d5614SIntel 				port->need_reconfig_queues = 1;
2655148f963fSBruce Richardson 				return -1;
2656ce8d5614SIntel 			}
26571c69df45SOri Kam 			/* setup hairpin queues */
265801817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
26591c69df45SOri Kam 				return -1;
2660ce8d5614SIntel 		}
2661b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
2662b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
2663b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2664b0a9354aSPavan Nikhilesh 					NULL, 0);
2665b0a9354aSPavan Nikhilesh 			if (diag < 0)
2666b0a9354aSPavan Nikhilesh 				printf(
2667b0a9354aSPavan Nikhilesh 				"Port %d: Failed to disable Ptype parsing\n",
2668b0a9354aSPavan Nikhilesh 				pi);
2669b0a9354aSPavan Nikhilesh 		}
2670b0a9354aSPavan Nikhilesh 
267101817b10SBing Zhao 		p_pi = pi;
267201817b10SBing Zhao 		cnt_pi++;
267301817b10SBing Zhao 
2674ce8d5614SIntel 		/* start port */
2675ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2676ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2677ce8d5614SIntel 
2678ce8d5614SIntel 			/* Fail to setup rx queue, return */
2679ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2680ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2681ce8d5614SIntel 				printf("Port %d can not be set back to "
2682ce8d5614SIntel 							"stopped\n", pi);
2683ce8d5614SIntel 			continue;
2684ce8d5614SIntel 		}
2685ce8d5614SIntel 
2686ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2687ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2688ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2689ce8d5614SIntel 
2690a5279d25SIgor Romanov 		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2691d8c89163SZijie Pan 			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
26922950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
26932950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
26942950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2695d8c89163SZijie Pan 
2696ce8d5614SIntel 		/* at least one port started, need checking link status */
2697ce8d5614SIntel 		need_check_link_status = 1;
269801817b10SBing Zhao 
269901817b10SBing Zhao 		pl[cfg_pi++] = pi;
2700ce8d5614SIntel 	}
2701ce8d5614SIntel 
270292d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2703edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
270492d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2705ce8d5614SIntel 		printf("Please stop the ports first\n");
2706ce8d5614SIntel 
270701817b10SBing Zhao 	if (hairpin_mode & 0xf) {
270801817b10SBing Zhao 		uint16_t i;
270901817b10SBing Zhao 		int j;
271001817b10SBing Zhao 
271101817b10SBing Zhao 		/* bind all started hairpin ports */
271201817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
271301817b10SBing Zhao 			pi = pl[i];
271401817b10SBing Zhao 			/* bind current Tx to all peer Rx */
271501817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
271601817b10SBing Zhao 							RTE_MAX_ETHPORTS, 1);
271701817b10SBing Zhao 			if (peer_pi < 0)
271801817b10SBing Zhao 				return peer_pi;
271901817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
272001817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
272101817b10SBing Zhao 					continue;
272201817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
272301817b10SBing Zhao 				if (diag < 0) {
272401817b10SBing Zhao 					printf("Error during binding hairpin"
272501817b10SBing Zhao 					       " Tx port %u to %u: %s\n",
272601817b10SBing Zhao 					       pi, peer_pl[j],
272701817b10SBing Zhao 					       rte_strerror(-diag));
272801817b10SBing Zhao 					return -1;
272901817b10SBing Zhao 				}
273001817b10SBing Zhao 			}
273101817b10SBing Zhao 			/* bind all peer Tx to current Rx */
273201817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
273301817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
273401817b10SBing Zhao 			if (peer_pi < 0)
273501817b10SBing Zhao 				return peer_pi;
273601817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
273701817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
273801817b10SBing Zhao 					continue;
273901817b10SBing Zhao 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
274001817b10SBing Zhao 				if (diag < 0) {
274101817b10SBing Zhao 					printf("Error during binding hairpin"
274201817b10SBing Zhao 					       " Tx port %u to %u: %s\n",
274301817b10SBing Zhao 					       peer_pl[j], pi,
274401817b10SBing Zhao 					       rte_strerror(-diag));
274501817b10SBing Zhao 					return -1;
274601817b10SBing Zhao 				}
274701817b10SBing Zhao 			}
274801817b10SBing Zhao 		}
274901817b10SBing Zhao 	}
275001817b10SBing Zhao 
2751ce8d5614SIntel 	printf("Done\n");
2752148f963fSBruce Richardson 	return 0;
2753ce8d5614SIntel }
2754ce8d5614SIntel 
2755ce8d5614SIntel void
2756ce8d5614SIntel stop_port(portid_t pid)
2757ce8d5614SIntel {
2758ce8d5614SIntel 	portid_t pi;
2759ce8d5614SIntel 	struct rte_port *port;
2760ce8d5614SIntel 	int need_check_link_status = 0;
276101817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
276201817b10SBing Zhao 	int peer_pi;
2763ce8d5614SIntel 
2764ce8d5614SIntel 	if (dcb_test) {
2765ce8d5614SIntel 		dcb_test = 0;
2766ce8d5614SIntel 		dcb_config = 0;
2767ce8d5614SIntel 	}
27684468635fSMichael Qiu 
27694468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
27704468635fSMichael Qiu 		return;
27714468635fSMichael Qiu 
2772ce8d5614SIntel 	printf("Stopping ports...\n");
2773ce8d5614SIntel 
27747d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
27754468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2776ce8d5614SIntel 			continue;
2777ce8d5614SIntel 
2778a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2779a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2780a8ef3e3aSBernard Iremonger 			continue;
2781a8ef3e3aSBernard Iremonger 		}
2782a8ef3e3aSBernard Iremonger 
27830e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
27840e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
27850e545d30SBernard Iremonger 			continue;
27860e545d30SBernard Iremonger 		}
27870e545d30SBernard Iremonger 
2788ce8d5614SIntel 		port = &ports[pi];
2789ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2790ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
2791ce8d5614SIntel 			continue;
2792ce8d5614SIntel 
279301817b10SBing Zhao 		if (hairpin_mode & 0xf) {
279401817b10SBing Zhao 			int j;
279501817b10SBing Zhao 
279601817b10SBing Zhao 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
279701817b10SBing Zhao 			/* unbind all peer Tx from current Rx */
279801817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
279901817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
280001817b10SBing Zhao 			if (peer_pi < 0)
280101817b10SBing Zhao 				continue;
280201817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
280301817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
280401817b10SBing Zhao 					continue;
280501817b10SBing Zhao 				rte_eth_hairpin_unbind(peer_pl[j], pi);
280601817b10SBing Zhao 			}
280701817b10SBing Zhao 		}
280801817b10SBing Zhao 
2809*e62c5a12SIvan Ilchenko 		if (rte_eth_dev_stop(pi) != 0)
2810*e62c5a12SIvan Ilchenko 			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
2811*e62c5a12SIvan Ilchenko 				pi);
2812ce8d5614SIntel 
2813ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2814ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2815ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
2816ce8d5614SIntel 		need_check_link_status = 1;
2817ce8d5614SIntel 	}
2818bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
2819edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
2820ce8d5614SIntel 
2821ce8d5614SIntel 	printf("Done\n");
2822ce8d5614SIntel }
2823ce8d5614SIntel 
2824ce6959bfSWisam Jaddo static void
28254f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2826ce6959bfSWisam Jaddo {
28274f1de450SThomas Monjalon 	portid_t i;
28284f1de450SThomas Monjalon 	portid_t new_total = 0;
2829ce6959bfSWisam Jaddo 
28304f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
28314f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
28324f1de450SThomas Monjalon 			array[new_total] = array[i];
28334f1de450SThomas Monjalon 			new_total++;
2834ce6959bfSWisam Jaddo 		}
28354f1de450SThomas Monjalon 	*total = new_total;
28364f1de450SThomas Monjalon }
28374f1de450SThomas Monjalon 
28384f1de450SThomas Monjalon static void
28394f1de450SThomas Monjalon remove_invalid_ports(void)
28404f1de450SThomas Monjalon {
28414f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
28424f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
28434f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
2844ce6959bfSWisam Jaddo }
2845ce6959bfSWisam Jaddo 
2846ce8d5614SIntel void
2847ce8d5614SIntel close_port(portid_t pid)
2848ce8d5614SIntel {
2849ce8d5614SIntel 	portid_t pi;
2850ce8d5614SIntel 	struct rte_port *port;
2851ce8d5614SIntel 
28524468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
28534468635fSMichael Qiu 		return;
28544468635fSMichael Qiu 
2855ce8d5614SIntel 	printf("Closing ports...\n");
2856ce8d5614SIntel 
28577d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
28584468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2859ce8d5614SIntel 			continue;
2860ce8d5614SIntel 
2861a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2862a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2863a8ef3e3aSBernard Iremonger 			continue;
2864a8ef3e3aSBernard Iremonger 		}
2865a8ef3e3aSBernard Iremonger 
28660e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
28670e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
28680e545d30SBernard Iremonger 			continue;
28690e545d30SBernard Iremonger 		}
28700e545d30SBernard Iremonger 
2871ce8d5614SIntel 		port = &ports[pi];
2872ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2873d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2874d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
2875d4e8ad64SMichael Qiu 			continue;
2876d4e8ad64SMichael Qiu 		}
2877d4e8ad64SMichael Qiu 
2878938a184aSAdrien Mazarguil 		port_flow_flush(pi);
2879ce8d5614SIntel 		rte_eth_dev_close(pi);
2880ce8d5614SIntel 	}
2881ce8d5614SIntel 
288285c6571cSThomas Monjalon 	remove_invalid_ports();
2883ce8d5614SIntel 	printf("Done\n");
2884ce8d5614SIntel }
2885ce8d5614SIntel 
2886edab33b1STetsuya Mukawa void
288797f1e196SWei Dai reset_port(portid_t pid)
288897f1e196SWei Dai {
288997f1e196SWei Dai 	int diag;
289097f1e196SWei Dai 	portid_t pi;
289197f1e196SWei Dai 	struct rte_port *port;
289297f1e196SWei Dai 
289397f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
289497f1e196SWei Dai 		return;
289597f1e196SWei Dai 
28961cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
28971cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
28981cde1b9aSShougang Wang 		printf("Can not reset port(s), please stop port(s) first.\n");
28991cde1b9aSShougang Wang 		return;
29001cde1b9aSShougang Wang 	}
29011cde1b9aSShougang Wang 
290297f1e196SWei Dai 	printf("Resetting ports...\n");
290397f1e196SWei Dai 
290497f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
290597f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
290697f1e196SWei Dai 			continue;
290797f1e196SWei Dai 
290897f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
290997f1e196SWei Dai 			printf("Please remove port %d from forwarding "
291097f1e196SWei Dai 			       "configuration.\n", pi);
291197f1e196SWei Dai 			continue;
291297f1e196SWei Dai 		}
291397f1e196SWei Dai 
291497f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
291597f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
291697f1e196SWei Dai 			       pi);
291797f1e196SWei Dai 			continue;
291897f1e196SWei Dai 		}
291997f1e196SWei Dai 
292097f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
292197f1e196SWei Dai 		if (diag == 0) {
292297f1e196SWei Dai 			port = &ports[pi];
292397f1e196SWei Dai 			port->need_reconfig = 1;
292497f1e196SWei Dai 			port->need_reconfig_queues = 1;
292597f1e196SWei Dai 		} else {
292697f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
292797f1e196SWei Dai 		}
292897f1e196SWei Dai 	}
292997f1e196SWei Dai 
293097f1e196SWei Dai 	printf("Done\n");
293197f1e196SWei Dai }
293297f1e196SWei Dai 
293397f1e196SWei Dai void
2934edab33b1STetsuya Mukawa attach_port(char *identifier)
2935ce8d5614SIntel {
29364f1ed78eSThomas Monjalon 	portid_t pi;
2937c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2938ce8d5614SIntel 
2939edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2940edab33b1STetsuya Mukawa 
2941edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2942edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2943edab33b1STetsuya Mukawa 		return;
2944ce8d5614SIntel 	}
2945ce8d5614SIntel 
294675b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
2947c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2948edab33b1STetsuya Mukawa 		return;
2949c9cce428SThomas Monjalon 	}
2950c9cce428SThomas Monjalon 
29514f1ed78eSThomas Monjalon 	/* first attach mode: event */
29524f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
29534f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
29544f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
29554f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
29564f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
29574f1ed78eSThomas Monjalon 				setup_attached_port(pi);
29584f1ed78eSThomas Monjalon 		return;
29594f1ed78eSThomas Monjalon 	}
29604f1ed78eSThomas Monjalon 
29614f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
296286fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
29634f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
296486fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
296586fa5de1SThomas Monjalon 			continue; /* port was already attached before */
2966c9cce428SThomas Monjalon 		setup_attached_port(pi);
2967c9cce428SThomas Monjalon 	}
296886fa5de1SThomas Monjalon }
2969c9cce428SThomas Monjalon 
2970c9cce428SThomas Monjalon static void
2971c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
2972c9cce428SThomas Monjalon {
2973c9cce428SThomas Monjalon 	unsigned int socket_id;
297434fc1051SIvan Ilchenko 	int ret;
2975edab33b1STetsuya Mukawa 
2976931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
297729841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
2978931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
297929841336SPhil Yang 		socket_id = socket_ids[0];
2980931126baSBernard Iremonger 	reconfig(pi, socket_id);
298134fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
298234fc1051SIvan Ilchenko 	if (ret != 0)
298334fc1051SIvan Ilchenko 		printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
298434fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
2985edab33b1STetsuya Mukawa 
29864f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
29874f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
29884f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
29894f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
2990edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2991edab33b1STetsuya Mukawa 
2992edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2993edab33b1STetsuya Mukawa 	printf("Done\n");
2994edab33b1STetsuya Mukawa }
2995edab33b1STetsuya Mukawa 
29960654d4a8SThomas Monjalon static void
29970654d4a8SThomas Monjalon detach_device(struct rte_device *dev)
29985f4ec54fSChen Jing D(Mark) {
2999f8e5baa2SThomas Monjalon 	portid_t sibling;
3000f8e5baa2SThomas Monjalon 
3001f8e5baa2SThomas Monjalon 	if (dev == NULL) {
3002f8e5baa2SThomas Monjalon 		printf("Device already removed\n");
3003f8e5baa2SThomas Monjalon 		return;
3004f8e5baa2SThomas Monjalon 	}
3005f8e5baa2SThomas Monjalon 
30060654d4a8SThomas Monjalon 	printf("Removing a device...\n");
3007938a184aSAdrien Mazarguil 
30082a449871SThomas Monjalon 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
30092a449871SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
30102a449871SThomas Monjalon 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
30112a449871SThomas Monjalon 				printf("Port %u not stopped\n", sibling);
30122a449871SThomas Monjalon 				return;
30132a449871SThomas Monjalon 			}
30142a449871SThomas Monjalon 			port_flow_flush(sibling);
30152a449871SThomas Monjalon 		}
30162a449871SThomas Monjalon 	}
30172a449871SThomas Monjalon 
301875b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
3019f8e5baa2SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3020edab33b1STetsuya Mukawa 		return;
30213070419eSGaetan Rivet 	}
30224f1de450SThomas Monjalon 	remove_invalid_ports();
302303ce2c53SMatan Azrad 
30240654d4a8SThomas Monjalon 	printf("Device is detached\n");
3025f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
3026edab33b1STetsuya Mukawa 	printf("Done\n");
3027edab33b1STetsuya Mukawa 	return;
30285f4ec54fSChen Jing D(Mark) }
30295f4ec54fSChen Jing D(Mark) 
3030af75078fSIntel void
30310654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
30320654d4a8SThomas Monjalon {
30330654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
30340654d4a8SThomas Monjalon 		return;
30350654d4a8SThomas Monjalon 
30360654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
30370654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
30380654d4a8SThomas Monjalon 			printf("Port not stopped\n");
30390654d4a8SThomas Monjalon 			return;
30400654d4a8SThomas Monjalon 		}
30410654d4a8SThomas Monjalon 		printf("Port was not closed\n");
30420654d4a8SThomas Monjalon 	}
30430654d4a8SThomas Monjalon 
30440654d4a8SThomas Monjalon 	detach_device(rte_eth_devices[port_id].device);
30450654d4a8SThomas Monjalon }
30460654d4a8SThomas Monjalon 
30470654d4a8SThomas Monjalon void
30485edee5f6SThomas Monjalon detach_devargs(char *identifier)
304955e51c96SNithin Dabilpuram {
305055e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
305155e51c96SNithin Dabilpuram 	struct rte_devargs da;
305255e51c96SNithin Dabilpuram 	portid_t port_id;
305355e51c96SNithin Dabilpuram 
305455e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
305555e51c96SNithin Dabilpuram 
305655e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
305755e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
305855e51c96SNithin Dabilpuram 		printf("cannot parse identifier\n");
305955e51c96SNithin Dabilpuram 		if (da.args)
306055e51c96SNithin Dabilpuram 			free(da.args);
306155e51c96SNithin Dabilpuram 		return;
306255e51c96SNithin Dabilpuram 	}
306355e51c96SNithin Dabilpuram 
306455e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
306555e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
306655e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
306755e51c96SNithin Dabilpuram 				printf("Port %u not stopped\n", port_id);
3068149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
306955e51c96SNithin Dabilpuram 				return;
307055e51c96SNithin Dabilpuram 			}
307155e51c96SNithin Dabilpuram 			port_flow_flush(port_id);
307255e51c96SNithin Dabilpuram 		}
307355e51c96SNithin Dabilpuram 	}
307455e51c96SNithin Dabilpuram 
307555e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
307655e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
307755e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
307855e51c96SNithin Dabilpuram 		return;
307955e51c96SNithin Dabilpuram 	}
308055e51c96SNithin Dabilpuram 
308155e51c96SNithin Dabilpuram 	remove_invalid_ports();
308255e51c96SNithin Dabilpuram 
308355e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
308455e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
308555e51c96SNithin Dabilpuram 	printf("Done\n");
308655e51c96SNithin Dabilpuram }
308755e51c96SNithin Dabilpuram 
308855e51c96SNithin Dabilpuram void
3089af75078fSIntel pmd_test_exit(void)
3090af75078fSIntel {
3091af75078fSIntel 	portid_t pt_id;
309226cbb419SViacheslav Ovsiienko 	unsigned int i;
3093fb73e096SJeff Guo 	int ret;
3094af75078fSIntel 
30958210ec25SPablo de Lara 	if (test_done == 0)
30968210ec25SPablo de Lara 		stop_packet_forwarding();
30978210ec25SPablo de Lara 
309826cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
30993a0968c8SShahaf Shuler 		if (mempools[i]) {
31003a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
31013a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
31023a0968c8SShahaf Shuler 						     NULL);
31033a0968c8SShahaf Shuler 		}
31043a0968c8SShahaf Shuler 	}
3105d3a274ceSZhihong Wang 	if (ports != NULL) {
3106d3a274ceSZhihong Wang 		no_link_check = 1;
31077d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
310808fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
3109af75078fSIntel 			fflush(stdout);
3110d3a274ceSZhihong Wang 			stop_port(pt_id);
311108fd782bSCristian Dumitrescu 		}
311208fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
311308fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
311408fd782bSCristian Dumitrescu 			fflush(stdout);
3115d3a274ceSZhihong Wang 			close_port(pt_id);
3116af75078fSIntel 		}
3117d3a274ceSZhihong Wang 	}
3118fb73e096SJeff Guo 
3119fb73e096SJeff Guo 	if (hot_plug) {
3120fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
31212049c511SJeff Guo 		if (ret) {
3122fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
3123fb73e096SJeff Guo 				"fail to stop device event monitor.");
31242049c511SJeff Guo 			return;
31252049c511SJeff Guo 		}
3126fb73e096SJeff Guo 
31272049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
3128cc1bf307SJeff Guo 			dev_event_callback, NULL);
31292049c511SJeff Guo 		if (ret < 0) {
3130fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
31312049c511SJeff Guo 				"fail to unregister device event callback.\n");
31322049c511SJeff Guo 			return;
31332049c511SJeff Guo 		}
31342049c511SJeff Guo 
31352049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
31362049c511SJeff Guo 		if (ret) {
31372049c511SJeff Guo 			RTE_LOG(ERR, EAL,
31382049c511SJeff Guo 				"fail to disable hotplug handling.\n");
31392049c511SJeff Guo 			return;
31402049c511SJeff Guo 		}
3141fb73e096SJeff Guo 	}
314226cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3143401b744dSShahaf Shuler 		if (mempools[i])
3144401b744dSShahaf Shuler 			rte_mempool_free(mempools[i]);
3145401b744dSShahaf Shuler 	}
3146fb73e096SJeff Guo 
3147d3a274ceSZhihong Wang 	printf("\nBye...\n");
3148af75078fSIntel }
3149af75078fSIntel 
3150af75078fSIntel typedef void (*cmd_func_t)(void);
3151af75078fSIntel struct pmd_test_command {
3152af75078fSIntel 	const char *cmd_name;
3153af75078fSIntel 	cmd_func_t cmd_func;
3154af75078fSIntel };
3155af75078fSIntel 
3156ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
3157af75078fSIntel static void
3158edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
3159af75078fSIntel {
3160ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
3161ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3162f8244c63SZhiyong Yang 	portid_t portid;
3163f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
3164ce8d5614SIntel 	struct rte_eth_link link;
3165e661a08bSIgor Romanov 	int ret;
3166ba5509a6SIvan Dyukov 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3167ce8d5614SIntel 
3168ce8d5614SIntel 	printf("Checking link statuses...\n");
3169ce8d5614SIntel 	fflush(stdout);
3170ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
3171ce8d5614SIntel 		all_ports_up = 1;
31727d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
3173ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
3174ce8d5614SIntel 				continue;
3175ce8d5614SIntel 			memset(&link, 0, sizeof(link));
3176e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
3177e661a08bSIgor Romanov 			if (ret < 0) {
3178e661a08bSIgor Romanov 				all_ports_up = 0;
3179e661a08bSIgor Romanov 				if (print_flag == 1)
3180e661a08bSIgor Romanov 					printf("Port %u link get failed: %s\n",
3181e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
3182e661a08bSIgor Romanov 				continue;
3183e661a08bSIgor Romanov 			}
3184ce8d5614SIntel 			/* print link status if flag set */
3185ce8d5614SIntel 			if (print_flag == 1) {
3186ba5509a6SIvan Dyukov 				rte_eth_link_to_str(link_status,
3187ba5509a6SIvan Dyukov 					sizeof(link_status), &link);
3188ba5509a6SIvan Dyukov 				printf("Port %d %s\n", portid, link_status);
3189ce8d5614SIntel 				continue;
3190ce8d5614SIntel 			}
3191ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
319209419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
3193ce8d5614SIntel 				all_ports_up = 0;
3194ce8d5614SIntel 				break;
3195ce8d5614SIntel 			}
3196ce8d5614SIntel 		}
3197ce8d5614SIntel 		/* after finally printing all link status, get out */
3198ce8d5614SIntel 		if (print_flag == 1)
3199ce8d5614SIntel 			break;
3200ce8d5614SIntel 
3201ce8d5614SIntel 		if (all_ports_up == 0) {
3202ce8d5614SIntel 			fflush(stdout);
3203ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
3204ce8d5614SIntel 		}
3205ce8d5614SIntel 
3206ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
3207ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3208ce8d5614SIntel 			print_flag = 1;
3209ce8d5614SIntel 		}
32108ea656f8SGaetan Rivet 
32118ea656f8SGaetan Rivet 		if (lsc_interrupt)
32128ea656f8SGaetan Rivet 			break;
3213ce8d5614SIntel 	}
3214af75078fSIntel }
3215af75078fSIntel 
3216284c908cSGaetan Rivet static void
3217cc1bf307SJeff Guo rmv_port_callback(void *arg)
3218284c908cSGaetan Rivet {
32193b97888aSMatan Azrad 	int need_to_start = 0;
32200da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
322128caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
32220654d4a8SThomas Monjalon 	struct rte_device *dev;
3223284c908cSGaetan Rivet 
3224284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
3225284c908cSGaetan Rivet 
32263b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
32273b97888aSMatan Azrad 		need_to_start = 1;
32283b97888aSMatan Azrad 		stop_packet_forwarding();
32293b97888aSMatan Azrad 	}
32300da2a62bSMatan Azrad 	no_link_check = 1;
3231284c908cSGaetan Rivet 	stop_port(port_id);
32320da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
32330654d4a8SThomas Monjalon 
32340654d4a8SThomas Monjalon 	/* Save rte_device pointer before closing ethdev port */
32350654d4a8SThomas Monjalon 	dev = rte_eth_devices[port_id].device;
3236284c908cSGaetan Rivet 	close_port(port_id);
32370654d4a8SThomas Monjalon 	detach_device(dev); /* might be already removed or have more ports */
32380654d4a8SThomas Monjalon 
32393b97888aSMatan Azrad 	if (need_to_start)
32403b97888aSMatan Azrad 		start_packet_forwarding(0);
3241284c908cSGaetan Rivet }
3242284c908cSGaetan Rivet 
324376ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3244d6af1a13SBernard Iremonger static int
3245f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3246d6af1a13SBernard Iremonger 		  void *ret_param)
324776ad4a2dSGaetan Rivet {
324876ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3249d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
325076ad4a2dSGaetan Rivet 
325176ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
3252f431e010SHerakliusz Lipiec 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
325376ad4a2dSGaetan Rivet 			port_id, __func__, type);
325476ad4a2dSGaetan Rivet 		fflush(stderr);
32553af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3256f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
325797b5d8b5SThomas Monjalon 			eth_event_desc[type]);
325876ad4a2dSGaetan Rivet 		fflush(stdout);
325976ad4a2dSGaetan Rivet 	}
3260284c908cSGaetan Rivet 
3261284c908cSGaetan Rivet 	switch (type) {
32624f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
32634f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
32644f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
32654f1ed78eSThomas Monjalon 		break;
3266284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
32674f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
32684f1ed78eSThomas Monjalon 			break;
3269284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3270cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3271284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
3272284c908cSGaetan Rivet 		break;
327385c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
327485c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
327585c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
327685c6571cSThomas Monjalon 		break;
3277284c908cSGaetan Rivet 	default:
3278284c908cSGaetan Rivet 		break;
3279284c908cSGaetan Rivet 	}
3280d6af1a13SBernard Iremonger 	return 0;
328176ad4a2dSGaetan Rivet }
328276ad4a2dSGaetan Rivet 
328397b5d8b5SThomas Monjalon static int
328497b5d8b5SThomas Monjalon register_eth_event_callback(void)
328597b5d8b5SThomas Monjalon {
328697b5d8b5SThomas Monjalon 	int ret;
328797b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
328897b5d8b5SThomas Monjalon 
328997b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
329097b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
329197b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
329297b5d8b5SThomas Monjalon 				event,
329397b5d8b5SThomas Monjalon 				eth_event_callback,
329497b5d8b5SThomas Monjalon 				NULL);
329597b5d8b5SThomas Monjalon 		if (ret != 0) {
329697b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
329797b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
329897b5d8b5SThomas Monjalon 			return -1;
329997b5d8b5SThomas Monjalon 		}
330097b5d8b5SThomas Monjalon 	}
330197b5d8b5SThomas Monjalon 
330297b5d8b5SThomas Monjalon 	return 0;
330397b5d8b5SThomas Monjalon }
330497b5d8b5SThomas Monjalon 
3305fb73e096SJeff Guo /* This function is used by the interrupt thread */
3306fb73e096SJeff Guo static void
3307cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3308fb73e096SJeff Guo 			     __rte_unused void *arg)
3309fb73e096SJeff Guo {
33102049c511SJeff Guo 	uint16_t port_id;
33112049c511SJeff Guo 	int ret;
33122049c511SJeff Guo 
3313fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
3314fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
3315fb73e096SJeff Guo 			__func__, type);
3316fb73e096SJeff Guo 		fflush(stderr);
3317fb73e096SJeff Guo 	}
3318fb73e096SJeff Guo 
3319fb73e096SJeff Guo 	switch (type) {
3320fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
3321cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3322fb73e096SJeff Guo 			device_name);
33232049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
33242049c511SJeff Guo 		if (ret) {
33252049c511SJeff Guo 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
33262049c511SJeff Guo 				device_name);
33272049c511SJeff Guo 			return;
33282049c511SJeff Guo 		}
3329cc1bf307SJeff Guo 		/*
3330cc1bf307SJeff Guo 		 * Because the user's callback is invoked in eal interrupt
3331cc1bf307SJeff Guo 		 * callback, the interrupt callback need to be finished before
3332cc1bf307SJeff Guo 		 * it can be unregistered when detaching device. So finish
3333cc1bf307SJeff Guo 		 * callback soon and use a deferred removal to detach device
3334cc1bf307SJeff Guo 		 * is need. It is a workaround, once the device detaching be
3335cc1bf307SJeff Guo 		 * moved into the eal in the future, the deferred removal could
3336cc1bf307SJeff Guo 		 * be deleted.
3337cc1bf307SJeff Guo 		 */
3338cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3339cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3340cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
3341cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
3342fb73e096SJeff Guo 		break;
3343fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
3344fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3345fb73e096SJeff Guo 			device_name);
3346fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
3347fb73e096SJeff Guo 		 * begin to attach port.
3348fb73e096SJeff Guo 		 */
3349fb73e096SJeff Guo 		break;
3350fb73e096SJeff Guo 	default:
3351fb73e096SJeff Guo 		break;
3352fb73e096SJeff Guo 	}
3353fb73e096SJeff Guo }
3354fb73e096SJeff Guo 
3355013af9b6SIntel static int
335628caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3357af75078fSIntel {
3358013af9b6SIntel 	uint16_t i;
3359af75078fSIntel 	int diag;
3360013af9b6SIntel 	uint8_t mapping_found = 0;
3361af75078fSIntel 
3362013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3363013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3364013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
3365013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3366013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
3367013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
3368013af9b6SIntel 			if (diag != 0)
3369013af9b6SIntel 				return diag;
3370013af9b6SIntel 			mapping_found = 1;
3371af75078fSIntel 		}
3372013af9b6SIntel 	}
3373013af9b6SIntel 	if (mapping_found)
3374013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
3375013af9b6SIntel 	return 0;
3376013af9b6SIntel }
3377013af9b6SIntel 
3378013af9b6SIntel static int
337928caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3380013af9b6SIntel {
3381013af9b6SIntel 	uint16_t i;
3382013af9b6SIntel 	int diag;
3383013af9b6SIntel 	uint8_t mapping_found = 0;
3384013af9b6SIntel 
3385013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3386013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3387013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
3388013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3389013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
3390013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
3391013af9b6SIntel 			if (diag != 0)
3392013af9b6SIntel 				return diag;
3393013af9b6SIntel 			mapping_found = 1;
3394013af9b6SIntel 		}
3395013af9b6SIntel 	}
3396013af9b6SIntel 	if (mapping_found)
3397013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
3398013af9b6SIntel 	return 0;
3399013af9b6SIntel }
3400013af9b6SIntel 
3401013af9b6SIntel static void
340228caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3403013af9b6SIntel {
3404013af9b6SIntel 	int diag = 0;
3405013af9b6SIntel 
3406013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
3407af75078fSIntel 	if (diag != 0) {
3408013af9b6SIntel 		if (diag == -ENOTSUP) {
3409013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
3410013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
3411013af9b6SIntel 		}
3412013af9b6SIntel 		else
3413013af9b6SIntel 			rte_exit(EXIT_FAILURE,
3414013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
3415013af9b6SIntel 					"failed for port id=%d diag=%d\n",
3416af75078fSIntel 					pi, diag);
3417af75078fSIntel 	}
3418013af9b6SIntel 
3419013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
3420af75078fSIntel 	if (diag != 0) {
3421013af9b6SIntel 		if (diag == -ENOTSUP) {
3422013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
3423013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
3424013af9b6SIntel 		}
3425013af9b6SIntel 		else
3426013af9b6SIntel 			rte_exit(EXIT_FAILURE,
3427013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
3428013af9b6SIntel 					"failed for port id=%d diag=%d\n",
3429af75078fSIntel 					pi, diag);
3430af75078fSIntel 	}
3431af75078fSIntel }
3432af75078fSIntel 
3433f2c5125aSPablo de Lara static void
3434f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
3435f2c5125aSPablo de Lara {
3436d44f8a48SQi Zhang 	uint16_t qid;
34375e91aeefSWei Zhao 	uint64_t offloads;
3438f2c5125aSPablo de Lara 
3439d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
34405e91aeefSWei Zhao 		offloads = port->rx_conf[qid].offloads;
3441d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
3442575e0fd1SWei Zhao 		if (offloads != 0)
3443575e0fd1SWei Zhao 			port->rx_conf[qid].offloads = offloads;
3444d44f8a48SQi Zhang 
3445d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3446f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3447d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3448f2c5125aSPablo de Lara 
3449f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3450d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3451f2c5125aSPablo de Lara 
3452f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3453d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3454f2c5125aSPablo de Lara 
3455f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3456d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3457f2c5125aSPablo de Lara 
3458f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3459d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
3460f2c5125aSPablo de Lara 
3461d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3462d44f8a48SQi Zhang 	}
3463d44f8a48SQi Zhang 
3464d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
34655e91aeefSWei Zhao 		offloads = port->tx_conf[qid].offloads;
3466d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
3467575e0fd1SWei Zhao 		if (offloads != 0)
3468575e0fd1SWei Zhao 			port->tx_conf[qid].offloads = offloads;
3469d44f8a48SQi Zhang 
3470d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3471f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3472d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3473f2c5125aSPablo de Lara 
3474f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3475d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3476f2c5125aSPablo de Lara 
3477f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3478d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3479f2c5125aSPablo de Lara 
3480f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3481d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3482f2c5125aSPablo de Lara 
3483f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3484d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3485d44f8a48SQi Zhang 
3486d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
3487d44f8a48SQi Zhang 	}
3488f2c5125aSPablo de Lara }
3489f2c5125aSPablo de Lara 
3490013af9b6SIntel void
3491013af9b6SIntel init_port_config(void)
3492013af9b6SIntel {
3493013af9b6SIntel 	portid_t pid;
3494013af9b6SIntel 	struct rte_port *port;
34956f51deb9SIvan Ilchenko 	int ret;
3496013af9b6SIntel 
34977d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
3498013af9b6SIntel 		port = &ports[pid];
3499013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
35006f51deb9SIvan Ilchenko 
35016f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
35026f51deb9SIvan Ilchenko 		if (ret != 0)
35036f51deb9SIvan Ilchenko 			return;
35046f51deb9SIvan Ilchenko 
35053ce690d3SBruce Richardson 		if (nb_rxq > 1) {
3506013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
350790892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3508422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
3509af75078fSIntel 		} else {
3510013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3511013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3512af75078fSIntel 		}
35133ce690d3SBruce Richardson 
35145f592039SJingjing Wu 		if (port->dcb_flag == 0) {
35153ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3516f9295aa2SXiaoyu Min 				port->dev_conf.rxmode.mq_mode =
3517f9295aa2SXiaoyu Min 					(enum rte_eth_rx_mq_mode)
3518f9295aa2SXiaoyu Min 						(rx_mq_mode & ETH_MQ_RX_RSS);
35193ce690d3SBruce Richardson 			else
35203ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
35213ce690d3SBruce Richardson 		}
35223ce690d3SBruce Richardson 
3523f2c5125aSPablo de Lara 		rxtx_port_config(port);
3524013af9b6SIntel 
3525a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3526a5279d25SIgor Romanov 		if (ret != 0)
3527a5279d25SIgor Romanov 			return;
3528013af9b6SIntel 
3529013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
353050c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3531e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
35327b7e5ba7SIntel #endif
35338ea656f8SGaetan Rivet 
35348ea656f8SGaetan Rivet 		if (lsc_interrupt &&
35358ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
35368ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
35378ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
3538284c908cSGaetan Rivet 		if (rmv_interrupt &&
3539284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
3540284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
3541284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
3542013af9b6SIntel 	}
3543013af9b6SIntel }
3544013af9b6SIntel 
354541b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
354641b05095SBernard Iremonger {
354741b05095SBernard Iremonger 	struct rte_port *port;
354841b05095SBernard Iremonger 
354941b05095SBernard Iremonger 	port = &ports[slave_pid];
355041b05095SBernard Iremonger 	port->slave_flag = 1;
355141b05095SBernard Iremonger }
355241b05095SBernard Iremonger 
355341b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
355441b05095SBernard Iremonger {
355541b05095SBernard Iremonger 	struct rte_port *port;
355641b05095SBernard Iremonger 
355741b05095SBernard Iremonger 	port = &ports[slave_pid];
355841b05095SBernard Iremonger 	port->slave_flag = 0;
355941b05095SBernard Iremonger }
356041b05095SBernard Iremonger 
35610e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
35620e545d30SBernard Iremonger {
35630e545d30SBernard Iremonger 	struct rte_port *port;
35640e545d30SBernard Iremonger 
35650e545d30SBernard Iremonger 	port = &ports[slave_pid];
3566b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
3567b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3568b8b8b344SMatan Azrad 		return 1;
3569b8b8b344SMatan Azrad 	return 0;
35700e545d30SBernard Iremonger }
35710e545d30SBernard Iremonger 
3572013af9b6SIntel const uint16_t vlan_tags[] = {
3573013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
3574013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
3575013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
3576013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
3577013af9b6SIntel };
3578013af9b6SIntel 
3579013af9b6SIntel static  int
3580ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
35811a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
35821a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
35831a572499SJingjing Wu 		 uint8_t pfc_en)
3584013af9b6SIntel {
3585013af9b6SIntel 	uint8_t i;
3586ac7c491cSKonstantin Ananyev 	int32_t rc;
3587ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
3588af75078fSIntel 
3589af75078fSIntel 	/*
3590013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3591013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
3592af75078fSIntel 	 */
35931a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
35941a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
35951a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
35961a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
35971a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3598013af9b6SIntel 
3599547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
36001a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
36011a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
36021a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
36031a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
36041a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
36051a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3606013af9b6SIntel 
36071a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
36081a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
36091a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
36101a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
36111a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
3612af75078fSIntel 		}
3613013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3614f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3615f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3616013af9b6SIntel 		}
3617013af9b6SIntel 
3618013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
3619f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
3620f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
3621f9295aa2SXiaoyu Min 					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
362232e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
36231a572499SJingjing Wu 	} else {
36241a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
36251a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
36261a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
36271a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
3628013af9b6SIntel 
36295139bc12STing Xu 		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
36305139bc12STing Xu 
3631ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3632ac7c491cSKonstantin Ananyev 		if (rc != 0)
3633ac7c491cSKonstantin Ananyev 			return rc;
3634ac7c491cSKonstantin Ananyev 
36351a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
36361a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
36371a572499SJingjing Wu 
3638bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3639bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
3640bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
3641013af9b6SIntel 		}
3642ac7c491cSKonstantin Ananyev 
3643f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
3644f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
3645f9295aa2SXiaoyu Min 					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3646ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
364732e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
36481a572499SJingjing Wu 	}
36491a572499SJingjing Wu 
36501a572499SJingjing Wu 	if (pfc_en)
36511a572499SJingjing Wu 		eth_conf->dcb_capability_en =
36521a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3653013af9b6SIntel 	else
3654013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3655013af9b6SIntel 
3656013af9b6SIntel 	return 0;
3657013af9b6SIntel }
3658013af9b6SIntel 
3659013af9b6SIntel int
36601a572499SJingjing Wu init_port_dcb_config(portid_t pid,
36611a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
36621a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
36631a572499SJingjing Wu 		     uint8_t pfc_en)
3664013af9b6SIntel {
3665013af9b6SIntel 	struct rte_eth_conf port_conf;
3666013af9b6SIntel 	struct rte_port *rte_port;
3667013af9b6SIntel 	int retval;
3668013af9b6SIntel 	uint16_t i;
3669013af9b6SIntel 
36702a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3671013af9b6SIntel 
3672013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3673013af9b6SIntel 	/* Enter DCB configuration status */
3674013af9b6SIntel 	dcb_config = 1;
3675013af9b6SIntel 
3676d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3677d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
3678d5354e89SYanglong Wu 
3679013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
3680ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3681013af9b6SIntel 	if (retval < 0)
3682013af9b6SIntel 		return retval;
36830074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3684013af9b6SIntel 
36852f203d44SQi Zhang 	/* re-configure the device . */
36862b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
36872b0e0ebaSChenbo Xia 	if (retval < 0)
36882b0e0ebaSChenbo Xia 		return retval;
36896f51deb9SIvan Ilchenko 
36906f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
36916f51deb9SIvan Ilchenko 	if (retval != 0)
36926f51deb9SIvan Ilchenko 		return retval;
36932a977b89SWenzhuo Lu 
36942a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
36952a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
36962a977b89SWenzhuo Lu 	 */
36972a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
36982a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
36992a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
37002a977b89SWenzhuo Lu 			" for port %d.", pid);
37012a977b89SWenzhuo Lu 		return -1;
37022a977b89SWenzhuo Lu 	}
37032a977b89SWenzhuo Lu 
37042a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
37052a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
37062a977b89SWenzhuo Lu 	 */
37072a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
370886ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
370986ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
371086ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
371186ef65eeSBernard Iremonger 		} else {
37122a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
37132a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
371486ef65eeSBernard Iremonger 		}
37152a977b89SWenzhuo Lu 	} else {
37162a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
37172a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
37182a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
37192a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
37202a977b89SWenzhuo Lu 		} else {
37212a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
37222a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
37232a977b89SWenzhuo Lu 
37242a977b89SWenzhuo Lu 		}
37252a977b89SWenzhuo Lu 	}
37262a977b89SWenzhuo Lu 	rx_free_thresh = 64;
37272a977b89SWenzhuo Lu 
3728013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3729013af9b6SIntel 
3730f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3731013af9b6SIntel 	/* VLAN filter */
37320074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
37331a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3734013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3735013af9b6SIntel 
3736a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3737a5279d25SIgor Romanov 	if (retval != 0)
3738a5279d25SIgor Romanov 		return retval;
3739a5279d25SIgor Romanov 
3740013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
3741013af9b6SIntel 
37427741e4cfSIntel 	rte_port->dcb_flag = 1;
37437741e4cfSIntel 
3744013af9b6SIntel 	return 0;
3745af75078fSIntel }
3746af75078fSIntel 
3747ffc468ffSTetsuya Mukawa static void
3748ffc468ffSTetsuya Mukawa init_port(void)
3749ffc468ffSTetsuya Mukawa {
37501b9f2746SGregory Etelson 	int i;
37511b9f2746SGregory Etelson 
3752ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3753ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3754ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3755ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3756ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3757ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3758ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3759ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3760ffc468ffSTetsuya Mukawa 	}
37611b9f2746SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
37621b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
376329841336SPhil Yang 	/* Initialize ports NUMA structures */
376429841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
376529841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
376629841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3767ffc468ffSTetsuya Mukawa }
3768ffc468ffSTetsuya Mukawa 
3769d3a274ceSZhihong Wang static void
3770d3a274ceSZhihong Wang force_quit(void)
3771d3a274ceSZhihong Wang {
3772d3a274ceSZhihong Wang 	pmd_test_exit();
3773d3a274ceSZhihong Wang 	prompt_exit();
3774d3a274ceSZhihong Wang }
3775d3a274ceSZhihong Wang 
3776d3a274ceSZhihong Wang static void
3777cfea1f30SPablo de Lara print_stats(void)
3778cfea1f30SPablo de Lara {
3779cfea1f30SPablo de Lara 	uint8_t i;
3780cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3781cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3782cfea1f30SPablo de Lara 
3783cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3784cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3785cfea1f30SPablo de Lara 
3786cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3787cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3788cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3789683d1e82SIgor Romanov 
3790683d1e82SIgor Romanov 	fflush(stdout);
3791cfea1f30SPablo de Lara }
3792cfea1f30SPablo de Lara 
3793cfea1f30SPablo de Lara static void
3794d3a274ceSZhihong Wang signal_handler(int signum)
3795d3a274ceSZhihong Wang {
3796d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
3797d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
3798d3a274ceSZhihong Wang 				signum);
3799102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
3800102b7329SReshma Pattan 		/* uninitialize packet capture framework */
3801102b7329SReshma Pattan 		rte_pdump_uninit();
3802102b7329SReshma Pattan #endif
380362d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
38048b36297dSAmit Gupta 		if (latencystats_enabled != 0)
380562d3216dSReshma Pattan 			rte_latencystats_uninit();
380662d3216dSReshma Pattan #endif
3807d3a274ceSZhihong Wang 		force_quit();
3808d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
3809d9a191a0SPhil Yang 		f_quit = 1;
3810d3a274ceSZhihong Wang 		/* exit with the expected status */
3811d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
3812d3a274ceSZhihong Wang 		kill(getpid(), signum);
3813d3a274ceSZhihong Wang 	}
3814d3a274ceSZhihong Wang }
3815d3a274ceSZhihong Wang 
3816af75078fSIntel int
3817af75078fSIntel main(int argc, char** argv)
3818af75078fSIntel {
3819af75078fSIntel 	int diag;
3820f8244c63SZhiyong Yang 	portid_t port_id;
38214918a357SXiaoyun Li 	uint16_t count;
3822fb73e096SJeff Guo 	int ret;
3823af75078fSIntel 
3824d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
3825d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
3826d3a274ceSZhihong Wang 
3827285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
3828285fd101SOlivier Matz 	if (testpmd_logtype < 0)
382916267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type");
3830285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3831285fd101SOlivier Matz 
38329201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
38339201806eSStephen Hemminger 	if (diag < 0)
383416267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
383516267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
38369201806eSStephen Hemminger 
3837a87ab9f7SStephen Hemminger 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
383816267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE,
383916267ceeSStephen Hemminger 			 "Secondary process type not supported.\n");
3840a87ab9f7SStephen Hemminger 
384197b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
384297b5d8b5SThomas Monjalon 	if (ret != 0)
384316267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
384497b5d8b5SThomas Monjalon 
38454aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP
38464aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
3847e9436f54STiwei Bie 	rte_pdump_init();
38484aa0d012SAnatoly Burakov #endif
38494aa0d012SAnatoly Burakov 
38504918a357SXiaoyun Li 	count = 0;
38514918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
38524918a357SXiaoyun Li 		ports_ids[count] = port_id;
38534918a357SXiaoyun Li 		count++;
38544918a357SXiaoyun Li 	}
38554918a357SXiaoyun Li 	nb_ports = (portid_t) count;
38564aa0d012SAnatoly Burakov 	if (nb_ports == 0)
38574aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
38584aa0d012SAnatoly Burakov 
38594aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
38604aa0d012SAnatoly Burakov 	init_port();
38614aa0d012SAnatoly Burakov 
38624aa0d012SAnatoly Burakov 	set_def_fwd_config();
38634aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
386416267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
386516267ceeSStephen Hemminger 			 "Check the core mask argument\n");
38664aa0d012SAnatoly Burakov 
3867e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
386854f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
3869e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
3870e505d84cSAnatoly Burakov #endif
3871e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS
3872e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
3873e505d84cSAnatoly Burakov #endif
3874e505d84cSAnatoly Burakov 
3875fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
38765fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
3877fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
3878fb7b8b32SAnatoly Burakov #else
3879fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
3880fb7b8b32SAnatoly Burakov #endif
3881fb7b8b32SAnatoly Burakov 
3882e505d84cSAnatoly Burakov 	argc -= diag;
3883e505d84cSAnatoly Burakov 	argv += diag;
3884e505d84cSAnatoly Burakov 	if (argc > 1)
3885e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
3886e505d84cSAnatoly Burakov 
3887e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3888285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
38891c036b16SEelco Chaudron 			strerror(errno));
38901c036b16SEelco Chaudron 	}
38911c036b16SEelco Chaudron 
389299cabef0SPablo de Lara 	if (tx_first && interactive)
389399cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
389499cabef0SPablo de Lara 				"interactive mode.\n");
38958820cba4SDavid Hunt 
38968820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
38978820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
38988820cba4SDavid Hunt 				" using tx_first. Disabling.\n");
38998820cba4SDavid Hunt 		lsc_interrupt = 0;
39008820cba4SDavid Hunt 	}
39018820cba4SDavid Hunt 
39025a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
39035a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
39045a8fb55cSReshma Pattan 
39055a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3906af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3907af75078fSIntel 		       "but nb_txq=%d will prevent to fully test it.\n",
3908af75078fSIntel 		       nb_rxq, nb_txq);
3909af75078fSIntel 
3910af75078fSIntel 	init_config();
3911fb73e096SJeff Guo 
3912fb73e096SJeff Guo 	if (hot_plug) {
39132049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
3914fb73e096SJeff Guo 		if (ret) {
39152049c511SJeff Guo 			RTE_LOG(ERR, EAL,
39162049c511SJeff Guo 				"fail to enable hotplug handling.");
3917fb73e096SJeff Guo 			return -1;
3918fb73e096SJeff Guo 		}
3919fb73e096SJeff Guo 
39202049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
39212049c511SJeff Guo 		if (ret) {
39222049c511SJeff Guo 			RTE_LOG(ERR, EAL,
39232049c511SJeff Guo 				"fail to start device event monitoring.");
39242049c511SJeff Guo 			return -1;
39252049c511SJeff Guo 		}
39262049c511SJeff Guo 
39272049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
3928cc1bf307SJeff Guo 			dev_event_callback, NULL);
39292049c511SJeff Guo 		if (ret) {
39302049c511SJeff Guo 			RTE_LOG(ERR, EAL,
39312049c511SJeff Guo 				"fail  to register device event callback\n");
39322049c511SJeff Guo 			return -1;
39332049c511SJeff Guo 		}
3934fb73e096SJeff Guo 	}
3935fb73e096SJeff Guo 
39366937d210SStephen Hemminger 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3937148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3938af75078fSIntel 
3939ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
394034fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
394134fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
394234fc1051SIvan Ilchenko 		if (ret != 0)
394334fc1051SIvan Ilchenko 			printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
394434fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
394534fc1051SIvan Ilchenko 	}
3946af75078fSIntel 
39477e4441c8SRemy Horton 	/* Init metrics library */
39487e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
39497e4441c8SRemy Horton 
395062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
395162d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
395262d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
395362d3216dSReshma Pattan 		if (ret)
395462d3216dSReshma Pattan 			printf("Warning: latencystats init()"
395562d3216dSReshma Pattan 				" returned error %d\n",	ret);
395662d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
395762d3216dSReshma Pattan 			latencystats_lcore_id);
395862d3216dSReshma Pattan 	}
395962d3216dSReshma Pattan #endif
396062d3216dSReshma Pattan 
39617e4441c8SRemy Horton 	/* Setup bitrate stats */
396254f89e3dSBruce Richardson #ifdef RTE_LIBRTE_BITRATESTATS
3963e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
39647e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
39657e4441c8SRemy Horton 		if (bitrate_data == NULL)
3966e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
3967e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
39687e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
3969e25e6c70SRemy Horton 	}
39707e4441c8SRemy Horton #endif
39717e4441c8SRemy Horton 
39720d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
397381ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
397481ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
397581ef862bSAllain Legacy 
3976ca7feb22SCyril Chemparathy 	if (interactive == 1) {
3977ca7feb22SCyril Chemparathy 		if (auto_start) {
3978ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
3979ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
3980ca7feb22SCyril Chemparathy 		}
3981af75078fSIntel 		prompt();
39820de738cfSJiayu Hu 		pmd_test_exit();
3983ca7feb22SCyril Chemparathy 	} else
39840d56cb81SThomas Monjalon #endif
39850d56cb81SThomas Monjalon 	{
3986af75078fSIntel 		char c;
3987af75078fSIntel 		int rc;
3988af75078fSIntel 
3989d9a191a0SPhil Yang 		f_quit = 0;
3990d9a191a0SPhil Yang 
3991af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
399299cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
3993cfea1f30SPablo de Lara 		if (stats_period != 0) {
3994cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3995cfea1f30SPablo de Lara 			uint64_t timer_period;
3996cfea1f30SPablo de Lara 
3997cfea1f30SPablo de Lara 			/* Convert to number of cycles */
3998cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
3999cfea1f30SPablo de Lara 
4000d9a191a0SPhil Yang 			while (f_quit == 0) {
4001cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
4002cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
4003cfea1f30SPablo de Lara 
4004cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
4005cfea1f30SPablo de Lara 					print_stats();
4006cfea1f30SPablo de Lara 					/* Reset the timer */
4007cfea1f30SPablo de Lara 					diff_time = 0;
4008cfea1f30SPablo de Lara 				}
4009cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
4010cfea1f30SPablo de Lara 				prev_time = cur_time;
4011cfea1f30SPablo de Lara 				sleep(1);
4012cfea1f30SPablo de Lara 			}
4013cfea1f30SPablo de Lara 		}
4014cfea1f30SPablo de Lara 
4015af75078fSIntel 		printf("Press enter to exit\n");
4016af75078fSIntel 		rc = read(0, &c, 1);
4017d3a274ceSZhihong Wang 		pmd_test_exit();
4018af75078fSIntel 		if (rc < 0)
4019af75078fSIntel 			return 1;
4020af75078fSIntel 	}
4021af75078fSIntel 
40225e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
40235e516c89SStephen Hemminger 	if (ret != 0)
40245e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
40255e516c89SStephen Hemminger 			 "EAL cleanup failed: %s\n", strerror(-ret));
40265e516c89SStephen Hemminger 
40275e516c89SStephen Hemminger 	return EXIT_SUCCESS;
4028af75078fSIntel }
4029