xref: /dpdk/app/test-pmd/testpmd.c (revision 0f93edbf7c874480e21e365f527fecdb305984b9)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30af75078fSIntel #include <rte_memory.h>
31af75078fSIntel #include <rte_memcpy.h>
32af75078fSIntel #include <rte_launch.h>
33af75078fSIntel #include <rte_eal.h>
34284c908cSGaetan Rivet #include <rte_alarm.h>
35af75078fSIntel #include <rte_per_lcore.h>
36af75078fSIntel #include <rte_lcore.h>
37af75078fSIntel #include <rte_atomic.h>
38af75078fSIntel #include <rte_branch_prediction.h>
39af75078fSIntel #include <rte_mempool.h>
40af75078fSIntel #include <rte_malloc.h>
41af75078fSIntel #include <rte_mbuf.h>
420e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
43af75078fSIntel #include <rte_interrupts.h>
44af75078fSIntel #include <rte_pci.h>
45af75078fSIntel #include <rte_ether.h>
46af75078fSIntel #include <rte_ethdev.h>
47edab33b1STetsuya Mukawa #include <rte_dev.h>
48af75078fSIntel #include <rte_string_fns.h>
49a8d0d473SBruce Richardson #ifdef RTE_NET_IXGBE
50e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
51e261265eSRadu Nicolau #endif
52a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
53102b7329SReshma Pattan #include <rte_pdump.h>
54102b7329SReshma Pattan #endif
55938a184aSAdrien Mazarguil #include <rte_flow.h>
567e4441c8SRemy Horton #include <rte_metrics.h>
57a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
587e4441c8SRemy Horton #include <rte_bitrate.h>
597e4441c8SRemy Horton #endif
60a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
6162d3216dSReshma Pattan #include <rte_latencystats.h>
6262d3216dSReshma Pattan #endif
63af75078fSIntel 
64af75078fSIntel #include "testpmd.h"
65af75078fSIntel 
66c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
67c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
69c7f5dba7SAnatoly Burakov #else
70c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
71c7f5dba7SAnatoly Burakov #endif
72c7f5dba7SAnatoly Burakov 
73c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
74c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
75c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
76c7f5dba7SAnatoly Burakov #else
77c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
78c7f5dba7SAnatoly Burakov #endif
79c7f5dba7SAnatoly Burakov 
80c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
8172512e18SViacheslav Ovsiienko #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86cb056611SStephen Hemminger /* use main core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
1406d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
169af75078fSIntel /*
170af75078fSIntel  * Forwarding engines.
171af75078fSIntel  */
172af75078fSIntel struct fwd_engine * fwd_engines[] = {
173af75078fSIntel 	&io_fwd_engine,
174af75078fSIntel 	&mac_fwd_engine,
175d47388f1SCyril Chemparathy 	&mac_swap_engine,
176e9e23a61SCyril Chemparathy 	&flow_gen_engine,
177af75078fSIntel 	&rx_only_engine,
178af75078fSIntel 	&tx_only_engine,
179af75078fSIntel 	&csum_fwd_engine,
180168dfa61SIvan Boule 	&icmp_echo_engine,
1813c156061SJens Freimann 	&noisy_vnf_engine,
1822564abdaSShiri Kuzin 	&five_tuple_swap_fwd_engine,
183af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
184af75078fSIntel 	&ieee1588_fwd_engine,
185af75078fSIntel #endif
186af75078fSIntel 	NULL,
187af75078fSIntel };
188af75078fSIntel 
18926cbb419SViacheslav Ovsiienko struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
19059fcf854SShahaf Shuler uint16_t mempool_flags;
191401b744dSShahaf Shuler 
192af75078fSIntel struct fwd_config cur_fwd_config;
193af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
194bf56fce1SZhihong Wang uint32_t retry_enabled;
195bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
196bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197af75078fSIntel 
19826cbb419SViacheslav Ovsiienko uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
19926cbb419SViacheslav Ovsiienko uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
20026cbb419SViacheslav Ovsiienko 	DEFAULT_MBUF_DATA_SIZE
20126cbb419SViacheslav Ovsiienko }; /**< Mbuf data space size. */
202c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
203c8798818SIntel                                       * specified on command-line. */
204cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
205d9a191a0SPhil Yang 
206d9a191a0SPhil Yang /*
207d9a191a0SPhil Yang  * In a container, the process running with the 'stats-period' option cannot be
208d9a191a0SPhil Yang  * terminated directly. Set this flag to exit the stats period loop after a SIGINT/SIGTERM.
209d9a191a0SPhil Yang  */
210d9a191a0SPhil Yang uint8_t f_quit;
211d9a191a0SPhil Yang 
212af75078fSIntel /*
2130f2096d7SViacheslav Ovsiienko  * Configuration of packet segments used to scatter received packets
2140f2096d7SViacheslav Ovsiienko  * if some of split features is configured.
2150f2096d7SViacheslav Ovsiienko  */
2160f2096d7SViacheslav Ovsiienko uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
2170f2096d7SViacheslav Ovsiienko uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
21891c78e09SViacheslav Ovsiienko uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
21991c78e09SViacheslav Ovsiienko uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
2200f2096d7SViacheslav Ovsiienko 
2210f2096d7SViacheslav Ovsiienko /*
222af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
223af75078fSIntel  */
224af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
225af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
226af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
227af75078fSIntel };
228af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
229af75078fSIntel 
23079bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
23179bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
23279bec05bSKonstantin Ananyev 
23382010ef5SYongseok Koh uint8_t txonly_multi_flow;
23482010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
23582010ef5SYongseok Koh 
2364940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter;
2374940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */
2384940344dSViacheslav Ovsiienko 
2394940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra;
2404940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */
2414940344dSViacheslav Ovsiienko 
242af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
243e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
244af75078fSIntel 
245900550deSIntel /* whether the current configuration is in DCB mode; 0 means it is not in DCB mode */
246900550deSIntel uint8_t dcb_config = 0;
247900550deSIntel 
248900550deSIntel /* Whether the dcb is in testing status */
249900550deSIntel uint8_t dcb_test = 0;
250900550deSIntel 
251af75078fSIntel /*
252af75078fSIntel  * Configurable number of RX/TX queues.
253af75078fSIntel  */
2541c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
255af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
256af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
257af75078fSIntel 
258af75078fSIntel /*
259af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2608599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
261af75078fSIntel  */
2628599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2638599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
264af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
265af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
266af75078fSIntel 
267f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
268af75078fSIntel /*
269af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
270af75078fSIntel  */
271af75078fSIntel 
272f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
273f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
274f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
275af75078fSIntel 
276f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
277f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
278f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
279af75078fSIntel 
280af75078fSIntel /*
281af75078fSIntel  * Configurable value of RX free threshold.
282af75078fSIntel  */
283f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
284af75078fSIntel 
285af75078fSIntel /*
286ce8d5614SIntel  * Configurable value of RX drop enable.
287ce8d5614SIntel  */
288f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
289ce8d5614SIntel 
290ce8d5614SIntel /*
291af75078fSIntel  * Configurable value of TX free threshold.
292af75078fSIntel  */
293f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
294af75078fSIntel 
295af75078fSIntel /*
296af75078fSIntel  * Configurable value of TX RS bit threshold.
297af75078fSIntel  */
298f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
299af75078fSIntel 
300af75078fSIntel /*
3013c156061SJens Freimann  * Configurable value of buffered packets before sending.
3023c156061SJens Freimann  */
3033c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
3043c156061SJens Freimann 
3053c156061SJens Freimann /*
3063c156061SJens Freimann  * Configurable value of packet buffer timeout.
3073c156061SJens Freimann  */
3083c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
3093c156061SJens Freimann 
3103c156061SJens Freimann /*
3113c156061SJens Freimann  * Configurable value for size of VNF internal memory area
3123c156061SJens Freimann  * used for simulating noisy neighbour behaviour
3133c156061SJens Freimann  */
3143c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
3153c156061SJens Freimann 
3163c156061SJens Freimann /*
3173c156061SJens Freimann  * Configurable value of number of random writes done in
3183c156061SJens Freimann  * VNF simulation memory area.
3193c156061SJens Freimann  */
3203c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3213c156061SJens Freimann 
3223c156061SJens Freimann /*
3233c156061SJens Freimann  * Configurable value of number of random reads done in
3243c156061SJens Freimann  * VNF simulation memory area.
3253c156061SJens Freimann  */
3263c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3273c156061SJens Freimann 
3283c156061SJens Freimann /*
3293c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3303c156061SJens Freimann  * VNF simulation memory area.
3313c156061SJens Freimann  */
3323c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3333c156061SJens Freimann 
3343c156061SJens Freimann /*
335af75078fSIntel  * Receive Side Scaling (RSS) configuration.
336af75078fSIntel  */
3378a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
338af75078fSIntel 
339af75078fSIntel /*
340af75078fSIntel  * Port topology configuration
341af75078fSIntel  */
342af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
343af75078fSIntel 
3447741e4cfSIntel /*
3457741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3467741e4cfSIntel  */
3477741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3487741e4cfSIntel 
349af75078fSIntel /*
3507ee3e944SVasily Philipov  * Flow API isolated mode.
3517ee3e944SVasily Philipov  */
3527ee3e944SVasily Philipov uint8_t flow_isolate_all;
3537ee3e944SVasily Philipov 
3547ee3e944SVasily Philipov /*
355bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
356bc202406SDavid Marchand  */
357bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
358bc202406SDavid Marchand 
359bc202406SDavid Marchand /*
3606937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3616937d210SStephen Hemminger  */
3626937d210SStephen Hemminger uint8_t no_device_start = 0;
3636937d210SStephen Hemminger 
3646937d210SStephen Hemminger /*
3658ea656f8SGaetan Rivet  * Enable link status change notification
3668ea656f8SGaetan Rivet  */
3678ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3688ea656f8SGaetan Rivet 
3698ea656f8SGaetan Rivet /*
370284c908cSGaetan Rivet  * Enable device removal notification.
371284c908cSGaetan Rivet  */
372284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
373284c908cSGaetan Rivet 
374fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
375fb73e096SJeff Guo 
3764f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3774f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3784f1ed78eSThomas Monjalon 
379b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
380b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
381b0a9354aSPavan Nikhilesh 
38201817b10SBing Zhao /* Hairpin ports configuration mode. */
38301817b10SBing Zhao uint16_t hairpin_mode;
38401817b10SBing Zhao 
38597b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
38697b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
38797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
38897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
38997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
39097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
39197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
39297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
39397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
39497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
39597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
39697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
3970e459ffaSDong Zhou 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
39897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
39997b5d8b5SThomas Monjalon };
40097b5d8b5SThomas Monjalon 
401284c908cSGaetan Rivet /*
4023af72783SGaetan Rivet  * Display or mask ether events
4033af72783SGaetan Rivet  * Default to all events except VF_MBOX
4043af72783SGaetan Rivet  */
4053af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
4063af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
4073af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
4083af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
409badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
4103af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
4110e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
4120e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
413e505d84cSAnatoly Burakov /*
414e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
415e505d84cSAnatoly Burakov  */
416e505d84cSAnatoly Burakov int do_mlockall = 0;
4173af72783SGaetan Rivet 
4183af72783SGaetan Rivet /*
4197b7e5ba7SIntel  * NIC bypass mode configuration options.
4207b7e5ba7SIntel  */
4217b7e5ba7SIntel 
422a8d0d473SBruce Richardson #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
4237b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
424e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4257b7e5ba7SIntel #endif
4267b7e5ba7SIntel 
427e261265eSRadu Nicolau 
428a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
42962d3216dSReshma Pattan 
43062d3216dSReshma Pattan /*
43162d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
43262d3216dSReshma Pattan  */
43362d3216dSReshma Pattan uint8_t latencystats_enabled;
43462d3216dSReshma Pattan 
43562d3216dSReshma Pattan /*
43662d3216dSReshma Pattan  * Lcore ID to service latency statistics.
43762d3216dSReshma Pattan  */
43862d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
43962d3216dSReshma Pattan 
44062d3216dSReshma Pattan #endif
44162d3216dSReshma Pattan 
4427b7e5ba7SIntel /*
443af75078fSIntel  * Ethernet device configuration.
444af75078fSIntel  */
445af75078fSIntel struct rte_eth_rxmode rx_mode = {
44635b2d13fSOlivier Matz 	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
44735b2d13fSOlivier Matz 		/**< Default maximum frame length. */
448af75078fSIntel };
449af75078fSIntel 
45007e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
45107e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
45207e5f7bdSShahaf Shuler };
453fd8c20aaSShahaf Shuler 
454af75078fSIntel struct rte_fdir_conf fdir_conf = {
455af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
456af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
457af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
458d9d5e6f2SJingjing Wu 	.mask = {
45926f579aaSWei Zhao 		.vlan_tci_mask = 0xFFEF,
460d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
461d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
462d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
463d9d5e6f2SJingjing Wu 		},
464d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
465d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
466d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
467d9d5e6f2SJingjing Wu 		},
468d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
469d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
47047b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
47147b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
47247b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
473d9d5e6f2SJingjing Wu 	},
474af75078fSIntel 	.drop_queue = 127,
475af75078fSIntel };
476af75078fSIntel 
4772950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
478af75078fSIntel 
479a4fd5eeeSElza Mathew /*
480a4fd5eeeSElza Mathew  * Display zero values by default for xstats
481a4fd5eeeSElza Mathew  */
482a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
483a4fd5eeeSElza Mathew 
484bc700b67SDharmik Thakkar /*
485bc700b67SDharmik Thakkar  * Measure of CPU cycles disabled by default
486bc700b67SDharmik Thakkar  */
487bc700b67SDharmik Thakkar uint8_t record_core_cycles;
488bc700b67SDharmik Thakkar 
4890e4b1963SDharmik Thakkar /*
4900e4b1963SDharmik Thakkar  * Display of RX and TX bursts disabled by default
4910e4b1963SDharmik Thakkar  */
4920e4b1963SDharmik Thakkar uint8_t record_burst_stats;
4930e4b1963SDharmik Thakkar 
494c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
495c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4967acf894dSStephen Hurd 
497a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4987e4441c8SRemy Horton /* Bitrate statistics */
4997e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
500e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
501e25e6c70SRemy Horton uint8_t bitrate_enabled;
502e25e6c70SRemy Horton #endif
5037e4441c8SRemy Horton 
504b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
505b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
506b40f8d78SJiayu Hu 
507f9295aa2SXiaoyu Min /*
508f9295aa2SXiaoyu Min  * hexadecimal bitmask of RX mq mode can be enabled.
509f9295aa2SXiaoyu Min  */
510f9295aa2SXiaoyu Min enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
511f9295aa2SXiaoyu Min 
512ed30d9b6SIntel /* Forward function declarations */
513c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
514edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
515f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
51676ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
517d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
518cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
519fb73e096SJeff Guo 				enum rte_dev_event_type type,
520fb73e096SJeff Guo 				void *param);
521ce8d5614SIntel 
522ce8d5614SIntel /*
523ce8d5614SIntel  * Check if all the ports are started.
524ce8d5614SIntel  * If yes, return positive value. If not, return zero.
525ce8d5614SIntel  */
526ce8d5614SIntel static int all_ports_started(void);
527ed30d9b6SIntel 
52852f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
52935b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
53052f38a20SJiayu Hu 
531b57b66a9SOri Kam /* Holds the registered mbuf dynamic flags names. */
532b57b66a9SOri Kam char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
533b57b66a9SOri Kam 
534af75078fSIntel /*
53598a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
536c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
537c9cafcc8SShahaf Shuler  */
538c9cafcc8SShahaf Shuler int
539c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
540c9cafcc8SShahaf Shuler {
541c9cafcc8SShahaf Shuler 	unsigned int i;
542c9cafcc8SShahaf Shuler 
543c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
544c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
545c9cafcc8SShahaf Shuler 			return 0;
546c9cafcc8SShahaf Shuler 	}
547c9cafcc8SShahaf Shuler 	return 1;
548c9cafcc8SShahaf Shuler }
549c9cafcc8SShahaf Shuler 
550c9cafcc8SShahaf Shuler /*
551af75078fSIntel  * Setup default configuration.
552af75078fSIntel  */
553af75078fSIntel static void
554af75078fSIntel set_default_fwd_lcores_config(void)
555af75078fSIntel {
556af75078fSIntel 	unsigned int i;
557af75078fSIntel 	unsigned int nb_lc;
5587acf894dSStephen Hurd 	unsigned int sock_num;
559af75078fSIntel 
560af75078fSIntel 	nb_lc = 0;
561af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
562dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
563dbfb8ec7SPhil Yang 			continue;
564c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
565c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
566c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
567c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
568c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
569c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
570c9cafcc8SShahaf Shuler 			}
571c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5727acf894dSStephen Hurd 		}
573cb056611SStephen Hemminger 		if (i == rte_get_main_lcore())
574f54fe5eeSStephen Hurd 			continue;
575f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
576af75078fSIntel 	}
577af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
578af75078fSIntel 	nb_cfg_lcores = nb_lcores;
579af75078fSIntel 	nb_fwd_lcores = 1;
580af75078fSIntel }
581af75078fSIntel 
582af75078fSIntel static void
583af75078fSIntel set_def_peer_eth_addrs(void)
584af75078fSIntel {
585af75078fSIntel 	portid_t i;
586af75078fSIntel 
587af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
58835b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
589af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
590af75078fSIntel 	}
591af75078fSIntel }
592af75078fSIntel 
593af75078fSIntel static void
594af75078fSIntel set_default_fwd_ports_config(void)
595af75078fSIntel {
596af75078fSIntel 	portid_t pt_id;
59765a7360cSMatan Azrad 	int i = 0;
598af75078fSIntel 
599effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
60065a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
601af75078fSIntel 
602effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
603effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
604effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
605effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
606effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
607effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
608effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
609effdb8bbSPhil Yang 			}
610effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
611effdb8bbSPhil Yang 		}
612effdb8bbSPhil Yang 	}
613effdb8bbSPhil Yang 
614af75078fSIntel 	nb_cfg_ports = nb_ports;
615af75078fSIntel 	nb_fwd_ports = nb_ports;
616af75078fSIntel }
617af75078fSIntel 
618af75078fSIntel void
619af75078fSIntel set_def_fwd_config(void)
620af75078fSIntel {
621af75078fSIntel 	set_default_fwd_lcores_config();
622af75078fSIntel 	set_def_peer_eth_addrs();
623af75078fSIntel 	set_default_fwd_ports_config();
624af75078fSIntel }
625af75078fSIntel 
626c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
627c7f5dba7SAnatoly Burakov static int
628c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
629c7f5dba7SAnatoly Burakov {
630c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
631c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
632c7f5dba7SAnatoly Burakov 
633c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
634c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
635c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
636c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
637c7f5dba7SAnatoly Burakov 	 */
638c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
639c7f5dba7SAnatoly Burakov 
640c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
641c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
642c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
643c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
644c7f5dba7SAnatoly Burakov 		return -1;
645c7f5dba7SAnatoly Burakov 	}
646c7f5dba7SAnatoly Burakov 
647c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
648c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
649c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
650c7f5dba7SAnatoly Burakov 
651c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
652c7f5dba7SAnatoly Burakov 
653c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
654c7f5dba7SAnatoly Burakov 
655c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
656c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
657c7f5dba7SAnatoly Burakov 		return -1;
658c7f5dba7SAnatoly Burakov 	}
659c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
660c7f5dba7SAnatoly Burakov 
661c7f5dba7SAnatoly Burakov 	return 0;
662c7f5dba7SAnatoly Burakov }
663c7f5dba7SAnatoly Burakov 
664c7f5dba7SAnatoly Burakov static int
665c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
666c7f5dba7SAnatoly Burakov {
667c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
668c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
669c7f5dba7SAnatoly Burakov 	 */
6709d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
671c7f5dba7SAnatoly Burakov 
672c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
673c7f5dba7SAnatoly Burakov }
674c7f5dba7SAnatoly Burakov 
675c7f5dba7SAnatoly Burakov static void *
676c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
677c7f5dba7SAnatoly Burakov {
678c7f5dba7SAnatoly Burakov 	void *addr;
679c7f5dba7SAnatoly Burakov 	int flags;
680c7f5dba7SAnatoly Burakov 
681c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
682c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
683c7f5dba7SAnatoly Burakov 	if (huge)
684c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
685c7f5dba7SAnatoly Burakov 
686c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
687c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
688c7f5dba7SAnatoly Burakov 		return NULL;
689c7f5dba7SAnatoly Burakov 
690c7f5dba7SAnatoly Burakov 	return addr;
691c7f5dba7SAnatoly Burakov }
692c7f5dba7SAnatoly Burakov 
/* Description of an externally allocated memory area, filled in by
 * create_extmem() and consumed by setup_extmem().
 */
struct extmem_param {
	void *addr;	/* base virtual address of the mapping */
	size_t len;	/* total length of the mapping, in bytes */
	size_t pgsz;	/* page size the mapping was created with */
	rte_iova_t *iova_table;	/* per-page IOVA addresses (malloc'd) */
	unsigned int iova_table_len;	/* number of entries in iova_table */
};
700c7f5dba7SAnatoly Burakov 
701c7f5dba7SAnatoly Burakov static int
702c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
703c7f5dba7SAnatoly Burakov 		bool huge)
704c7f5dba7SAnatoly Burakov {
705c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
706c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
707c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
708c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
709c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
710c7f5dba7SAnatoly Burakov 	void *addr;
711c7f5dba7SAnatoly Burakov 	int ret;
712c7f5dba7SAnatoly Burakov 
713c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
714c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
715c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
716c7f5dba7SAnatoly Burakov 			continue;
717c7f5dba7SAnatoly Burakov 
718c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
719c7f5dba7SAnatoly Burakov 
720c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
721c7f5dba7SAnatoly Burakov 		if (!huge)
722c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
723c7f5dba7SAnatoly Burakov 
724c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
725c7f5dba7SAnatoly Burakov 		if (ret < 0) {
726c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
727c7f5dba7SAnatoly Burakov 			return -1;
728c7f5dba7SAnatoly Burakov 		}
729c7f5dba7SAnatoly Burakov 
730c7f5dba7SAnatoly Burakov 		/* allocate our memory */
731c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
732c7f5dba7SAnatoly Burakov 
733c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
734c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
735c7f5dba7SAnatoly Burakov 		 * try another one.
736c7f5dba7SAnatoly Burakov 		 */
737c7f5dba7SAnatoly Burakov 		if (addr == NULL)
738c7f5dba7SAnatoly Burakov 			continue;
739c7f5dba7SAnatoly Burakov 
740c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
741c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
742c7f5dba7SAnatoly Burakov 
743c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
744c7f5dba7SAnatoly Burakov 
745c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
746c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
747c7f5dba7SAnatoly Burakov 			goto fail;
748c7f5dba7SAnatoly Burakov 		}
749c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
750c7f5dba7SAnatoly Burakov 		if (!huge)
751c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
752c7f5dba7SAnatoly Burakov 
753c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
754c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
755c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
756c7f5dba7SAnatoly Burakov 			size_t offset;
757c7f5dba7SAnatoly Burakov 			void *cur;
758c7f5dba7SAnatoly Burakov 
759c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
760c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
761c7f5dba7SAnatoly Burakov 
762c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
763c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
764c7f5dba7SAnatoly Burakov 
765c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
766c7f5dba7SAnatoly Burakov 
767c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
768c7f5dba7SAnatoly Burakov 		}
769c7f5dba7SAnatoly Burakov 
770c7f5dba7SAnatoly Burakov 		break;
771c7f5dba7SAnatoly Burakov 	}
772c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
773c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
774c7f5dba7SAnatoly Burakov 		return -1;
775c7f5dba7SAnatoly Burakov 
776c7f5dba7SAnatoly Burakov 	param->addr = addr;
777c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
778c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
779c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
780c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
781c7f5dba7SAnatoly Burakov 
782c7f5dba7SAnatoly Burakov 	return 0;
783c7f5dba7SAnatoly Burakov fail:
784c7f5dba7SAnatoly Burakov 	if (iovas)
785c7f5dba7SAnatoly Burakov 		free(iovas);
786c7f5dba7SAnatoly Burakov 	if (addr)
787c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
788c7f5dba7SAnatoly Burakov 
789c7f5dba7SAnatoly Burakov 	return -1;
790c7f5dba7SAnatoly Burakov }
791c7f5dba7SAnatoly Burakov 
792c7f5dba7SAnatoly Burakov static int
793c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
794c7f5dba7SAnatoly Burakov {
795c7f5dba7SAnatoly Burakov 	struct extmem_param param;
796c7f5dba7SAnatoly Burakov 	int socket_id, ret;
797c7f5dba7SAnatoly Burakov 
798c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
799c7f5dba7SAnatoly Burakov 
800c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
801c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
802c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
803c7f5dba7SAnatoly Burakov 		/* create our heap */
804c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
805c7f5dba7SAnatoly Burakov 		if (ret < 0) {
806c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
807c7f5dba7SAnatoly Burakov 			return -1;
808c7f5dba7SAnatoly Burakov 		}
809c7f5dba7SAnatoly Burakov 	}
810c7f5dba7SAnatoly Burakov 
811c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
812c7f5dba7SAnatoly Burakov 	if (ret < 0) {
813c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
814c7f5dba7SAnatoly Burakov 		return -1;
815c7f5dba7SAnatoly Burakov 	}
816c7f5dba7SAnatoly Burakov 
817c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
818c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
819c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
820c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
821c7f5dba7SAnatoly Burakov 
822c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
823c7f5dba7SAnatoly Burakov 
824c7f5dba7SAnatoly Burakov 	/* not needed any more */
825c7f5dba7SAnatoly Burakov 	free(param.iova_table);
826c7f5dba7SAnatoly Burakov 
827c7f5dba7SAnatoly Burakov 	if (ret < 0) {
828c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
829c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
830c7f5dba7SAnatoly Burakov 		return -1;
831c7f5dba7SAnatoly Burakov 	}
832c7f5dba7SAnatoly Burakov 
833c7f5dba7SAnatoly Burakov 	/* success */
834c7f5dba7SAnatoly Burakov 
835c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
836c7f5dba7SAnatoly Burakov 			param.len >> 20);
837c7f5dba7SAnatoly Burakov 
838c7f5dba7SAnatoly Burakov 	return 0;
839c7f5dba7SAnatoly Burakov }
8403a0968c8SShahaf Shuler static void
8413a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8423a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8433a0968c8SShahaf Shuler {
8443a0968c8SShahaf Shuler 	uint16_t pid = 0;
8453a0968c8SShahaf Shuler 	int ret;
8463a0968c8SShahaf Shuler 
8473a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8483a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8493a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8503a0968c8SShahaf Shuler 
8513a0968c8SShahaf Shuler 		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
8523a0968c8SShahaf Shuler 					memhdr->len);
8533a0968c8SShahaf Shuler 		if (ret) {
8543a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8553a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
8563a0968c8SShahaf Shuler 				    "for device %s\n",
8573a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8583a0968c8SShahaf Shuler 		}
8593a0968c8SShahaf Shuler 	}
8603a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
8613a0968c8SShahaf Shuler 	if (ret) {
8623a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8633a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
8643a0968c8SShahaf Shuler 	}
8653a0968c8SShahaf Shuler }
8663a0968c8SShahaf Shuler 
8673a0968c8SShahaf Shuler static void
8683a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8693a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8703a0968c8SShahaf Shuler {
8713a0968c8SShahaf Shuler 	uint16_t pid = 0;
8723a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
8733a0968c8SShahaf Shuler 	int ret;
8743a0968c8SShahaf Shuler 
8753a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
8763a0968c8SShahaf Shuler 				  page_size);
8773a0968c8SShahaf Shuler 	if (ret) {
8783a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8793a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
8803a0968c8SShahaf Shuler 		return;
8813a0968c8SShahaf Shuler 	}
8823a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8833a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8843a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8853a0968c8SShahaf Shuler 
8863a0968c8SShahaf Shuler 		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
8873a0968c8SShahaf Shuler 				      memhdr->len);
8883a0968c8SShahaf Shuler 		if (ret) {
8893a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8903a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
8913a0968c8SShahaf Shuler 				    "for device %s\n",
8923a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8933a0968c8SShahaf Shuler 		}
8943a0968c8SShahaf Shuler 	}
8953a0968c8SShahaf Shuler }
896c7f5dba7SAnatoly Burakov 
89772512e18SViacheslav Ovsiienko static unsigned int
89872512e18SViacheslav Ovsiienko setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
89972512e18SViacheslav Ovsiienko 	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
90072512e18SViacheslav Ovsiienko {
90172512e18SViacheslav Ovsiienko 	struct rte_pktmbuf_extmem *xmem;
90272512e18SViacheslav Ovsiienko 	unsigned int ext_num, zone_num, elt_num;
90372512e18SViacheslav Ovsiienko 	uint16_t elt_size;
90472512e18SViacheslav Ovsiienko 
90572512e18SViacheslav Ovsiienko 	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
90672512e18SViacheslav Ovsiienko 	elt_num = EXTBUF_ZONE_SIZE / elt_size;
90772512e18SViacheslav Ovsiienko 	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
90872512e18SViacheslav Ovsiienko 
90972512e18SViacheslav Ovsiienko 	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
91072512e18SViacheslav Ovsiienko 	if (xmem == NULL) {
91172512e18SViacheslav Ovsiienko 		TESTPMD_LOG(ERR, "Cannot allocate memory for "
91272512e18SViacheslav Ovsiienko 				 "external buffer descriptors\n");
91372512e18SViacheslav Ovsiienko 		*ext_mem = NULL;
91472512e18SViacheslav Ovsiienko 		return 0;
91572512e18SViacheslav Ovsiienko 	}
91672512e18SViacheslav Ovsiienko 	for (ext_num = 0; ext_num < zone_num; ext_num++) {
91772512e18SViacheslav Ovsiienko 		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
91872512e18SViacheslav Ovsiienko 		const struct rte_memzone *mz;
91972512e18SViacheslav Ovsiienko 		char mz_name[RTE_MEMZONE_NAMESIZE];
92072512e18SViacheslav Ovsiienko 		int ret;
92172512e18SViacheslav Ovsiienko 
92272512e18SViacheslav Ovsiienko 		ret = snprintf(mz_name, sizeof(mz_name),
92372512e18SViacheslav Ovsiienko 			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
92472512e18SViacheslav Ovsiienko 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
92572512e18SViacheslav Ovsiienko 			errno = ENAMETOOLONG;
92672512e18SViacheslav Ovsiienko 			ext_num = 0;
92772512e18SViacheslav Ovsiienko 			break;
92872512e18SViacheslav Ovsiienko 		}
92972512e18SViacheslav Ovsiienko 		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
93072512e18SViacheslav Ovsiienko 						 socket_id,
93172512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_IOVA_CONTIG |
93272512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_1GB |
93372512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_SIZE_HINT_ONLY,
93472512e18SViacheslav Ovsiienko 						 EXTBUF_ZONE_SIZE);
93572512e18SViacheslav Ovsiienko 		if (mz == NULL) {
93672512e18SViacheslav Ovsiienko 			/*
93772512e18SViacheslav Ovsiienko 			 * The caller exits on external buffer creation
93872512e18SViacheslav Ovsiienko 			 * error, so there is no need to free memzones.
93972512e18SViacheslav Ovsiienko 			 */
94072512e18SViacheslav Ovsiienko 			errno = ENOMEM;
94172512e18SViacheslav Ovsiienko 			ext_num = 0;
94272512e18SViacheslav Ovsiienko 			break;
94372512e18SViacheslav Ovsiienko 		}
94472512e18SViacheslav Ovsiienko 		xseg->buf_ptr = mz->addr;
94572512e18SViacheslav Ovsiienko 		xseg->buf_iova = mz->iova;
94672512e18SViacheslav Ovsiienko 		xseg->buf_len = EXTBUF_ZONE_SIZE;
94772512e18SViacheslav Ovsiienko 		xseg->elt_size = elt_size;
94872512e18SViacheslav Ovsiienko 	}
94972512e18SViacheslav Ovsiienko 	if (ext_num == 0 && xmem != NULL) {
95072512e18SViacheslav Ovsiienko 		free(xmem);
95172512e18SViacheslav Ovsiienko 		xmem = NULL;
95272512e18SViacheslav Ovsiienko 	}
95372512e18SViacheslav Ovsiienko 	*ext_mem = xmem;
95472512e18SViacheslav Ovsiienko 	return ext_num;
95572512e18SViacheslav Ovsiienko }
95672512e18SViacheslav Ovsiienko 
/*
 * Create the mbuf pool for one socket/segment-size combination, done
 * once at init time. The allocation strategy is selected by the global
 * mp_alloc_type (native, anonymous memory, external memory with or
 * without hugepages, or pinned external buffers).
 *
 * Exits the application on any failure; on success returns the pool
 * and optionally dumps it when verbose_level > 0.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* total per-object size: mbuf header plus data segment */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			/* create an empty pool, then back it with anonymous
			 * memory and initialize the mbufs manually
			 */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			/* populate_anon() returns the object count; 0 means
			 * failure
			 */
			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			/* DMA-map every memory chunk for all ports */
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			/* allocate from the external heap's pseudo-socket */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			/* descriptor array is copied by the pool; free ours */
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

	/* note: successful switch cases fall through into this label too;
	 * rte_mp is only NULL on a failure path
	 */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
1061af75078fSIntel 
106220a0286fSLiu Xiaofeng /*
106320a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
106420a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
106520a0286fSLiu Xiaofeng  */
106620a0286fSLiu Xiaofeng static int
106720a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
106820a0286fSLiu Xiaofeng {
106920a0286fSLiu Xiaofeng 	static int warning_once = 0;
107020a0286fSLiu Xiaofeng 
1071c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
107220a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
107320a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
107420a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
107520a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
107620a0286fSLiu Xiaofeng 			       " --numa.\n");
107720a0286fSLiu Xiaofeng 		warning_once = 1;
107820a0286fSLiu Xiaofeng 		return -1;
107920a0286fSLiu Xiaofeng 	}
108020a0286fSLiu Xiaofeng 	return 0;
108120a0286fSLiu Xiaofeng }
108220a0286fSLiu Xiaofeng 
10833f7311baSWei Dai /*
10843f7311baSWei Dai  * Get the allowed maximum number of RX queues.
10853f7311baSWei Dai  * *pid return the port id which has minimal value of
10863f7311baSWei Dai  * max_rx_queues in all ports.
10873f7311baSWei Dai  */
10883f7311baSWei Dai queueid_t
10893f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
10903f7311baSWei Dai {
10919e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
10926f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
10933f7311baSWei Dai 	portid_t pi;
10943f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
10953f7311baSWei Dai 
10963f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
10976f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
10986f51deb9SIvan Ilchenko 			continue;
10996f51deb9SIvan Ilchenko 
11006f51deb9SIvan Ilchenko 		max_rxq_valid = true;
11013f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
11023f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
11033f7311baSWei Dai 			*pid = pi;
11043f7311baSWei Dai 		}
11053f7311baSWei Dai 	}
11066f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
11073f7311baSWei Dai }
11083f7311baSWei Dai 
11093f7311baSWei Dai /*
11103f7311baSWei Dai  * Check input rxq is valid or not.
11113f7311baSWei Dai  * If input rxq is not greater than any of maximum number
11123f7311baSWei Dai  * of RX queues of all ports, it is valid.
11133f7311baSWei Dai  * if valid, return 0, else return -1
11143f7311baSWei Dai  */
11153f7311baSWei Dai int
11163f7311baSWei Dai check_nb_rxq(queueid_t rxq)
11173f7311baSWei Dai {
11183f7311baSWei Dai 	queueid_t allowed_max_rxq;
11193f7311baSWei Dai 	portid_t pid = 0;
11203f7311baSWei Dai 
11213f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
11223f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
11233f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
11243f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
11253f7311baSWei Dai 		       rxq,
11263f7311baSWei Dai 		       allowed_max_rxq,
11273f7311baSWei Dai 		       pid);
11283f7311baSWei Dai 		return -1;
11293f7311baSWei Dai 	}
11303f7311baSWei Dai 	return 0;
11313f7311baSWei Dai }
11323f7311baSWei Dai 
113336db4f6cSWei Dai /*
113436db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
113536db4f6cSWei Dai  * *pid return the port id which has minimal value of
113636db4f6cSWei Dai  * max_tx_queues in all ports.
113736db4f6cSWei Dai  */
113836db4f6cSWei Dai queueid_t
113936db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
114036db4f6cSWei Dai {
11419e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
11426f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
114336db4f6cSWei Dai 	portid_t pi;
114436db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
114536db4f6cSWei Dai 
114636db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
11476f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
11486f51deb9SIvan Ilchenko 			continue;
11496f51deb9SIvan Ilchenko 
11506f51deb9SIvan Ilchenko 		max_txq_valid = true;
115136db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
115236db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
115336db4f6cSWei Dai 			*pid = pi;
115436db4f6cSWei Dai 		}
115536db4f6cSWei Dai 	}
11566f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
115736db4f6cSWei Dai }
115836db4f6cSWei Dai 
115936db4f6cSWei Dai /*
116036db4f6cSWei Dai  * Check input txq is valid or not.
116136db4f6cSWei Dai  * If input txq is not greater than any of maximum number
116236db4f6cSWei Dai  * of TX queues of all ports, it is valid.
116336db4f6cSWei Dai  * if valid, return 0, else return -1
116436db4f6cSWei Dai  */
116536db4f6cSWei Dai int
116636db4f6cSWei Dai check_nb_txq(queueid_t txq)
116736db4f6cSWei Dai {
116836db4f6cSWei Dai 	queueid_t allowed_max_txq;
116936db4f6cSWei Dai 	portid_t pid = 0;
117036db4f6cSWei Dai 
117136db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
117236db4f6cSWei Dai 	if (txq > allowed_max_txq) {
117336db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
117436db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
117536db4f6cSWei Dai 		       txq,
117636db4f6cSWei Dai 		       allowed_max_txq,
117736db4f6cSWei Dai 		       pid);
117836db4f6cSWei Dai 		return -1;
117936db4f6cSWei Dai 	}
118036db4f6cSWei Dai 	return 0;
118136db4f6cSWei Dai }
118236db4f6cSWei Dai 
11831c69df45SOri Kam /*
118499e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
118599e040d3SLijun Ou  * *pid return the port id which has minimal value of
118699e040d3SLijun Ou  * max_rxd in all queues of all ports.
118799e040d3SLijun Ou  */
118899e040d3SLijun Ou static uint16_t
118999e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
119099e040d3SLijun Ou {
119199e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
119299e040d3SLijun Ou 	portid_t pi;
119399e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
119499e040d3SLijun Ou 
119599e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
119699e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
119799e040d3SLijun Ou 			continue;
119899e040d3SLijun Ou 
119999e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
120099e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
120199e040d3SLijun Ou 			*pid = pi;
120299e040d3SLijun Ou 		}
120399e040d3SLijun Ou 	}
120499e040d3SLijun Ou 	return allowed_max_rxd;
120599e040d3SLijun Ou }
120699e040d3SLijun Ou 
120799e040d3SLijun Ou /*
120899e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
120999e040d3SLijun Ou  * *pid return the port id which has minimal value of
121099e040d3SLijun Ou  * min_rxd in all queues of all ports.
121199e040d3SLijun Ou  */
121299e040d3SLijun Ou static uint16_t
121399e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
121499e040d3SLijun Ou {
121599e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
121699e040d3SLijun Ou 	portid_t pi;
121799e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
121899e040d3SLijun Ou 
121999e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
122099e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
122199e040d3SLijun Ou 			continue;
122299e040d3SLijun Ou 
122399e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
122499e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
122599e040d3SLijun Ou 			*pid = pi;
122699e040d3SLijun Ou 		}
122799e040d3SLijun Ou 	}
122899e040d3SLijun Ou 
122999e040d3SLijun Ou 	return allowed_min_rxd;
123099e040d3SLijun Ou }
123199e040d3SLijun Ou 
123299e040d3SLijun Ou /*
123399e040d3SLijun Ou  * Check input rxd is valid or not.
123499e040d3SLijun Ou  * If input rxd is not greater than any of maximum number
123599e040d3SLijun Ou  * of RXDs of every Rx queues and is not less than any of
123699e040d3SLijun Ou  * minimal number of RXDs of every Rx queues, it is valid.
123799e040d3SLijun Ou  * if valid, return 0, else return -1
123899e040d3SLijun Ou  */
123999e040d3SLijun Ou int
124099e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
124199e040d3SLijun Ou {
124299e040d3SLijun Ou 	uint16_t allowed_max_rxd;
124399e040d3SLijun Ou 	uint16_t allowed_min_rxd;
124499e040d3SLijun Ou 	portid_t pid = 0;
124599e040d3SLijun Ou 
124699e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
124799e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
124899e040d3SLijun Ou 		printf("Fail: input rxd (%u) can't be greater "
124999e040d3SLijun Ou 		       "than max_rxds (%u) of port %u\n",
125099e040d3SLijun Ou 		       rxd,
125199e040d3SLijun Ou 		       allowed_max_rxd,
125299e040d3SLijun Ou 		       pid);
125399e040d3SLijun Ou 		return -1;
125499e040d3SLijun Ou 	}
125599e040d3SLijun Ou 
125699e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
125799e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
125899e040d3SLijun Ou 		printf("Fail: input rxd (%u) can't be less "
125999e040d3SLijun Ou 		       "than min_rxds (%u) of port %u\n",
126099e040d3SLijun Ou 		       rxd,
126199e040d3SLijun Ou 		       allowed_min_rxd,
126299e040d3SLijun Ou 		       pid);
126399e040d3SLijun Ou 		return -1;
126499e040d3SLijun Ou 	}
126599e040d3SLijun Ou 
126699e040d3SLijun Ou 	return 0;
126799e040d3SLijun Ou }
126899e040d3SLijun Ou 
126999e040d3SLijun Ou /*
127099e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every rx queues.
127199e040d3SLijun Ou  * *pid return the port id which has minimal value of
127299e040d3SLijun Ou  * max_txd in every tx queue.
127399e040d3SLijun Ou  */
127499e040d3SLijun Ou static uint16_t
127599e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
127699e040d3SLijun Ou {
127799e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
127899e040d3SLijun Ou 	portid_t pi;
127999e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
128099e040d3SLijun Ou 
128199e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
128299e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
128399e040d3SLijun Ou 			continue;
128499e040d3SLijun Ou 
128599e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
128699e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
128799e040d3SLijun Ou 			*pid = pi;
128899e040d3SLijun Ou 		}
128999e040d3SLijun Ou 	}
129099e040d3SLijun Ou 	return allowed_max_txd;
129199e040d3SLijun Ou }
129299e040d3SLijun Ou 
/*
 * Get the allowed minimum number of TXDs of every tx queue.
 * *pid return the port id which has the maximal value of
 * min_txd among all ports, i.e. the most restrictive lower bound.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		/* Skip ports whose device info cannot be queried. */
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		/* Keep the largest per-port minimum seen so far. */
		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}
131799e040d3SLijun Ou 
131899e040d3SLijun Ou /*
131999e040d3SLijun Ou  * Check input txd is valid or not.
132099e040d3SLijun Ou  * If input txd is not greater than any of maximum number
132199e040d3SLijun Ou  * of TXDs of every Rx queues, it is valid.
132299e040d3SLijun Ou  * if valid, return 0, else return -1
132399e040d3SLijun Ou  */
132499e040d3SLijun Ou int
132599e040d3SLijun Ou check_nb_txd(queueid_t txd)
132699e040d3SLijun Ou {
132799e040d3SLijun Ou 	uint16_t allowed_max_txd;
132899e040d3SLijun Ou 	uint16_t allowed_min_txd;
132999e040d3SLijun Ou 	portid_t pid = 0;
133099e040d3SLijun Ou 
133199e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
133299e040d3SLijun Ou 	if (txd > allowed_max_txd) {
133399e040d3SLijun Ou 		printf("Fail: input txd (%u) can't be greater "
133499e040d3SLijun Ou 		       "than max_txds (%u) of port %u\n",
133599e040d3SLijun Ou 		       txd,
133699e040d3SLijun Ou 		       allowed_max_txd,
133799e040d3SLijun Ou 		       pid);
133899e040d3SLijun Ou 		return -1;
133999e040d3SLijun Ou 	}
134099e040d3SLijun Ou 
134199e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
134299e040d3SLijun Ou 	if (txd < allowed_min_txd) {
134399e040d3SLijun Ou 		printf("Fail: input txd (%u) can't be less "
134499e040d3SLijun Ou 		       "than min_txds (%u) of port %u\n",
134599e040d3SLijun Ou 		       txd,
134699e040d3SLijun Ou 		       allowed_min_txd,
134799e040d3SLijun Ou 		       pid);
134899e040d3SLijun Ou 		return -1;
134999e040d3SLijun Ou 	}
135099e040d3SLijun Ou 	return 0;
135199e040d3SLijun Ou }
135299e040d3SLijun Ou 
135399e040d3SLijun Ou 
135499e040d3SLijun Ou /*
13551c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
13561c69df45SOri Kam  * *pid return the port id which has minimal value of
13571c69df45SOri Kam  * max_hairpin_queues in all ports.
13581c69df45SOri Kam  */
13591c69df45SOri Kam queueid_t
13601c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
13611c69df45SOri Kam {
13629e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
13631c69df45SOri Kam 	portid_t pi;
13641c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
13651c69df45SOri Kam 
13661c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
13671c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
13681c69df45SOri Kam 			*pid = pi;
13691c69df45SOri Kam 			return 0;
13701c69df45SOri Kam 		}
13711c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
13721c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
13731c69df45SOri Kam 			*pid = pi;
13741c69df45SOri Kam 		}
13751c69df45SOri Kam 	}
13761c69df45SOri Kam 	return allowed_max_hairpinq;
13771c69df45SOri Kam }
13781c69df45SOri Kam 
13791c69df45SOri Kam /*
13801c69df45SOri Kam  * Check input hairpin is valid or not.
13811c69df45SOri Kam  * If input hairpin is not greater than any of maximum number
13821c69df45SOri Kam  * of hairpin queues of all ports, it is valid.
13831c69df45SOri Kam  * if valid, return 0, else return -1
13841c69df45SOri Kam  */
13851c69df45SOri Kam int
13861c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
13871c69df45SOri Kam {
13881c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
13891c69df45SOri Kam 	portid_t pid = 0;
13901c69df45SOri Kam 
13911c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
13921c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
13931c69df45SOri Kam 		printf("Fail: input hairpin (%u) can't be greater "
13941c69df45SOri Kam 		       "than max_hairpin_queues (%u) of port %u\n",
13951c69df45SOri Kam 		       hairpinq, allowed_max_hairpinq, pid);
13961c69df45SOri Kam 		return -1;
13971c69df45SOri Kam 	}
13981c69df45SOri Kam 	return 0;
13991c69df45SOri Kam }
14001c69df45SOri Kam 
/*
 * One-time testpmd configuration:
 *  - allocate one fwd_lcore context per forwarding lcore,
 *  - apply the default Rx/Tx configuration to every port and flag it
 *    for (re)configuration,
 *  - size and create the mbuf pools (per socket when NUMA is enabled),
 *  - attach an mbuf pool and a GSO context to each lcore,
 *  - build the forwarding streams and a GRO context per lcore.
 * Exits the application on any allocation/query failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;
	int ret;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		/* Drop FAST_FREE from the Tx offloads if the port lacks it. */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			/* Count ports per socket to drive per-socket pools. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU. Accordingly
		 * update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
				port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size[0]) {
				mbuf_data_size[0] = data_size +
						 RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING,
			    "Configured mbuf size of the first segment %hu\n",
			    mbuf_data_size[0]);
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i, j;

		/* One pool per (socket, segment size) combination. */
		for (i = 0; i < num_sockets; i++)
			for (j = 0; j < mbuf_data_size_n; j++)
				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
					mbuf_pool_create(mbuf_data_size[j],
							  nb_mbuf_per_pool,
							  socket_ids[i], j);
	} else {
		uint8_t i;

		for (i = 0; i < mbuf_data_size_n; i++)
			mempools[i] = mbuf_pool_create
					(mbuf_data_size[i],
					 nb_mbuf_per_pool,
					 socket_num == UMA_NO_CONFIG ?
					 0 : socket_num, i);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 * Falls back to the socket-0 pool when the lcore's own socket has
	 * no pool.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);

		if (mbp == NULL)
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
1585ce8d5614SIntel 
15862950a769SDeclan Doherty 
15872950a769SDeclan Doherty void
1588a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
15892950a769SDeclan Doherty {
15902950a769SDeclan Doherty 	struct rte_port *port;
15916f51deb9SIvan Ilchenko 	int ret;
15922950a769SDeclan Doherty 
15932950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
15942950a769SDeclan Doherty 	port = &ports[new_port_id];
15956f51deb9SIvan Ilchenko 
15966f51deb9SIvan Ilchenko 	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
15976f51deb9SIvan Ilchenko 	if (ret != 0)
15986f51deb9SIvan Ilchenko 		return;
15992950a769SDeclan Doherty 
16002950a769SDeclan Doherty 	/* set flag to initialize port/queue */
16012950a769SDeclan Doherty 	port->need_reconfig = 1;
16022950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1603a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
16042950a769SDeclan Doherty 
16052950a769SDeclan Doherty 	init_port_config();
16062950a769SDeclan Doherty }
16072950a769SDeclan Doherty 
16082950a769SDeclan Doherty 
/*
 * (Re)allocate the global forwarding stream array.
 * Validates the configured queue counts against every port's limits,
 * assigns each port a NUMA socket id, then sizes the stream array as
 * nb_ports * max(nb_rxq, nb_txq). Frees and reallocates the array only
 * when the required count changed.
 * Return 0 on success, -1 on invalid configuration.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			/* Without NUMA, use socket 0 unless one was forced. */
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Nothing to do if the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1697af75078fSIntel 
/*
 * Print a one-line summary of the burst-size histogram in pbs:
 * the share of zero-sized bursts plus the two most frequent non-zero
 * burst sizes, with any remainder reported as "other".
 * rx_tx is the label prefix ("RX" or "TX"). Prints nothing when no
 * bursts were recorded at all.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		/* Keep slots [1] and [2] ordered by descending count. */
		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		/* Slot 3 is never filled above: it closes the line as
		 * the leftover percentage.
		 */
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		/* All bursts accounted for: close the line early. */
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
				100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
			burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
1764af75078fSIntel 
/*
 * Display the statistics of a single forwarding stream: packet counters,
 * checksum error counters (csum engine only) and, when burst stats
 * recording is enabled, the Rx/Tx burst-size summaries.
 * Streams with no traffic and no drops are skipped silently.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Nothing to report for an idle stream. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       "  Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
			fs->rx_bad_outer_l4_csum);
	} else {
		printf("\n");
	}

	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
	}
}
1799af75078fSIntel 
/*
 * Display forwarding statistics for all ports: per-stream software
 * counters are folded into per-port totals, hardware counters are read
 * with rte_eth_stats_get() and reported as deltas against the snapshot
 * in port->stats (presumably saved at the last stats reset — see
 * fwd_stats_reset), followed by accumulated totals across all ports and,
 * when core-cycle recording is enabled, a cycles-per-packet figure.
 */
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	/* Per-port aggregation of the per-stream software counters. */
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		/* With multiple streams per port, show each stream
		 * individually; otherwise remember the port's single
		 * Rx/Tx stream for the burst stats below.
		 */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
		/* Report deltas relative to the saved snapshot. */
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf  += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
		       stats.ipackets + stats.imissed);

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64
			       "Bad-outer-l4csum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_ip_csum,
			       ports_stats[pt_id].rx_bad_l4_csum,
			       ports_stats[pt_id].rx_bad_outer_l4_csum);
		if (stats.ierrors + stats.rx_nombuf > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats.opackets, ports_stats[pt_id].tx_dropped,
		       stats.opackets + ports_stats[pt_id].tx_dropped);

		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;
			/* Tx-only engines generate rather than receive, so
			 * normalize cycles by transmitted packets instead.
			 */
			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
			else
				total_pkts = total_recv;

			printf("\n  CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       " MHz Clock\n",
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
		}
	}
}
193853324971SDavid Marchand 
193953324971SDavid Marchand void
194053324971SDavid Marchand fwd_stats_reset(void)
194153324971SDavid Marchand {
194253324971SDavid Marchand 	streamid_t sm_id;
194353324971SDavid Marchand 	portid_t pt_id;
194453324971SDavid Marchand 	int i;
194553324971SDavid Marchand 
194653324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
194753324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
194853324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
194953324971SDavid Marchand 	}
195053324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
195153324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
195253324971SDavid Marchand 
195353324971SDavid Marchand 		fs->rx_packets = 0;
195453324971SDavid Marchand 		fs->tx_packets = 0;
195553324971SDavid Marchand 		fs->fwd_dropped = 0;
195653324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
195753324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
195853324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
195953324971SDavid Marchand 
196053324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
196153324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
196253324971SDavid Marchand 		fs->core_cycles = 0;
196353324971SDavid Marchand 	}
196453324971SDavid Marchand }
196553324971SDavid Marchand 
1966af75078fSIntel static void
19677741e4cfSIntel flush_fwd_rx_queues(void)
1968af75078fSIntel {
1969af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1970af75078fSIntel 	portid_t  rxp;
19717741e4cfSIntel 	portid_t port_id;
1972af75078fSIntel 	queueid_t rxq;
1973af75078fSIntel 	uint16_t  nb_rx;
1974af75078fSIntel 	uint16_t  i;
1975af75078fSIntel 	uint8_t   j;
1976f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1977594302c7SJames Poole 	uint64_t timer_period;
1978f487715fSReshma Pattan 
1979f487715fSReshma Pattan 	/* convert to number of cycles */
1980594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1981af75078fSIntel 
1982af75078fSIntel 	for (j = 0; j < 2; j++) {
19837741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1984af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
19857741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1986f487715fSReshma Pattan 				/**
1987f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1988f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1989f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1990f487715fSReshma Pattan 				* after 1sec timer expiry.
1991f487715fSReshma Pattan 				*/
1992f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1993af75078fSIntel 				do {
19947741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1995013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1996af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1997af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1998f487715fSReshma Pattan 
1999f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2000f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2001f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2002f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2003f487715fSReshma Pattan 					(timer_tsc < timer_period));
2004f487715fSReshma Pattan 				timer_tsc = 0;
2005af75078fSIntel 			}
2006af75078fSIntel 		}
2007af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2008af75078fSIntel 	}
2009af75078fSIntel }
2010af75078fSIntel 
/*
 * Main per-lcore forwarding loop: repeatedly run the engine's packet
 * forwarding callback over every stream assigned to this lcore until
 * fc->stopped is set (by stop_packet_forwarding() or a one-shot caller).
 * Optionally drives periodic bitrate and latency statistics when those
 * libraries are compiled in and this lcore is the designated one.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	/* Snapshot the port count once; bitrate is computed for all ports. */
	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* Streams handled by this lcore: a contiguous slice of fwd_streams. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		/* Only the configured bitrate lcore updates the stats. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIB_LATENCYSTATS
		/* Only the configured latency-stats lcore updates them. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
2053af75078fSIntel 
2054af75078fSIntel static int
2055af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2056af75078fSIntel {
2057af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2058af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2059af75078fSIntel 	return 0;
2060af75078fSIntel }
2061af75078fSIntel 
2062af75078fSIntel /*
2063af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2064af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2065af75078fSIntel  */
2066af75078fSIntel static int
2067af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2068af75078fSIntel {
2069af75078fSIntel 	struct fwd_lcore *fwd_lc;
2070af75078fSIntel 	struct fwd_lcore tmp_lcore;
2071af75078fSIntel 
2072af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2073af75078fSIntel 	tmp_lcore = *fwd_lc;
2074af75078fSIntel 	tmp_lcore.stopped = 1;
2075af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2076af75078fSIntel 	return 0;
2077af75078fSIntel }
2078af75078fSIntel 
2079af75078fSIntel /*
2080af75078fSIntel  * Launch packet forwarding:
2081af75078fSIntel  *     - Setup per-port forwarding context.
2082af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2083af75078fSIntel  */
2084af75078fSIntel static void
2085af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2086af75078fSIntel {
2087af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2088af75078fSIntel 	unsigned int i;
2089af75078fSIntel 	unsigned int lc_id;
2090af75078fSIntel 	int diag;
2091af75078fSIntel 
2092af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2093af75078fSIntel 	if (port_fwd_begin != NULL) {
2094af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2095af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
2096af75078fSIntel 	}
2097af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2098af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2099af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2100af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2101af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2102af75078fSIntel 						     fwd_lcores[i], lc_id);
2103af75078fSIntel 			if (diag != 0)
2104af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
2105af75078fSIntel 				       lc_id, diag);
2106af75078fSIntel 		}
2107af75078fSIntel 	}
2108af75078fSIntel }
2109af75078fSIntel 
/*
 * Launch packet forwarding configuration.
 * Validates the queue/mode combination, optionally flushes stale Rx
 * packets, resets statistics, optionally sends initial TXONLY burst(s)
 * (with_tx_first times), then launches the forwarding lcores.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;

	/* The chosen forwarding mode must have the queues it relies on. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	/* Any other mode forwards packets, so it needs both Rx and Tx queues. */
	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done != 0 means no forwarding run is currently active. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* In DCB mode every forwarding port must be DCB-configured. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drop stale packets left in the Rx queues, unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	if (with_tx_first) {
		/*
		 * Kick off traffic with one (or more) single TXONLY
		 * bursts before starting the real forwarding engine,
		 * to prime loopback test configurations.
		 */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
2191af75078fSIntel 
2192af75078fSIntel void
2193af75078fSIntel stop_packet_forwarding(void)
2194af75078fSIntel {
2195af75078fSIntel 	port_fwd_end_t port_fwd_end;
2196af75078fSIntel 	lcoreid_t lc_id;
219753324971SDavid Marchand 	portid_t pt_id;
219853324971SDavid Marchand 	int i;
2199af75078fSIntel 
2200af75078fSIntel 	if (test_done) {
2201af75078fSIntel 		printf("Packet forwarding not started\n");
2202af75078fSIntel 		return;
2203af75078fSIntel 	}
2204af75078fSIntel 	printf("Telling cores to stop...");
2205af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2206af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2207af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2208af75078fSIntel 	rte_eal_mp_wait_lcore();
2209af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2210af75078fSIntel 	if (port_fwd_end != NULL) {
2211af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2212af75078fSIntel 			pt_id = fwd_ports_ids[i];
2213af75078fSIntel 			(*port_fwd_end)(pt_id);
2214af75078fSIntel 		}
2215af75078fSIntel 	}
2216c185d42cSDavid Marchand 
221753324971SDavid Marchand 	fwd_stats_display();
221858d475b7SJerin Jacob 
2219af75078fSIntel 	printf("\nDone.\n");
2220af75078fSIntel 	test_done = 1;
2221af75078fSIntel }
2222af75078fSIntel 
2223cfae07fdSOuyang Changchun void
2224cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2225cfae07fdSOuyang Changchun {
2226492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
2227cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
2228cfae07fdSOuyang Changchun }
2229cfae07fdSOuyang Changchun 
2230cfae07fdSOuyang Changchun void
2231cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2232cfae07fdSOuyang Changchun {
2233492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
2234cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
2235cfae07fdSOuyang Changchun }
2236cfae07fdSOuyang Changchun 
2237ce8d5614SIntel static int
2238ce8d5614SIntel all_ports_started(void)
2239ce8d5614SIntel {
2240ce8d5614SIntel 	portid_t pi;
2241ce8d5614SIntel 	struct rte_port *port;
2242ce8d5614SIntel 
22437d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2244ce8d5614SIntel 		port = &ports[pi];
2245ce8d5614SIntel 		/* Check if there is a port which is not started */
224641b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
224741b05095SBernard Iremonger 			(port->slave_flag == 0))
2248ce8d5614SIntel 			return 0;
2249ce8d5614SIntel 	}
2250ce8d5614SIntel 
2251ce8d5614SIntel 	/* No port is not started */
2252ce8d5614SIntel 	return 1;
2253ce8d5614SIntel }
2254ce8d5614SIntel 
2255148f963fSBruce Richardson int
22566018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
22576018eb8cSShahaf Shuler {
22586018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
22596018eb8cSShahaf Shuler 
22606018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
22616018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
22626018eb8cSShahaf Shuler 		return 0;
22636018eb8cSShahaf Shuler 	return 1;
22646018eb8cSShahaf Shuler }
22656018eb8cSShahaf Shuler 
22666018eb8cSShahaf Shuler int
2267edab33b1STetsuya Mukawa all_ports_stopped(void)
2268edab33b1STetsuya Mukawa {
2269edab33b1STetsuya Mukawa 	portid_t pi;
2270edab33b1STetsuya Mukawa 
22717d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
22726018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2273edab33b1STetsuya Mukawa 			return 0;
2274edab33b1STetsuya Mukawa 	}
2275edab33b1STetsuya Mukawa 
2276edab33b1STetsuya Mukawa 	return 1;
2277edab33b1STetsuya Mukawa }
2278edab33b1STetsuya Mukawa 
2279edab33b1STetsuya Mukawa int
2280edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2281edab33b1STetsuya Mukawa {
2282edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2283edab33b1STetsuya Mukawa 		return 0;
2284edab33b1STetsuya Mukawa 
2285edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2286edab33b1STetsuya Mukawa 		return 0;
2287edab33b1STetsuya Mukawa 
2288edab33b1STetsuya Mukawa 	return 1;
2289edab33b1STetsuya Mukawa }
2290edab33b1STetsuya Mukawa 
/* Configure the Rx and Tx hairpin queues for the selected port.
 *
 * pi is the port being configured, p_pi the previously configured port
 * (RTE_MAX_ETHPORTS when none yet) and cnt_pi the count of ports
 * configured so far. The peer port for each hairpin queue is chosen
 * from the low bits of the global hairpin_mode:
 *   0x0 in the low nibble: loop the port to itself (implicit bind);
 *   0x1: chain ports pi -> next port, last wraps to first;
 *   0x2: pair ports two by two;
 * bit 0x10 presumably selects explicit Tx flow mode — TODO confirm
 * against the testpmd --hairpin-mode documentation.
 * Returns 0 on success, -1 on failure (port is moved back to stopped
 * state and marked for queue reconfiguration).
 */
static int
setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];
	uint16_t peer_rx_port = pi;
	uint16_t peer_tx_port = pi;
	uint32_t manual = 1;
	uint32_t tx_exp = hairpin_mode & 0x10;

	if (!(hairpin_mode & 0xf)) {
		/* Self-loop: both peers are this port, bind is implicit. */
		peer_rx_port = pi;
		peer_tx_port = pi;
		manual = 0;
	} else if (hairpin_mode & 0x1) {
		/* Chain mode: Tx peer is the next owned port (wrap to 0). */
		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
						       RTE_ETH_DEV_NO_OWNER);
		if (peer_tx_port >= RTE_MAX_ETHPORTS)
			peer_tx_port = rte_eth_find_next_owned_by(0,
						RTE_ETH_DEV_NO_OWNER);
		if (p_pi != RTE_MAX_ETHPORTS) {
			/* Rx peer is the previously configured port. */
			peer_rx_port = p_pi;
		} else {
			uint16_t next_pi;

			/* Last port will be the peer RX port of the first. */
			RTE_ETH_FOREACH_DEV(next_pi)
				peer_rx_port = next_pi;
		}
		manual = 1;
	} else if (hairpin_mode & 0x2) {
		/* Pair mode: odd-numbered ports peer with the previous one. */
		if (cnt_pi & 0x1) {
			peer_rx_port = p_pi;
		} else {
			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
						RTE_ETH_DEV_NO_OWNER);
			if (peer_rx_port >= RTE_MAX_ETHPORTS)
				peer_rx_port = pi;
		}
		peer_tx_port = peer_rx_port;
		manual = 1;
	}

	/*
	 * Hairpin Tx queues occupy indices [nb_txq, nb_txq + nb_hairpinq)
	 * and are peered with the partner's hairpin Rx queues starting
	 * at index nb_rxq.
	 */
	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = peer_rx_port;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Fail to setup rx queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	/*
	 * Hairpin Rx queues occupy indices [nb_rxq, nb_rxq + nb_hairpinq)
	 * and are peered with the partner's hairpin Tx queues starting
	 * at index nb_txq.
	 */
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = peer_tx_port;
		hairpin_conf.peers[0].queue = i + nb_txq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Fail to setup rx queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
23881c69df45SOri Kam 
23892befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */
23902befc67fSViacheslav Ovsiienko int
23912befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
23922befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
23932befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
23942befc67fSViacheslav Ovsiienko {
23952befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
23962befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
23972befc67fSViacheslav Ovsiienko 	int ret;
23982befc67fSViacheslav Ovsiienko 
23992befc67fSViacheslav Ovsiienko 	if (rx_pkt_nb_segs <= 1 ||
24002befc67fSViacheslav Ovsiienko 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
24012befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
24022befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
24032befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
24042befc67fSViacheslav Ovsiienko 					     nb_rx_desc, socket_id,
24052befc67fSViacheslav Ovsiienko 					     rx_conf, mp);
24062befc67fSViacheslav Ovsiienko 		return ret;
24072befc67fSViacheslav Ovsiienko 	}
24082befc67fSViacheslav Ovsiienko 	for (i = 0; i < rx_pkt_nb_segs; i++) {
24092befc67fSViacheslav Ovsiienko 		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
24102befc67fSViacheslav Ovsiienko 		struct rte_mempool *mpx;
24112befc67fSViacheslav Ovsiienko 		/*
24122befc67fSViacheslav Ovsiienko 		 * Use last valid pool for the segments with number
24132befc67fSViacheslav Ovsiienko 		 * exceeding the pool index.
24142befc67fSViacheslav Ovsiienko 		 */
24152befc67fSViacheslav Ovsiienko 		mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
24162befc67fSViacheslav Ovsiienko 		mpx = mbuf_pool_find(socket_id, mp_n);
24172befc67fSViacheslav Ovsiienko 		/* Handle zero as mbuf data buffer size. */
24182befc67fSViacheslav Ovsiienko 		rx_seg->length = rx_pkt_seg_lengths[i] ?
24192befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_lengths[i] :
24202befc67fSViacheslav Ovsiienko 				   mbuf_data_size[mp_n];
24212befc67fSViacheslav Ovsiienko 		rx_seg->offset = i < rx_pkt_nb_offs ?
24222befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_offsets[i] : 0;
24232befc67fSViacheslav Ovsiienko 		rx_seg->mp = mpx ? mpx : mp;
24242befc67fSViacheslav Ovsiienko 	}
24252befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = rx_pkt_nb_segs;
24262befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = rx_useg;
24272befc67fSViacheslav Ovsiienko 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
24282befc67fSViacheslav Ovsiienko 				    socket_id, rx_conf, NULL);
24292befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = NULL;
24302befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = 0;
24312befc67fSViacheslav Ovsiienko 	return ret;
24322befc67fSViacheslav Ovsiienko }
24332befc67fSViacheslav Ovsiienko 
2434edab33b1STetsuya Mukawa int
2435ce8d5614SIntel start_port(portid_t pid)
2436ce8d5614SIntel {
243792d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
2438ce8d5614SIntel 	portid_t pi;
243901817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
244001817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
244101817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
244201817b10SBing Zhao 	uint16_t cnt_pi = 0;
244301817b10SBing Zhao 	uint16_t cfg_pi = 0;
244401817b10SBing Zhao 	int peer_pi;
2445ce8d5614SIntel 	queueid_t qi;
2446ce8d5614SIntel 	struct rte_port *port;
24476d13ea8eSOlivier Matz 	struct rte_ether_addr mac_addr;
24481c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
2449ce8d5614SIntel 
24504468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
24514468635fSMichael Qiu 		return 0;
24524468635fSMichael Qiu 
2453ce8d5614SIntel 	if(dcb_config)
2454ce8d5614SIntel 		dcb_test = 1;
24557d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2456edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2457ce8d5614SIntel 			continue;
2458ce8d5614SIntel 
245992d2703eSMichael Qiu 		need_check_link_status = 0;
2460ce8d5614SIntel 		port = &ports[pi];
2461ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2462ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
2463ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2464ce8d5614SIntel 			continue;
2465ce8d5614SIntel 		}
2466ce8d5614SIntel 
2467ce8d5614SIntel 		if (port->need_reconfig > 0) {
2468ce8d5614SIntel 			port->need_reconfig = 0;
2469ce8d5614SIntel 
24707ee3e944SVasily Philipov 			if (flow_isolate_all) {
24717ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
24727ee3e944SVasily Philipov 				if (ret) {
24737ee3e944SVasily Philipov 					printf("Failed to apply isolated"
24747ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
24757ee3e944SVasily Philipov 					return -1;
24767ee3e944SVasily Philipov 				}
24777ee3e944SVasily Philipov 			}
2478b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
24795706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
248020a0286fSLiu Xiaofeng 					port->socket_id);
24811c69df45SOri Kam 			if (nb_hairpinq > 0 &&
24821c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
24831c69df45SOri Kam 				printf("Port %d doesn't support hairpin "
24841c69df45SOri Kam 				       "queues\n", pi);
24851c69df45SOri Kam 				return -1;
24861c69df45SOri Kam 			}
2487ce8d5614SIntel 			/* configure port */
24881c69df45SOri Kam 			diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
24891c69df45SOri Kam 						     nb_txq + nb_hairpinq,
2490ce8d5614SIntel 						     &(port->dev_conf));
2491ce8d5614SIntel 			if (diag != 0) {
2492ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2493ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2494ce8d5614SIntel 					printf("Port %d can not be set back "
2495ce8d5614SIntel 							"to stopped\n", pi);
2496ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
2497ce8d5614SIntel 				/* try to reconfigure port next time */
2498ce8d5614SIntel 				port->need_reconfig = 1;
2499148f963fSBruce Richardson 				return -1;
2500ce8d5614SIntel 			}
2501ce8d5614SIntel 		}
2502ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
2503ce8d5614SIntel 			port->need_reconfig_queues = 0;
2504ce8d5614SIntel 			/* setup tx queues */
2505ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
2506b6ea6408SIntel 				if ((numa_support) &&
2507b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2508b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2509d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2510d44f8a48SQi Zhang 						txring_numa[pi],
2511d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2512b6ea6408SIntel 				else
2513b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2514d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2515d44f8a48SQi Zhang 						port->socket_id,
2516d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2517b6ea6408SIntel 
2518ce8d5614SIntel 				if (diag == 0)
2519ce8d5614SIntel 					continue;
2520ce8d5614SIntel 
2521ce8d5614SIntel 				/* Fail to setup tx queue, return */
2522ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2523ce8d5614SIntel 							RTE_PORT_HANDLING,
2524ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2525ce8d5614SIntel 					printf("Port %d can not be set back "
2526ce8d5614SIntel 							"to stopped\n", pi);
2527d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
2528d44f8a48SQi Zhang 				       pi);
2529ce8d5614SIntel 				/* try to reconfigure queues next time */
2530ce8d5614SIntel 				port->need_reconfig_queues = 1;
2531148f963fSBruce Richardson 				return -1;
2532ce8d5614SIntel 			}
2533ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2534d44f8a48SQi Zhang 				/* setup rx queues */
2535b6ea6408SIntel 				if ((numa_support) &&
2536b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2537b6ea6408SIntel 					struct rte_mempool * mp =
253826cbb419SViacheslav Ovsiienko 						mbuf_pool_find
253926cbb419SViacheslav Ovsiienko 							(rxring_numa[pi], 0);
2540b6ea6408SIntel 					if (mp == NULL) {
2541b6ea6408SIntel 						printf("Failed to setup RX queue:"
2542b6ea6408SIntel 							"No mempool allocation"
2543b6ea6408SIntel 							" on the socket %d\n",
2544b6ea6408SIntel 							rxring_numa[pi]);
2545148f963fSBruce Richardson 						return -1;
2546b6ea6408SIntel 					}
2547b6ea6408SIntel 
25482befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2549d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2550d44f8a48SQi Zhang 					     rxring_numa[pi],
2551d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2552d44f8a48SQi Zhang 					     mp);
25531e1d6bddSBernard Iremonger 				} else {
25541e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
255526cbb419SViacheslav Ovsiienko 						mbuf_pool_find
255626cbb419SViacheslav Ovsiienko 							(port->socket_id, 0);
25571e1d6bddSBernard Iremonger 					if (mp == NULL) {
25581e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
25591e1d6bddSBernard Iremonger 							"No mempool allocation"
25601e1d6bddSBernard Iremonger 							" on the socket %d\n",
25611e1d6bddSBernard Iremonger 							port->socket_id);
25621e1d6bddSBernard Iremonger 						return -1;
2563b6ea6408SIntel 					}
25642befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2565d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2566d44f8a48SQi Zhang 					     port->socket_id,
2567d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2568d44f8a48SQi Zhang 					     mp);
25691e1d6bddSBernard Iremonger 				}
2570ce8d5614SIntel 				if (diag == 0)
2571ce8d5614SIntel 					continue;
2572ce8d5614SIntel 
2573ce8d5614SIntel 				/* Fail to setup rx queue, return */
2574ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2575ce8d5614SIntel 							RTE_PORT_HANDLING,
2576ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2577ce8d5614SIntel 					printf("Port %d can not be set back "
2578ce8d5614SIntel 							"to stopped\n", pi);
2579d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2580d44f8a48SQi Zhang 				       pi);
2581ce8d5614SIntel 				/* try to reconfigure queues next time */
2582ce8d5614SIntel 				port->need_reconfig_queues = 1;
2583148f963fSBruce Richardson 				return -1;
2584ce8d5614SIntel 			}
25851c69df45SOri Kam 			/* setup hairpin queues */
258601817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
25871c69df45SOri Kam 				return -1;
2588ce8d5614SIntel 		}
2589b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
2590b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
2591b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2592b0a9354aSPavan Nikhilesh 					NULL, 0);
2593b0a9354aSPavan Nikhilesh 			if (diag < 0)
2594b0a9354aSPavan Nikhilesh 				printf(
2595b0a9354aSPavan Nikhilesh 				"Port %d: Failed to disable Ptype parsing\n",
2596b0a9354aSPavan Nikhilesh 				pi);
2597b0a9354aSPavan Nikhilesh 		}
2598b0a9354aSPavan Nikhilesh 
259901817b10SBing Zhao 		p_pi = pi;
260001817b10SBing Zhao 		cnt_pi++;
260101817b10SBing Zhao 
2602ce8d5614SIntel 		/* start port */
2603ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2604ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2605ce8d5614SIntel 
2606ce8d5614SIntel 			/* Fail to setup rx queue, return */
2607ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2608ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2609ce8d5614SIntel 				printf("Port %d can not be set back to "
2610ce8d5614SIntel 							"stopped\n", pi);
2611ce8d5614SIntel 			continue;
2612ce8d5614SIntel 		}
2613ce8d5614SIntel 
2614ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2615ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2616ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2617ce8d5614SIntel 
2618a5279d25SIgor Romanov 		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2619d8c89163SZijie Pan 			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
26202950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
26212950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
26222950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2623d8c89163SZijie Pan 
2624ce8d5614SIntel 		/* at least one port started, need checking link status */
2625ce8d5614SIntel 		need_check_link_status = 1;
262601817b10SBing Zhao 
262701817b10SBing Zhao 		pl[cfg_pi++] = pi;
2628ce8d5614SIntel 	}
2629ce8d5614SIntel 
263092d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2631edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
263292d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2633ce8d5614SIntel 		printf("Please stop the ports first\n");
2634ce8d5614SIntel 
263501817b10SBing Zhao 	if (hairpin_mode & 0xf) {
263601817b10SBing Zhao 		uint16_t i;
263701817b10SBing Zhao 		int j;
263801817b10SBing Zhao 
263901817b10SBing Zhao 		/* bind all started hairpin ports */
264001817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
264101817b10SBing Zhao 			pi = pl[i];
264201817b10SBing Zhao 			/* bind current Tx to all peer Rx */
264301817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
264401817b10SBing Zhao 							RTE_MAX_ETHPORTS, 1);
264501817b10SBing Zhao 			if (peer_pi < 0)
264601817b10SBing Zhao 				return peer_pi;
264701817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
264801817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
264901817b10SBing Zhao 					continue;
265001817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
265101817b10SBing Zhao 				if (diag < 0) {
265201817b10SBing Zhao 					printf("Error during binding hairpin"
265301817b10SBing Zhao 					       " Tx port %u to %u: %s\n",
265401817b10SBing Zhao 					       pi, peer_pl[j],
265501817b10SBing Zhao 					       rte_strerror(-diag));
265601817b10SBing Zhao 					return -1;
265701817b10SBing Zhao 				}
265801817b10SBing Zhao 			}
265901817b10SBing Zhao 			/* bind all peer Tx to current Rx */
266001817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
266101817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
266201817b10SBing Zhao 			if (peer_pi < 0)
266301817b10SBing Zhao 				return peer_pi;
266401817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
266501817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
266601817b10SBing Zhao 					continue;
266701817b10SBing Zhao 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
266801817b10SBing Zhao 				if (diag < 0) {
266901817b10SBing Zhao 					printf("Error during binding hairpin"
267001817b10SBing Zhao 					       " Tx port %u to %u: %s\n",
267101817b10SBing Zhao 					       peer_pl[j], pi,
267201817b10SBing Zhao 					       rte_strerror(-diag));
267301817b10SBing Zhao 					return -1;
267401817b10SBing Zhao 				}
267501817b10SBing Zhao 			}
267601817b10SBing Zhao 		}
267701817b10SBing Zhao 	}
267801817b10SBing Zhao 
2679ce8d5614SIntel 	printf("Done\n");
2680148f963fSBruce Richardson 	return 0;
2681ce8d5614SIntel }
2682ce8d5614SIntel 
/*
 * Stop the given port, or every port when @pid is RTE_PORT_ALL.
 *
 * Ports still referenced by the forwarding configuration or enslaved to
 * a bonded device are skipped with a warning.  The port state machine is
 * driven atomically RTE_PORT_STARTED -> RTE_PORT_HANDLING ->
 * RTE_PORT_STOPPED so that concurrent state changes are detected.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	int peer_pi;

	/* Stopping any port invalidates a running DCB test setup. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		/* Honour the port selection (single port or all ports). */
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Claim the port; skip it if it is not currently started. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		if (hairpin_mode & 0xf) {
			int j;

			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
			/* unbind all peer Tx from current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			/*
			 * NOTE(review): on error this skips the stop entirely
			 * and leaves the port in RTE_PORT_HANDLING — confirm
			 * this is intended.
			 */
			if (peer_pi < 0)
				continue;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				rte_eth_hairpin_unbind(peer_pl[j], pi);
			}
		}

		/* Remove flow rules before the ethdev is stopped. */
		if (port->flow_list)
			port_flow_flush(pi);

		if (rte_eth_dev_stop(pi) != 0)
			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
				pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
2754ce8d5614SIntel 
2755ce6959bfSWisam Jaddo static void
27564f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2757ce6959bfSWisam Jaddo {
27584f1de450SThomas Monjalon 	portid_t i;
27594f1de450SThomas Monjalon 	portid_t new_total = 0;
2760ce6959bfSWisam Jaddo 
27614f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
27624f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
27634f1de450SThomas Monjalon 			array[new_total] = array[i];
27644f1de450SThomas Monjalon 			new_total++;
2765ce6959bfSWisam Jaddo 		}
27664f1de450SThomas Monjalon 	*total = new_total;
27674f1de450SThomas Monjalon }
27684f1de450SThomas Monjalon 
/*
 * Drop port ids that no longer exist from the global port lists and
 * keep the configured-port count in sync with the forwarding list.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
2776ce6959bfSWisam Jaddo 
2777ce8d5614SIntel void
2778ce8d5614SIntel close_port(portid_t pid)
2779ce8d5614SIntel {
2780ce8d5614SIntel 	portid_t pi;
2781ce8d5614SIntel 	struct rte_port *port;
2782ce8d5614SIntel 
27834468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
27844468635fSMichael Qiu 		return;
27854468635fSMichael Qiu 
2786ce8d5614SIntel 	printf("Closing ports...\n");
2787ce8d5614SIntel 
27887d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
27894468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2790ce8d5614SIntel 			continue;
2791ce8d5614SIntel 
2792a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2793a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2794a8ef3e3aSBernard Iremonger 			continue;
2795a8ef3e3aSBernard Iremonger 		}
2796a8ef3e3aSBernard Iremonger 
27970e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
27980e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
27990e545d30SBernard Iremonger 			continue;
28000e545d30SBernard Iremonger 		}
28010e545d30SBernard Iremonger 
2802ce8d5614SIntel 		port = &ports[pi];
2803ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2804d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2805d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
2806d4e8ad64SMichael Qiu 			continue;
2807d4e8ad64SMichael Qiu 		}
2808d4e8ad64SMichael Qiu 
2809938a184aSAdrien Mazarguil 		port_flow_flush(pi);
2810ce8d5614SIntel 		rte_eth_dev_close(pi);
2811ce8d5614SIntel 	}
2812ce8d5614SIntel 
281385c6571cSThomas Monjalon 	remove_invalid_ports();
2814ce8d5614SIntel 	printf("Done\n");
2815ce8d5614SIntel }
2816ce8d5614SIntel 
2817edab33b1STetsuya Mukawa void
281897f1e196SWei Dai reset_port(portid_t pid)
281997f1e196SWei Dai {
282097f1e196SWei Dai 	int diag;
282197f1e196SWei Dai 	portid_t pi;
282297f1e196SWei Dai 	struct rte_port *port;
282397f1e196SWei Dai 
282497f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
282597f1e196SWei Dai 		return;
282697f1e196SWei Dai 
28271cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
28281cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
28291cde1b9aSShougang Wang 		printf("Can not reset port(s), please stop port(s) first.\n");
28301cde1b9aSShougang Wang 		return;
28311cde1b9aSShougang Wang 	}
28321cde1b9aSShougang Wang 
283397f1e196SWei Dai 	printf("Resetting ports...\n");
283497f1e196SWei Dai 
283597f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
283697f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
283797f1e196SWei Dai 			continue;
283897f1e196SWei Dai 
283997f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
284097f1e196SWei Dai 			printf("Please remove port %d from forwarding "
284197f1e196SWei Dai 			       "configuration.\n", pi);
284297f1e196SWei Dai 			continue;
284397f1e196SWei Dai 		}
284497f1e196SWei Dai 
284597f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
284697f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
284797f1e196SWei Dai 			       pi);
284897f1e196SWei Dai 			continue;
284997f1e196SWei Dai 		}
285097f1e196SWei Dai 
285197f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
285297f1e196SWei Dai 		if (diag == 0) {
285397f1e196SWei Dai 			port = &ports[pi];
285497f1e196SWei Dai 			port->need_reconfig = 1;
285597f1e196SWei Dai 			port->need_reconfig_queues = 1;
285697f1e196SWei Dai 		} else {
285797f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
285897f1e196SWei Dai 		}
285997f1e196SWei Dai 	}
286097f1e196SWei Dai 
286197f1e196SWei Dai 	printf("Done\n");
286297f1e196SWei Dai }
286397f1e196SWei Dai 
/*
 * Probe and attach the device(s) designated by @identifier (a devargs
 * string, e.g. a PCI address or vdev name).
 *
 * Two setup modes exist:
 *  - event mode (setup_on_probe_event != 0): new ports are detected by
 *    the RTE_ETH_EVENT_NEW handler; here we only finish the setup of
 *    ports flagged as needing it.
 *  - iterator mode: walk all ports matching the devargs and set up the
 *    ones that were not already attached.
 */
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}
2900c9cce428SThomas Monjalon 
2901c9cce428SThomas Monjalon static void
2902c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
2903c9cce428SThomas Monjalon {
2904c9cce428SThomas Monjalon 	unsigned int socket_id;
290534fc1051SIvan Ilchenko 	int ret;
2906edab33b1STetsuya Mukawa 
2907931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
290829841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
2909931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
291029841336SPhil Yang 		socket_id = socket_ids[0];
2911931126baSBernard Iremonger 	reconfig(pi, socket_id);
291234fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
291334fc1051SIvan Ilchenko 	if (ret != 0)
291434fc1051SIvan Ilchenko 		printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
291534fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
2916edab33b1STetsuya Mukawa 
29174f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
29184f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
29194f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
29204f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
2921edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2922edab33b1STetsuya Mukawa 
2923edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2924edab33b1STetsuya Mukawa 	printf("Done\n");
2925edab33b1STetsuya Mukawa }
2926edab33b1STetsuya Mukawa 
/*
 * Detach (hot-unplug) a whole rte_device.  Every ethdev port belonging
 * to the device must be stopped or closed first; flow rules of
 * non-closed ports are flushed before removal.  On success the global
 * port lists are compacted.
 */
static void
detach_device(struct rte_device *dev)
{
	portid_t sibling;

	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	printf("Removing a device...\n");

	/* Refuse removal while any sibling port is still running. */
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
				printf("Port %u not stopped\n", sibling);
				return;
			}
			port_flow_flush(sibling);
		}
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	remove_invalid_ports();

	printf("Device is detached\n");
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}
29605f4ec54fSChen Jing D(Mark) 
2961af75078fSIntel void
29620654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
29630654d4a8SThomas Monjalon {
29640654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
29650654d4a8SThomas Monjalon 		return;
29660654d4a8SThomas Monjalon 
29670654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
29680654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
29690654d4a8SThomas Monjalon 			printf("Port not stopped\n");
29700654d4a8SThomas Monjalon 			return;
29710654d4a8SThomas Monjalon 		}
29720654d4a8SThomas Monjalon 		printf("Port was not closed\n");
29730654d4a8SThomas Monjalon 	}
29740654d4a8SThomas Monjalon 
29750654d4a8SThomas Monjalon 	detach_device(rte_eth_devices[port_id].device);
29760654d4a8SThomas Monjalon }
29770654d4a8SThomas Monjalon 
29780654d4a8SThomas Monjalon void
29795edee5f6SThomas Monjalon detach_devargs(char *identifier)
298055e51c96SNithin Dabilpuram {
298155e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
298255e51c96SNithin Dabilpuram 	struct rte_devargs da;
298355e51c96SNithin Dabilpuram 	portid_t port_id;
298455e51c96SNithin Dabilpuram 
298555e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
298655e51c96SNithin Dabilpuram 
298755e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
298855e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
298955e51c96SNithin Dabilpuram 		printf("cannot parse identifier\n");
299055e51c96SNithin Dabilpuram 		if (da.args)
299155e51c96SNithin Dabilpuram 			free(da.args);
299255e51c96SNithin Dabilpuram 		return;
299355e51c96SNithin Dabilpuram 	}
299455e51c96SNithin Dabilpuram 
299555e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
299655e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
299755e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
299855e51c96SNithin Dabilpuram 				printf("Port %u not stopped\n", port_id);
2999149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
300055e51c96SNithin Dabilpuram 				return;
300155e51c96SNithin Dabilpuram 			}
300255e51c96SNithin Dabilpuram 			port_flow_flush(port_id);
300355e51c96SNithin Dabilpuram 		}
300455e51c96SNithin Dabilpuram 	}
300555e51c96SNithin Dabilpuram 
300655e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
300755e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
300855e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
300955e51c96SNithin Dabilpuram 		return;
301055e51c96SNithin Dabilpuram 	}
301155e51c96SNithin Dabilpuram 
301255e51c96SNithin Dabilpuram 	remove_invalid_ports();
301355e51c96SNithin Dabilpuram 
301455e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
301555e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
301655e51c96SNithin Dabilpuram 	printf("Done\n");
301755e51c96SNithin Dabilpuram }
301855e51c96SNithin Dabilpuram 
/*
 * Cleanly shut testpmd down: stop forwarding, stop and close every
 * port, tear down hot-plug monitoring and release the mbuf mempools.
 */
void
pmd_test_exit(void)
{
	portid_t pt_id;
	unsigned int i;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

	/* Unmap anonymous mempool memory from devices before closing them. */
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
	if (ports != NULL) {
		/* No point waiting for link status while tearing down. */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		/* Tear down device event handling; bail out on any failure. */
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}
	/* Release the mbuf pools last, after all ports are closed. */
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i])
			rte_mempool_free(mempools[i]);
	}

	printf("\nBye...\n");
}
3080af75078fSIntel 
/* Handler type for a named test command (takes no arguments). */
typedef void (*cmd_func_t)(void);
/* Association of a test command name with its handler. */
struct pmd_test_command {
	const char *cmd_name;	/* command name to match */
	cmd_func_t cmd_func;	/* handler invoked for that command */
};
3086af75078fSIntel 
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	/*
	 * Poll all masked ports until every link is up or the timeout
	 * expires; one extra pass with print_flag set reports the final
	 * status of each port.
	 */
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				/* Query failure counts as "not up yet". */
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}

		/* With LSC interrupts enabled, one pass is enough. */
		if (lsc_interrupt)
			break;
	}
}
3146af75078fSIntel 
/*
 * Deferred handler for RTE_ETH_EVENT_INTR_RMV, run from an EAL alarm.
 * Pauses forwarding if the removed port was in use, stops and closes
 * the port, detaches the underlying device, then resumes forwarding if
 * it had been paused.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_device *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* The link is going away; do not wait on link status in stop_port. */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	/* Save rte_device pointer before closing ethdev port */
	dev = rte_eth_devices[port_id].device;
	close_port(port_id);
	detach_device(dev); /* might be already removed or have more ports */

	if (need_to_start)
		start_packet_forwarding(0);
}
3173284c908cSGaetan Rivet 
317476ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3175d6af1a13SBernard Iremonger static int
3176f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3177d6af1a13SBernard Iremonger 		  void *ret_param)
317876ad4a2dSGaetan Rivet {
317976ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3180d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
318176ad4a2dSGaetan Rivet 
318276ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
3183f431e010SHerakliusz Lipiec 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
318476ad4a2dSGaetan Rivet 			port_id, __func__, type);
318576ad4a2dSGaetan Rivet 		fflush(stderr);
31863af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3187f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
318897b5d8b5SThomas Monjalon 			eth_event_desc[type]);
318976ad4a2dSGaetan Rivet 		fflush(stdout);
319076ad4a2dSGaetan Rivet 	}
3191284c908cSGaetan Rivet 
3192284c908cSGaetan Rivet 	switch (type) {
31934f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
31944f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
31954f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
31964f1ed78eSThomas Monjalon 		break;
3197284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
31984f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
31994f1ed78eSThomas Monjalon 			break;
3200284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3201cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3202284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
3203284c908cSGaetan Rivet 		break;
320485c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
320585c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
320685c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
320785c6571cSThomas Monjalon 		break;
3208284c908cSGaetan Rivet 	default:
3209284c908cSGaetan Rivet 		break;
3210284c908cSGaetan Rivet 	}
3211d6af1a13SBernard Iremonger 	return 0;
321276ad4a2dSGaetan Rivet }
321376ad4a2dSGaetan Rivet 
321497b5d8b5SThomas Monjalon static int
321597b5d8b5SThomas Monjalon register_eth_event_callback(void)
321697b5d8b5SThomas Monjalon {
321797b5d8b5SThomas Monjalon 	int ret;
321897b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
321997b5d8b5SThomas Monjalon 
322097b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
322197b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
322297b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
322397b5d8b5SThomas Monjalon 				event,
322497b5d8b5SThomas Monjalon 				eth_event_callback,
322597b5d8b5SThomas Monjalon 				NULL);
322697b5d8b5SThomas Monjalon 		if (ret != 0) {
322797b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
322897b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
322997b5d8b5SThomas Monjalon 			return -1;
323097b5d8b5SThomas Monjalon 		}
323197b5d8b5SThomas Monjalon 	}
323297b5d8b5SThomas Monjalon 
323397b5d8b5SThomas Monjalon 	return 0;
323497b5d8b5SThomas Monjalon }
323597b5d8b5SThomas Monjalon 
3236fb73e096SJeff Guo /* This function is used by the interrupt thread */
3237fb73e096SJeff Guo static void
3238cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3239fb73e096SJeff Guo 			     __rte_unused void *arg)
3240fb73e096SJeff Guo {
32412049c511SJeff Guo 	uint16_t port_id;
32422049c511SJeff Guo 	int ret;
32432049c511SJeff Guo 
3244fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
3245fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
3246fb73e096SJeff Guo 			__func__, type);
3247fb73e096SJeff Guo 		fflush(stderr);
3248fb73e096SJeff Guo 	}
3249fb73e096SJeff Guo 
3250fb73e096SJeff Guo 	switch (type) {
3251fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
3252cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3253fb73e096SJeff Guo 			device_name);
32542049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
32552049c511SJeff Guo 		if (ret) {
32562049c511SJeff Guo 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
32572049c511SJeff Guo 				device_name);
32582049c511SJeff Guo 			return;
32592049c511SJeff Guo 		}
3260cc1bf307SJeff Guo 		/*
3261cc1bf307SJeff Guo 		 * Because the user's callback is invoked in eal interrupt
3262cc1bf307SJeff Guo 		 * callback, the interrupt callback need to be finished before
3263cc1bf307SJeff Guo 		 * it can be unregistered when detaching device. So finish
3264cc1bf307SJeff Guo 		 * callback soon and use a deferred removal to detach device
3265cc1bf307SJeff Guo 		 * is need. It is a workaround, once the device detaching be
3266cc1bf307SJeff Guo 		 * moved into the eal in the future, the deferred removal could
3267cc1bf307SJeff Guo 		 * be deleted.
3268cc1bf307SJeff Guo 		 */
3269cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3270cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3271cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
3272cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
3273fb73e096SJeff Guo 		break;
3274fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
3275fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3276fb73e096SJeff Guo 			device_name);
3277fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
3278fb73e096SJeff Guo 		 * begin to attach port.
3279fb73e096SJeff Guo 		 */
3280fb73e096SJeff Guo 		break;
3281fb73e096SJeff Guo 	default:
3282fb73e096SJeff Guo 		break;
3283fb73e096SJeff Guo 	}
3284fb73e096SJeff Guo }
3285fb73e096SJeff Guo 
3286f2c5125aSPablo de Lara static void
3287f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
3288f2c5125aSPablo de Lara {
3289d44f8a48SQi Zhang 	uint16_t qid;
32905e91aeefSWei Zhao 	uint64_t offloads;
3291f2c5125aSPablo de Lara 
3292d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
32935e91aeefSWei Zhao 		offloads = port->rx_conf[qid].offloads;
3294d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
3295575e0fd1SWei Zhao 		if (offloads != 0)
3296575e0fd1SWei Zhao 			port->rx_conf[qid].offloads = offloads;
3297d44f8a48SQi Zhang 
3298d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3299f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3300d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3301f2c5125aSPablo de Lara 
3302f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3303d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3304f2c5125aSPablo de Lara 
3305f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3306d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3307f2c5125aSPablo de Lara 
3308f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3309d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3310f2c5125aSPablo de Lara 
3311f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3312d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
3313f2c5125aSPablo de Lara 
3314d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3315d44f8a48SQi Zhang 	}
3316d44f8a48SQi Zhang 
3317d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
33185e91aeefSWei Zhao 		offloads = port->tx_conf[qid].offloads;
3319d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
3320575e0fd1SWei Zhao 		if (offloads != 0)
3321575e0fd1SWei Zhao 			port->tx_conf[qid].offloads = offloads;
3322d44f8a48SQi Zhang 
3323d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3324f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3325d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3326f2c5125aSPablo de Lara 
3327f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3328d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3329f2c5125aSPablo de Lara 
3330f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3331d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3332f2c5125aSPablo de Lara 
3333f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3334d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3335f2c5125aSPablo de Lara 
3336f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3337d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3338d44f8a48SQi Zhang 
3339d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
3340d44f8a48SQi Zhang 	}
3341f2c5125aSPablo de Lara }
3342f2c5125aSPablo de Lara 
3343013af9b6SIntel void
3344013af9b6SIntel init_port_config(void)
3345013af9b6SIntel {
3346013af9b6SIntel 	portid_t pid;
3347013af9b6SIntel 	struct rte_port *port;
33486f51deb9SIvan Ilchenko 	int ret;
3349013af9b6SIntel 
33507d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
3351013af9b6SIntel 		port = &ports[pid];
3352013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
33536f51deb9SIvan Ilchenko 
33546f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
33556f51deb9SIvan Ilchenko 		if (ret != 0)
33566f51deb9SIvan Ilchenko 			return;
33576f51deb9SIvan Ilchenko 
33583ce690d3SBruce Richardson 		if (nb_rxq > 1) {
3359013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
336090892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3361422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
3362af75078fSIntel 		} else {
3363013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3364013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3365af75078fSIntel 		}
33663ce690d3SBruce Richardson 
33675f592039SJingjing Wu 		if (port->dcb_flag == 0) {
33683ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3369f9295aa2SXiaoyu Min 				port->dev_conf.rxmode.mq_mode =
3370f9295aa2SXiaoyu Min 					(enum rte_eth_rx_mq_mode)
3371f9295aa2SXiaoyu Min 						(rx_mq_mode & ETH_MQ_RX_RSS);
33723ce690d3SBruce Richardson 			else
33733ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
33743ce690d3SBruce Richardson 		}
33753ce690d3SBruce Richardson 
3376f2c5125aSPablo de Lara 		rxtx_port_config(port);
3377013af9b6SIntel 
3378a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3379a5279d25SIgor Romanov 		if (ret != 0)
3380a5279d25SIgor Romanov 			return;
3381013af9b6SIntel 
3382a8d0d473SBruce Richardson #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3383e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
33847b7e5ba7SIntel #endif
33858ea656f8SGaetan Rivet 
33868ea656f8SGaetan Rivet 		if (lsc_interrupt &&
33878ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
33888ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
33898ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
3390284c908cSGaetan Rivet 		if (rmv_interrupt &&
3391284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
3392284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
3393284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
3394013af9b6SIntel 	}
3395013af9b6SIntel }
3396013af9b6SIntel 
339741b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
339841b05095SBernard Iremonger {
339941b05095SBernard Iremonger 	struct rte_port *port;
340041b05095SBernard Iremonger 
340141b05095SBernard Iremonger 	port = &ports[slave_pid];
340241b05095SBernard Iremonger 	port->slave_flag = 1;
340341b05095SBernard Iremonger }
340441b05095SBernard Iremonger 
340541b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
340641b05095SBernard Iremonger {
340741b05095SBernard Iremonger 	struct rte_port *port;
340841b05095SBernard Iremonger 
340941b05095SBernard Iremonger 	port = &ports[slave_pid];
341041b05095SBernard Iremonger 	port->slave_flag = 0;
341141b05095SBernard Iremonger }
341241b05095SBernard Iremonger 
34130e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
34140e545d30SBernard Iremonger {
34150e545d30SBernard Iremonger 	struct rte_port *port;
34160e545d30SBernard Iremonger 
34170e545d30SBernard Iremonger 	port = &ports[slave_pid];
3418b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
3419b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3420b8b8b344SMatan Azrad 		return 1;
3421b8b8b344SMatan Azrad 	return 0;
34220e545d30SBernard Iremonger }
34230e545d30SBernard Iremonger 
/*
 * VLAN IDs installed as Rx VLAN filters and used to build the VMDQ+DCB
 * pool-to-VLAN map in the DCB configuration path (get_eth_dcb_conf /
 * init_port_dcb_config); one entry per possible pool (up to 32).
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
3430013af9b6SIntel 
/*
 * Fill @eth_conf with a DCB configuration for port @pid.
 *
 * Two layouts are produced depending on @dcb_mode:
 *  - DCB_VT_ENABLED: VMDQ+DCB — pools are derived from @num_tcs
 *    (4 TCs -> 32 pools, otherwise 16) and each pool is bound to one VLAN
 *    from the vlan_tags[] table above.
 *  - otherwise: plain DCB with RSS — the port's current RSS hash config is
 *    read back and re-applied on top of ETH_MQ_RX_DCB_RSS.
 *
 * @pfc_en non-zero additionally advertises priority flow control support.
 * Returns 0 on success, or the negative error from
 * rte_eth_dev_rss_hash_conf_get() in the non-VT path.
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* Pool count is dictated by the TC count: 4 TCs leave room
		 * for 32 pools, 8 TCs for 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* One VLAN per pool, pools selected round-robin by bit mask. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Map the 8 user priorities onto the TCs round-robin. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		/* Preserve the port's current RSS hash setup. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Map the 8 user priorities onto the TCs round-robin. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
3510013af9b6SIntel 
3511013af9b6SIntel int
35121a572499SJingjing Wu init_port_dcb_config(portid_t pid,
35131a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
35141a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
35151a572499SJingjing Wu 		     uint8_t pfc_en)
3516013af9b6SIntel {
3517013af9b6SIntel 	struct rte_eth_conf port_conf;
3518013af9b6SIntel 	struct rte_port *rte_port;
3519013af9b6SIntel 	int retval;
3520013af9b6SIntel 	uint16_t i;
3521013af9b6SIntel 
35222a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3523013af9b6SIntel 
3524013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3525013af9b6SIntel 	/* Enter DCB configuration status */
3526013af9b6SIntel 	dcb_config = 1;
3527013af9b6SIntel 
3528d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3529d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
3530d5354e89SYanglong Wu 
3531013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
3532ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3533013af9b6SIntel 	if (retval < 0)
3534013af9b6SIntel 		return retval;
35350074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3536013af9b6SIntel 
35372f203d44SQi Zhang 	/* re-configure the device . */
35382b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
35392b0e0ebaSChenbo Xia 	if (retval < 0)
35402b0e0ebaSChenbo Xia 		return retval;
35416f51deb9SIvan Ilchenko 
35426f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
35436f51deb9SIvan Ilchenko 	if (retval != 0)
35446f51deb9SIvan Ilchenko 		return retval;
35452a977b89SWenzhuo Lu 
35462a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
35472a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
35482a977b89SWenzhuo Lu 	 */
35492a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
35502a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
35512a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
35522a977b89SWenzhuo Lu 			" for port %d.", pid);
35532a977b89SWenzhuo Lu 		return -1;
35542a977b89SWenzhuo Lu 	}
35552a977b89SWenzhuo Lu 
35562a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
35572a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
35582a977b89SWenzhuo Lu 	 */
35592a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
356086ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
356186ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
356286ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
356386ef65eeSBernard Iremonger 		} else {
35642a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
35652a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
356686ef65eeSBernard Iremonger 		}
35672a977b89SWenzhuo Lu 	} else {
35682a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
35692a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
35702a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
35712a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
35722a977b89SWenzhuo Lu 		} else {
35732a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
35742a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
35752a977b89SWenzhuo Lu 
35762a977b89SWenzhuo Lu 		}
35772a977b89SWenzhuo Lu 	}
35782a977b89SWenzhuo Lu 	rx_free_thresh = 64;
35792a977b89SWenzhuo Lu 
3580013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3581013af9b6SIntel 
3582f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3583013af9b6SIntel 	/* VLAN filter */
35840074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
35851a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3586013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3587013af9b6SIntel 
3588a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3589a5279d25SIgor Romanov 	if (retval != 0)
3590a5279d25SIgor Romanov 		return retval;
3591a5279d25SIgor Romanov 
35927741e4cfSIntel 	rte_port->dcb_flag = 1;
35937741e4cfSIntel 
3594013af9b6SIntel 	return 0;
3595af75078fSIntel }
3596af75078fSIntel 
3597ffc468ffSTetsuya Mukawa static void
3598ffc468ffSTetsuya Mukawa init_port(void)
3599ffc468ffSTetsuya Mukawa {
36001b9f2746SGregory Etelson 	int i;
36011b9f2746SGregory Etelson 
3602ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3603ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3604ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3605ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3606ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3607ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3608ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3609ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3610ffc468ffSTetsuya Mukawa 	}
36111b9f2746SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
36121b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
361329841336SPhil Yang 	/* Initialize ports NUMA structures */
361429841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
361529841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
361629841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3617ffc468ffSTetsuya Mukawa }
3618ffc468ffSTetsuya Mukawa 
/*
 * Common teardown used on forced termination: run the PMD test exit path
 * (pmd_test_exit) and then terminate the interactive prompt (prompt_exit).
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
3625d3a274ceSZhihong Wang 
3626d3a274ceSZhihong Wang static void
3627cfea1f30SPablo de Lara print_stats(void)
3628cfea1f30SPablo de Lara {
3629cfea1f30SPablo de Lara 	uint8_t i;
3630cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3631cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3632cfea1f30SPablo de Lara 
3633cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3634cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3635cfea1f30SPablo de Lara 
3636cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3637cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3638cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3639683d1e82SIgor Romanov 
3640683d1e82SIgor Romanov 	fflush(stdout);
3641cfea1f30SPablo de Lara }
3642cfea1f30SPablo de Lara 
/*
 * SIGINT/SIGTERM handler: uninitialize the optional pdump/latencystats
 * facilities, run the common teardown, then re-raise the signal with its
 * default disposition so the process exits with the conventional status.
 *
 * NOTE(review): printf()/force_quit() are not async-signal-safe; this
 * relies on the signal arriving at a benign point — confirm acceptable.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIB_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
3665d3a274ceSZhihong Wang 
/*
 * testpmd entry point: initialize EAL, register event callbacks, discover
 * ports, parse testpmd arguments, start ports and forwarding, then either
 * run the interactive prompt or forward until interrupted.
 */
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	/* Install handlers before any initialization that may need cleanup. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	/* EAL consumes its own arguments; diag is how many it ate. */
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		rte_exit(EXIT_FAILURE,
			 "Secondary process type not supported.\n");

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIB_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	/* Record all probed port ids and the total count. */
	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIB_BITRATESTATS
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIB_LATENCYSTATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* Skip past the EAL arguments and parse testpmd's own. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	/* Pin all pages to avoid page faults in the datapath; non-fatal. */
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();

	/* Optional device hotplug monitoring (see dev_event_callback). */
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail  to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIB_LATENCYSTATS
	if (latencystats_enabled != 0) {
		/* NB: this inner ret shadows the function-scope one. */
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n",	ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIB_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		/* Non-interactive mode: forward until signalled or EOF. */
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			/* Loop until signal_handler() sets f_quit; prints
			 * stats roughly every stats_period seconds (the
			 * first print happens immediately since prev_time
			 * starts at 0). */
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}
3879