xref: /dpdk/app/test-pmd/testpmd.c (revision 9e6b36c34ce928d13a5182e8e76580058366bf7c)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30af75078fSIntel #include <rte_memory.h>
31af75078fSIntel #include <rte_memcpy.h>
32af75078fSIntel #include <rte_launch.h>
33af75078fSIntel #include <rte_eal.h>
34284c908cSGaetan Rivet #include <rte_alarm.h>
35af75078fSIntel #include <rte_per_lcore.h>
36af75078fSIntel #include <rte_lcore.h>
37af75078fSIntel #include <rte_atomic.h>
38af75078fSIntel #include <rte_branch_prediction.h>
39af75078fSIntel #include <rte_mempool.h>
40af75078fSIntel #include <rte_malloc.h>
41af75078fSIntel #include <rte_mbuf.h>
420e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
43af75078fSIntel #include <rte_interrupts.h>
44af75078fSIntel #include <rte_pci.h>
45af75078fSIntel #include <rte_ether.h>
46af75078fSIntel #include <rte_ethdev.h>
47edab33b1STetsuya Mukawa #include <rte_dev.h>
48af75078fSIntel #include <rte_string_fns.h>
49e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
50e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
51e261265eSRadu Nicolau #endif
52102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
53102b7329SReshma Pattan #include <rte_pdump.h>
54102b7329SReshma Pattan #endif
55938a184aSAdrien Mazarguil #include <rte_flow.h>
567e4441c8SRemy Horton #include <rte_metrics.h>
577e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
587e4441c8SRemy Horton #include <rte_bitrate.h>
597e4441c8SRemy Horton #endif
6062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6162d3216dSReshma Pattan #include <rte_latencystats.h>
6262d3216dSReshma Pattan #endif
63af75078fSIntel 
64af75078fSIntel #include "testpmd.h"
65af75078fSIntel 
66c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
67c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
69c7f5dba7SAnatoly Burakov #else
70c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
71c7f5dba7SAnatoly Burakov #endif
72c7f5dba7SAnatoly Burakov 
73c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
74c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
75c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
76c7f5dba7SAnatoly Burakov #else
77c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
78c7f5dba7SAnatoly Burakov #endif
79c7f5dba7SAnatoly Burakov 
80c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
81c7f5dba7SAnatoly Burakov 
82af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
83285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
84af75078fSIntel 
85af75078fSIntel /* use master core for command line ? */
86af75078fSIntel uint8_t interactive = 0;
87ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8899cabef0SPablo de Lara uint8_t tx_first;
8981ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
90af75078fSIntel 
91af75078fSIntel /*
92af75078fSIntel  * NUMA support configuration.
93af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
94af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
95af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
96af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
97af75078fSIntel  */
98999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
99af75078fSIntel 
100af75078fSIntel /*
101b6ea6408SIntel  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
102b6ea6408SIntel  * not configured.
103b6ea6408SIntel  */
104b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
105b6ea6408SIntel 
106b6ea6408SIntel /*
107c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
108c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
109c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
110c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
111c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
112148f963fSBruce Richardson  */
113c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
114148f963fSBruce Richardson 
115148f963fSBruce Richardson /*
11663531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11763531389SGeorgios Katsikas  * is allocated.
11863531389SGeorgios Katsikas  */
11963531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12063531389SGeorgios Katsikas 
12163531389SGeorgios Katsikas /*
12263531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12363531389SGeorgios Katsikas  * is allocated.
12463531389SGeorgios Katsikas  */
12563531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12663531389SGeorgios Katsikas 
12763531389SGeorgios Katsikas /*
12863531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
12963531389SGeorgios Katsikas  * is allocated.
13063531389SGeorgios Katsikas  */
13163531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13263531389SGeorgios Katsikas 
13363531389SGeorgios Katsikas /*
134af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
135af75078fSIntel  * forwarded.
136547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
137af75078fSIntel  * ports.
138af75078fSIntel  */
1396d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
140af75078fSIntel portid_t nb_peer_eth_addrs = 0;
141af75078fSIntel 
142af75078fSIntel /*
143af75078fSIntel  * Probed Target Environment.
144af75078fSIntel  */
145af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
146af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
147af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
148af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
149af75078fSIntel 
1504918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1514918a357SXiaoyun Li 
152af75078fSIntel /*
153af75078fSIntel  * Test Forwarding Configuration.
154af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
155af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
156af75078fSIntel  */
157af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
158af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
159af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
160af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
161af75078fSIntel 
162af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
163af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
164af75078fSIntel 
165af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
166af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
167af75078fSIntel 
/*
 * Forwarding engines: NULL-terminated table of every packet-forwarding
 * mode testpmd can run. Entries inside #ifdef guards are available only
 * when the corresponding library/PMD is compiled in.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL, /* sentinel marking the end of the table */
};
189af75078fSIntel 
190401b744dSShahaf Shuler struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
19159fcf854SShahaf Shuler uint16_t mempool_flags;
192401b744dSShahaf Shuler 
193af75078fSIntel struct fwd_config cur_fwd_config;
194af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
195bf56fce1SZhihong Wang uint32_t retry_enabled;
196bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
197bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
198af75078fSIntel 
199af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
200c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
201c8798818SIntel                                       * specified on command-line. */
202cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
203d9a191a0SPhil Yang 
204d9a191a0SPhil Yang /*
205d9a191a0SPhil Yang  * In a container, a process running with the 'stats-period' option cannot be
206d9a191a0SPhil Yang  * terminated. Set a flag to exit the stats period loop after receiving SIGINT/SIGTERM.
207d9a191a0SPhil Yang  */
208d9a191a0SPhil Yang uint8_t f_quit;
209d9a191a0SPhil Yang 
210af75078fSIntel /*
211af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
212af75078fSIntel  */
213af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
214af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
215af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
216af75078fSIntel };
217af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
218af75078fSIntel 
21979bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
22079bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
22179bec05bSKonstantin Ananyev 
22282010ef5SYongseok Koh uint8_t txonly_multi_flow;
22382010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
22482010ef5SYongseok Koh 
225af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
226e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
227af75078fSIntel 
228900550deSIntel /* current configuration is in DCB or not, 0 means it is not in DCB mode */
229900550deSIntel uint8_t dcb_config = 0;
230900550deSIntel 
231900550deSIntel /* Whether the dcb is in testing status */
232900550deSIntel uint8_t dcb_test = 0;
233900550deSIntel 
234af75078fSIntel /*
235af75078fSIntel  * Configurable number of RX/TX queues.
236af75078fSIntel  */
2371c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
238af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
239af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
240af75078fSIntel 
241af75078fSIntel /*
242af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2438599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
244af75078fSIntel  */
2458599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2468599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
247af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
248af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
249af75078fSIntel 
250f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
251af75078fSIntel /*
252af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
253af75078fSIntel  */
254af75078fSIntel 
255f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
256f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
257f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
258af75078fSIntel 
259f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
260f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
261f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
262af75078fSIntel 
263af75078fSIntel /*
264af75078fSIntel  * Configurable value of RX free threshold.
265af75078fSIntel  */
266f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
267af75078fSIntel 
268af75078fSIntel /*
269ce8d5614SIntel  * Configurable value of RX drop enable.
270ce8d5614SIntel  */
271f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
272ce8d5614SIntel 
273ce8d5614SIntel /*
274af75078fSIntel  * Configurable value of TX free threshold.
275af75078fSIntel  */
276f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
277af75078fSIntel 
278af75078fSIntel /*
279af75078fSIntel  * Configurable value of TX RS bit threshold.
280af75078fSIntel  */
281f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
282af75078fSIntel 
283af75078fSIntel /*
2843c156061SJens Freimann  * Configurable value of buffered packets before sending.
2853c156061SJens Freimann  */
2863c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2873c156061SJens Freimann 
2883c156061SJens Freimann /*
2893c156061SJens Freimann  * Configurable value of packet buffer timeout.
2903c156061SJens Freimann  */
2913c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2923c156061SJens Freimann 
2933c156061SJens Freimann /*
2943c156061SJens Freimann  * Configurable value for size of VNF internal memory area
2953c156061SJens Freimann  * used for simulating noisy neighbour behaviour
2963c156061SJens Freimann  */
2973c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
2983c156061SJens Freimann 
2993c156061SJens Freimann /*
3003c156061SJens Freimann  * Configurable value of number of random writes done in
3013c156061SJens Freimann  * VNF simulation memory area.
3023c156061SJens Freimann  */
3033c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3043c156061SJens Freimann 
3053c156061SJens Freimann /*
3063c156061SJens Freimann  * Configurable value of number of random reads done in
3073c156061SJens Freimann  * VNF simulation memory area.
3083c156061SJens Freimann  */
3093c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3103c156061SJens Freimann 
3113c156061SJens Freimann /*
3123c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3133c156061SJens Freimann  * VNF simulation memory area.
3143c156061SJens Freimann  */
3153c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3163c156061SJens Freimann 
3173c156061SJens Freimann /*
318af75078fSIntel  * Receive Side Scaling (RSS) configuration.
319af75078fSIntel  */
3208a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
321af75078fSIntel 
322af75078fSIntel /*
323af75078fSIntel  * Port topology configuration
324af75078fSIntel  */
325af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
326af75078fSIntel 
3277741e4cfSIntel /*
3287741e4cfSIntel  * Avoid flushing all the RX streams before starting forwarding.
3297741e4cfSIntel  */
3307741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3317741e4cfSIntel 
332af75078fSIntel /*
3337ee3e944SVasily Philipov  * Flow API isolated mode.
3347ee3e944SVasily Philipov  */
3357ee3e944SVasily Philipov uint8_t flow_isolate_all;
3367ee3e944SVasily Philipov 
3377ee3e944SVasily Philipov /*
338bc202406SDavid Marchand  * Avoid checking link status when starting/stopping a port.
339bc202406SDavid Marchand  */
340bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
341bc202406SDavid Marchand 
342bc202406SDavid Marchand /*
3436937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3446937d210SStephen Hemminger  */
3456937d210SStephen Hemminger uint8_t no_device_start = 0;
3466937d210SStephen Hemminger 
3476937d210SStephen Hemminger /*
3488ea656f8SGaetan Rivet  * Enable link status change notification
3498ea656f8SGaetan Rivet  */
3508ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3518ea656f8SGaetan Rivet 
3528ea656f8SGaetan Rivet /*
353284c908cSGaetan Rivet  * Enable device removal notification.
354284c908cSGaetan Rivet  */
355284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
356284c908cSGaetan Rivet 
357fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
358fb73e096SJeff Guo 
3594f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3604f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3614f1ed78eSThomas Monjalon 
362b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
363b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
364b0a9354aSPavan Nikhilesh 
/* Pretty printing of ethdev events: indexed by enum rte_eth_event_type,
 * with a NULL terminator at the RTE_ETH_EVENT_MAX slot.
 */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_MAX] = NULL,
};
37997b5d8b5SThomas Monjalon 
380284c908cSGaetan Rivet /*
3813af72783SGaetan Rivet  * Display or mask ether events
3823af72783SGaetan Rivet  * Default to all events except VF_MBOX
3833af72783SGaetan Rivet  */
3843af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3853af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3863af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3873af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
388badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3893af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3903af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
391e505d84cSAnatoly Burakov /*
392e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
393e505d84cSAnatoly Burakov  */
394e505d84cSAnatoly Burakov int do_mlockall = 0;
3953af72783SGaetan Rivet 
3963af72783SGaetan Rivet /*
3977b7e5ba7SIntel  * NIC bypass mode configuration options.
3987b7e5ba7SIntel  */
3997b7e5ba7SIntel 
40050c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
4017b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
402e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4037b7e5ba7SIntel #endif
4047b7e5ba7SIntel 
405e261265eSRadu Nicolau 
40662d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
40762d3216dSReshma Pattan 
40862d3216dSReshma Pattan /*
40962d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
41062d3216dSReshma Pattan  */
41162d3216dSReshma Pattan uint8_t latencystats_enabled;
41262d3216dSReshma Pattan 
41362d3216dSReshma Pattan /*
41462d3216dSReshma Pattan  * Lcore ID to service latency statistics.
41562d3216dSReshma Pattan  */
41662d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
41762d3216dSReshma Pattan 
41862d3216dSReshma Pattan #endif
41962d3216dSReshma Pattan 
4207b7e5ba7SIntel /*
421af75078fSIntel  * Ethernet device configuration.
422af75078fSIntel  */
423af75078fSIntel struct rte_eth_rxmode rx_mode = {
42435b2d13fSOlivier Matz 	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
42535b2d13fSOlivier Matz 		/**< Default maximum frame length. */
426af75078fSIntel };
427af75078fSIntel 
42807e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
42907e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
43007e5f7bdSShahaf Shuler };
431fd8c20aaSShahaf Shuler 
/* Flow Director configuration: disabled by default (RTE_FDIR_MODE_NONE),
 * with wide-open match masks on all supported fields.
 * NOTE(review): vlan_tci_mask is 0xFFEF, i.e. one TCI bit is deliberately
 * excluded from matching — confirm against the target drivers.
 */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
454af75078fSIntel 
4552950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
456af75078fSIntel 
457ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
458ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
459ed30d9b6SIntel 
460ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
461ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
462ed30d9b6SIntel 
463ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
464ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
465ed30d9b6SIntel 
466a4fd5eeeSElza Mathew /*
467a4fd5eeeSElza Mathew  * Display zero values by default for xstats
468a4fd5eeeSElza Mathew  */
469a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
470a4fd5eeeSElza Mathew 
471c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
472c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4737acf894dSStephen Hurd 
474e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4757e4441c8SRemy Horton /* Bitrate statistics */
4767e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
477e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
478e25e6c70SRemy Horton uint8_t bitrate_enabled;
479e25e6c70SRemy Horton #endif
4807e4441c8SRemy Horton 
481b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
482b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
483b40f8d78SJiayu Hu 
484ed30d9b6SIntel /* Forward function declarations */
485c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
48628caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
48728caa76aSZhiyong Yang 						   struct rte_port *port);
488edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
489f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
49076ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
491d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
492cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
493fb73e096SJeff Guo 				enum rte_dev_event_type type,
494fb73e096SJeff Guo 				void *param);
495ce8d5614SIntel 
496ce8d5614SIntel /*
497ce8d5614SIntel  * Check if all the ports are started.
498ce8d5614SIntel  * If yes, return positive value. If not, return zero.
499ce8d5614SIntel  */
500ce8d5614SIntel static int all_ports_started(void);
501ed30d9b6SIntel 
50252f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
50335b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
50452f38a20SJiayu Hu 
505af75078fSIntel /*
50698a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
507c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
508c9cafcc8SShahaf Shuler  */
509c9cafcc8SShahaf Shuler int
510c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
511c9cafcc8SShahaf Shuler {
512c9cafcc8SShahaf Shuler 	unsigned int i;
513c9cafcc8SShahaf Shuler 
514c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
515c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
516c9cafcc8SShahaf Shuler 			return 0;
517c9cafcc8SShahaf Shuler 	}
518c9cafcc8SShahaf Shuler 	return 1;
519c9cafcc8SShahaf Shuler }
520c9cafcc8SShahaf Shuler 
521c9cafcc8SShahaf Shuler /*
522af75078fSIntel  * Setup default configuration.
523af75078fSIntel  */
524af75078fSIntel static void
525af75078fSIntel set_default_fwd_lcores_config(void)
526af75078fSIntel {
527af75078fSIntel 	unsigned int i;
528af75078fSIntel 	unsigned int nb_lc;
5297acf894dSStephen Hurd 	unsigned int sock_num;
530af75078fSIntel 
531af75078fSIntel 	nb_lc = 0;
532af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
533dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
534dbfb8ec7SPhil Yang 			continue;
535c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
536c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
537c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
538c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
539c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
540c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
541c9cafcc8SShahaf Shuler 			}
542c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5437acf894dSStephen Hurd 		}
544f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
545f54fe5eeSStephen Hurd 			continue;
546f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
547af75078fSIntel 	}
548af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
549af75078fSIntel 	nb_cfg_lcores = nb_lcores;
550af75078fSIntel 	nb_fwd_lcores = 1;
551af75078fSIntel }
552af75078fSIntel 
553af75078fSIntel static void
554af75078fSIntel set_def_peer_eth_addrs(void)
555af75078fSIntel {
556af75078fSIntel 	portid_t i;
557af75078fSIntel 
558af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
55935b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
560af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
561af75078fSIntel 	}
562af75078fSIntel }
563af75078fSIntel 
564af75078fSIntel static void
565af75078fSIntel set_default_fwd_ports_config(void)
566af75078fSIntel {
567af75078fSIntel 	portid_t pt_id;
56865a7360cSMatan Azrad 	int i = 0;
569af75078fSIntel 
570effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
57165a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
572af75078fSIntel 
573effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
574effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
575effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
576effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
577effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
578effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
579effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
580effdb8bbSPhil Yang 			}
581effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
582effdb8bbSPhil Yang 		}
583effdb8bbSPhil Yang 	}
584effdb8bbSPhil Yang 
585af75078fSIntel 	nb_cfg_ports = nb_ports;
586af75078fSIntel 	nb_fwd_ports = nb_ports;
587af75078fSIntel }
588af75078fSIntel 
/*
 * Reset the whole forwarding configuration to its defaults:
 * forwarding lcores, peer Ethernet addresses and forwarding ports.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
596af75078fSIntel 
597c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
598c7f5dba7SAnatoly Burakov static int
599c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
600c7f5dba7SAnatoly Burakov {
601c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
602c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
603c7f5dba7SAnatoly Burakov 
604c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
605c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
606c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
607c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
608c7f5dba7SAnatoly Burakov 	 */
609c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
610c7f5dba7SAnatoly Burakov 
611c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
612c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
613c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
614c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
615c7f5dba7SAnatoly Burakov 		return -1;
616c7f5dba7SAnatoly Burakov 	}
617c7f5dba7SAnatoly Burakov 
618c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
619c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
620c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
621c7f5dba7SAnatoly Burakov 
622c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
623c7f5dba7SAnatoly Burakov 
624c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
625c7f5dba7SAnatoly Burakov 
626c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
627c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
628c7f5dba7SAnatoly Burakov 		return -1;
629c7f5dba7SAnatoly Burakov 	}
630c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
631c7f5dba7SAnatoly Burakov 
632c7f5dba7SAnatoly Burakov 	return 0;
633c7f5dba7SAnatoly Burakov }
634c7f5dba7SAnatoly Burakov 
635c7f5dba7SAnatoly Burakov static int
636c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
637c7f5dba7SAnatoly Burakov {
638c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
639c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
640c7f5dba7SAnatoly Burakov 	 */
6419d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
642c7f5dba7SAnatoly Burakov 
643c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
644c7f5dba7SAnatoly Burakov }
645c7f5dba7SAnatoly Burakov 
646c7f5dba7SAnatoly Burakov static void *
647c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
648c7f5dba7SAnatoly Burakov {
649c7f5dba7SAnatoly Burakov 	void *addr;
650c7f5dba7SAnatoly Burakov 	int flags;
651c7f5dba7SAnatoly Burakov 
652c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
653c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
654c7f5dba7SAnatoly Burakov 	if (huge)
655c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
656c7f5dba7SAnatoly Burakov 
657c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
658c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
659c7f5dba7SAnatoly Burakov 		return NULL;
660c7f5dba7SAnatoly Burakov 
661c7f5dba7SAnatoly Burakov 	return addr;
662c7f5dba7SAnatoly Burakov }
663c7f5dba7SAnatoly Burakov 
664c7f5dba7SAnatoly Burakov struct extmem_param {
665c7f5dba7SAnatoly Burakov 	void *addr;
666c7f5dba7SAnatoly Burakov 	size_t len;
667c7f5dba7SAnatoly Burakov 	size_t pgsz;
668c7f5dba7SAnatoly Burakov 	rte_iova_t *iova_table;
669c7f5dba7SAnatoly Burakov 	unsigned int iova_table_len;
670c7f5dba7SAnatoly Burakov };
671c7f5dba7SAnatoly Burakov 
672c7f5dba7SAnatoly Burakov static int
673c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
674c7f5dba7SAnatoly Burakov 		bool huge)
675c7f5dba7SAnatoly Burakov {
676c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
677c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
678c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
679c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
680c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
681c7f5dba7SAnatoly Burakov 	void *addr;
682c7f5dba7SAnatoly Burakov 	int ret;
683c7f5dba7SAnatoly Burakov 
684c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
685c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
686c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
687c7f5dba7SAnatoly Burakov 			continue;
688c7f5dba7SAnatoly Burakov 
689c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
690c7f5dba7SAnatoly Burakov 
691c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
692c7f5dba7SAnatoly Burakov 		if (!huge)
693c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
694c7f5dba7SAnatoly Burakov 
695c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
696c7f5dba7SAnatoly Burakov 		if (ret < 0) {
697c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
698c7f5dba7SAnatoly Burakov 			return -1;
699c7f5dba7SAnatoly Burakov 		}
700c7f5dba7SAnatoly Burakov 
701c7f5dba7SAnatoly Burakov 		/* allocate our memory */
702c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
703c7f5dba7SAnatoly Burakov 
704c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
705c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
706c7f5dba7SAnatoly Burakov 		 * try another one.
707c7f5dba7SAnatoly Burakov 		 */
708c7f5dba7SAnatoly Burakov 		if (addr == NULL)
709c7f5dba7SAnatoly Burakov 			continue;
710c7f5dba7SAnatoly Burakov 
711c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
712c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
713c7f5dba7SAnatoly Burakov 
714c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
715c7f5dba7SAnatoly Burakov 
716c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
717c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
718c7f5dba7SAnatoly Burakov 			goto fail;
719c7f5dba7SAnatoly Burakov 		}
720c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
721c7f5dba7SAnatoly Burakov 		if (!huge)
722c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
723c7f5dba7SAnatoly Burakov 
724c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
725c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
726c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
727c7f5dba7SAnatoly Burakov 			size_t offset;
728c7f5dba7SAnatoly Burakov 			void *cur;
729c7f5dba7SAnatoly Burakov 
730c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
731c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
732c7f5dba7SAnatoly Burakov 
733c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
734c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
735c7f5dba7SAnatoly Burakov 
736c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
737c7f5dba7SAnatoly Burakov 
738c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
739c7f5dba7SAnatoly Burakov 		}
740c7f5dba7SAnatoly Burakov 
741c7f5dba7SAnatoly Burakov 		break;
742c7f5dba7SAnatoly Burakov 	}
743c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
744c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
745c7f5dba7SAnatoly Burakov 		return -1;
746c7f5dba7SAnatoly Burakov 
747c7f5dba7SAnatoly Burakov 	param->addr = addr;
748c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
749c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
750c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
751c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
752c7f5dba7SAnatoly Burakov 
753c7f5dba7SAnatoly Burakov 	return 0;
754c7f5dba7SAnatoly Burakov fail:
755c7f5dba7SAnatoly Burakov 	if (iovas)
756c7f5dba7SAnatoly Burakov 		free(iovas);
757c7f5dba7SAnatoly Burakov 	if (addr)
758c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
759c7f5dba7SAnatoly Burakov 
760c7f5dba7SAnatoly Burakov 	return -1;
761c7f5dba7SAnatoly Burakov }
762c7f5dba7SAnatoly Burakov 
763c7f5dba7SAnatoly Burakov static int
764c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
765c7f5dba7SAnatoly Burakov {
766c7f5dba7SAnatoly Burakov 	struct extmem_param param;
767c7f5dba7SAnatoly Burakov 	int socket_id, ret;
768c7f5dba7SAnatoly Burakov 
769c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
770c7f5dba7SAnatoly Burakov 
771c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
772c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
773c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
774c7f5dba7SAnatoly Burakov 		/* create our heap */
775c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
776c7f5dba7SAnatoly Burakov 		if (ret < 0) {
777c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
778c7f5dba7SAnatoly Burakov 			return -1;
779c7f5dba7SAnatoly Burakov 		}
780c7f5dba7SAnatoly Burakov 	}
781c7f5dba7SAnatoly Burakov 
782c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
783c7f5dba7SAnatoly Burakov 	if (ret < 0) {
784c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
785c7f5dba7SAnatoly Burakov 		return -1;
786c7f5dba7SAnatoly Burakov 	}
787c7f5dba7SAnatoly Burakov 
788c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
789c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
790c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
791c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
792c7f5dba7SAnatoly Burakov 
793c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
794c7f5dba7SAnatoly Burakov 
795c7f5dba7SAnatoly Burakov 	/* not needed any more */
796c7f5dba7SAnatoly Burakov 	free(param.iova_table);
797c7f5dba7SAnatoly Burakov 
798c7f5dba7SAnatoly Burakov 	if (ret < 0) {
799c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
800c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
801c7f5dba7SAnatoly Burakov 		return -1;
802c7f5dba7SAnatoly Burakov 	}
803c7f5dba7SAnatoly Burakov 
804c7f5dba7SAnatoly Burakov 	/* success */
805c7f5dba7SAnatoly Burakov 
806c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
807c7f5dba7SAnatoly Burakov 			param.len >> 20);
808c7f5dba7SAnatoly Burakov 
809c7f5dba7SAnatoly Burakov 	return 0;
810c7f5dba7SAnatoly Burakov }
8113a0968c8SShahaf Shuler static void
8123a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8133a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8143a0968c8SShahaf Shuler {
8153a0968c8SShahaf Shuler 	uint16_t pid = 0;
8163a0968c8SShahaf Shuler 	int ret;
8173a0968c8SShahaf Shuler 
8183a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8193a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8203a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8213a0968c8SShahaf Shuler 
8223a0968c8SShahaf Shuler 		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
8233a0968c8SShahaf Shuler 					memhdr->len);
8243a0968c8SShahaf Shuler 		if (ret) {
8253a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8263a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
8273a0968c8SShahaf Shuler 				    "for device %s\n",
8283a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8293a0968c8SShahaf Shuler 		}
8303a0968c8SShahaf Shuler 	}
8313a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
8323a0968c8SShahaf Shuler 	if (ret) {
8333a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8343a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
8353a0968c8SShahaf Shuler 	}
8363a0968c8SShahaf Shuler }
8373a0968c8SShahaf Shuler 
8383a0968c8SShahaf Shuler static void
8393a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8403a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8413a0968c8SShahaf Shuler {
8423a0968c8SShahaf Shuler 	uint16_t pid = 0;
8433a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
8443a0968c8SShahaf Shuler 	int ret;
8453a0968c8SShahaf Shuler 
8463a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
8473a0968c8SShahaf Shuler 				  page_size);
8483a0968c8SShahaf Shuler 	if (ret) {
8493a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8503a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
8513a0968c8SShahaf Shuler 		return;
8523a0968c8SShahaf Shuler 	}
8533a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8543a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8553a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8563a0968c8SShahaf Shuler 
8573a0968c8SShahaf Shuler 		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
8583a0968c8SShahaf Shuler 				      memhdr->len);
8593a0968c8SShahaf Shuler 		if (ret) {
8603a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8613a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
8623a0968c8SShahaf Shuler 				    "for device %s\n",
8633a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8643a0968c8SShahaf Shuler 		}
8653a0968c8SShahaf Shuler 	}
8663a0968c8SShahaf Shuler }
867c7f5dba7SAnatoly Burakov 
868af75078fSIntel /*
869af75078fSIntel  * Configuration initialisation done once at init time.
870af75078fSIntel  */
871401b744dSShahaf Shuler static struct rte_mempool *
872af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
873af75078fSIntel 		 unsigned int socket_id)
874af75078fSIntel {
875af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
876bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
877af75078fSIntel 	uint32_t mb_size;
878af75078fSIntel 
879dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
880af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
881148f963fSBruce Richardson 
882285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
883d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
884d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
885d1eb542eSOlivier Matz 
886c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
887c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
888c7f5dba7SAnatoly Burakov 		{
889c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
890c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
891c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
892c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
893c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
894c7f5dba7SAnatoly Burakov 			break;
895c7f5dba7SAnatoly Burakov 		}
896c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
897c7f5dba7SAnatoly Burakov 		{
898b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
899c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
900148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
90159fcf854SShahaf Shuler 				socket_id, mempool_flags);
90224427bb9SOlivier Matz 			if (rte_mp == NULL)
90324427bb9SOlivier Matz 				goto err;
904b19a0c75SOlivier Matz 
905b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
906b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
907b19a0c75SOlivier Matz 				rte_mp = NULL;
90824427bb9SOlivier Matz 				goto err;
909b19a0c75SOlivier Matz 			}
910b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
911b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
9123a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
913c7f5dba7SAnatoly Burakov 			break;
914c7f5dba7SAnatoly Burakov 		}
915c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
916c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
917c7f5dba7SAnatoly Burakov 		{
918c7f5dba7SAnatoly Burakov 			int heap_socket;
919c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
920c7f5dba7SAnatoly Burakov 
921c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
922c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
923c7f5dba7SAnatoly Burakov 
924c7f5dba7SAnatoly Burakov 			heap_socket =
925c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
926c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
927c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
928c7f5dba7SAnatoly Burakov 
9290e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
9300e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
931ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
932c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
933c7f5dba7SAnatoly Burakov 					heap_socket);
934c7f5dba7SAnatoly Burakov 			break;
935c7f5dba7SAnatoly Burakov 		}
936c7f5dba7SAnatoly Burakov 	default:
937c7f5dba7SAnatoly Burakov 		{
938c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
939c7f5dba7SAnatoly Burakov 		}
940bece7b6cSChristian Ehrhardt 	}
941148f963fSBruce Richardson 
94224427bb9SOlivier Matz err:
943af75078fSIntel 	if (rte_mp == NULL) {
944d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
945d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
946d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
947148f963fSBruce Richardson 	} else if (verbose_level > 0) {
948591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
949af75078fSIntel 	}
950401b744dSShahaf Shuler 	return rte_mp;
951af75078fSIntel }
952af75078fSIntel 
95320a0286fSLiu Xiaofeng /*
95420a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
95520a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
95620a0286fSLiu Xiaofeng  */
95720a0286fSLiu Xiaofeng static int
95820a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
95920a0286fSLiu Xiaofeng {
96020a0286fSLiu Xiaofeng 	static int warning_once = 0;
96120a0286fSLiu Xiaofeng 
962c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
96320a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
96420a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
96520a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
96620a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
96720a0286fSLiu Xiaofeng 			       " --numa.\n");
96820a0286fSLiu Xiaofeng 		warning_once = 1;
96920a0286fSLiu Xiaofeng 		return -1;
97020a0286fSLiu Xiaofeng 	}
97120a0286fSLiu Xiaofeng 	return 0;
97220a0286fSLiu Xiaofeng }
97320a0286fSLiu Xiaofeng 
9743f7311baSWei Dai /*
9753f7311baSWei Dai  * Get the allowed maximum number of RX queues.
9763f7311baSWei Dai  * *pid return the port id which has minimal value of
9773f7311baSWei Dai  * max_rx_queues in all ports.
9783f7311baSWei Dai  */
9793f7311baSWei Dai queueid_t
9803f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
9813f7311baSWei Dai {
982*9e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
9836f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
9843f7311baSWei Dai 	portid_t pi;
9853f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
9863f7311baSWei Dai 
9873f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
9886f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
9896f51deb9SIvan Ilchenko 			continue;
9906f51deb9SIvan Ilchenko 
9916f51deb9SIvan Ilchenko 		max_rxq_valid = true;
9923f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
9933f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
9943f7311baSWei Dai 			*pid = pi;
9953f7311baSWei Dai 		}
9963f7311baSWei Dai 	}
9976f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
9983f7311baSWei Dai }
9993f7311baSWei Dai 
10003f7311baSWei Dai /*
10013f7311baSWei Dai  * Check input rxq is valid or not.
10023f7311baSWei Dai  * If input rxq is not greater than any of maximum number
10033f7311baSWei Dai  * of RX queues of all ports, it is valid.
10043f7311baSWei Dai  * if valid, return 0, else return -1
10053f7311baSWei Dai  */
10063f7311baSWei Dai int
10073f7311baSWei Dai check_nb_rxq(queueid_t rxq)
10083f7311baSWei Dai {
10093f7311baSWei Dai 	queueid_t allowed_max_rxq;
10103f7311baSWei Dai 	portid_t pid = 0;
10113f7311baSWei Dai 
10123f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
10133f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
10143f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
10153f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
10163f7311baSWei Dai 		       rxq,
10173f7311baSWei Dai 		       allowed_max_rxq,
10183f7311baSWei Dai 		       pid);
10193f7311baSWei Dai 		return -1;
10203f7311baSWei Dai 	}
10213f7311baSWei Dai 	return 0;
10223f7311baSWei Dai }
10233f7311baSWei Dai 
102436db4f6cSWei Dai /*
102536db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
102636db4f6cSWei Dai  * *pid return the port id which has minimal value of
102736db4f6cSWei Dai  * max_tx_queues in all ports.
102836db4f6cSWei Dai  */
102936db4f6cSWei Dai queueid_t
103036db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
103136db4f6cSWei Dai {
1032*9e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
10336f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
103436db4f6cSWei Dai 	portid_t pi;
103536db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
103636db4f6cSWei Dai 
103736db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
10386f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
10396f51deb9SIvan Ilchenko 			continue;
10406f51deb9SIvan Ilchenko 
10416f51deb9SIvan Ilchenko 		max_txq_valid = true;
104236db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
104336db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
104436db4f6cSWei Dai 			*pid = pi;
104536db4f6cSWei Dai 		}
104636db4f6cSWei Dai 	}
10476f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
104836db4f6cSWei Dai }
104936db4f6cSWei Dai 
105036db4f6cSWei Dai /*
105136db4f6cSWei Dai  * Check input txq is valid or not.
105236db4f6cSWei Dai  * If input txq is not greater than any of maximum number
105336db4f6cSWei Dai  * of TX queues of all ports, it is valid.
105436db4f6cSWei Dai  * if valid, return 0, else return -1
105536db4f6cSWei Dai  */
105636db4f6cSWei Dai int
105736db4f6cSWei Dai check_nb_txq(queueid_t txq)
105836db4f6cSWei Dai {
105936db4f6cSWei Dai 	queueid_t allowed_max_txq;
106036db4f6cSWei Dai 	portid_t pid = 0;
106136db4f6cSWei Dai 
106236db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
106336db4f6cSWei Dai 	if (txq > allowed_max_txq) {
106436db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
106536db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
106636db4f6cSWei Dai 		       txq,
106736db4f6cSWei Dai 		       allowed_max_txq,
106836db4f6cSWei Dai 		       pid);
106936db4f6cSWei Dai 		return -1;
107036db4f6cSWei Dai 	}
107136db4f6cSWei Dai 	return 0;
107236db4f6cSWei Dai }
107336db4f6cSWei Dai 
10741c69df45SOri Kam /*
10751c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
10761c69df45SOri Kam  * *pid return the port id which has minimal value of
10771c69df45SOri Kam  * max_hairpin_queues in all ports.
10781c69df45SOri Kam  */
10791c69df45SOri Kam queueid_t
10801c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
10811c69df45SOri Kam {
1082*9e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
10831c69df45SOri Kam 	portid_t pi;
10841c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
10851c69df45SOri Kam 
10861c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
10871c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
10881c69df45SOri Kam 			*pid = pi;
10891c69df45SOri Kam 			return 0;
10901c69df45SOri Kam 		}
10911c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
10921c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
10931c69df45SOri Kam 			*pid = pi;
10941c69df45SOri Kam 		}
10951c69df45SOri Kam 	}
10961c69df45SOri Kam 	return allowed_max_hairpinq;
10971c69df45SOri Kam }
10981c69df45SOri Kam 
10991c69df45SOri Kam /*
11001c69df45SOri Kam  * Check input hairpin is valid or not.
11011c69df45SOri Kam  * If input hairpin is not greater than any of maximum number
11021c69df45SOri Kam  * of hairpin queues of all ports, it is valid.
11031c69df45SOri Kam  * if valid, return 0, else return -1
11041c69df45SOri Kam  */
11051c69df45SOri Kam int
11061c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
11071c69df45SOri Kam {
11081c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
11091c69df45SOri Kam 	portid_t pid = 0;
11101c69df45SOri Kam 
11111c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
11121c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
11131c69df45SOri Kam 		printf("Fail: input hairpin (%u) can't be greater "
11141c69df45SOri Kam 		       "than max_hairpin_queues (%u) of port %u\n",
11151c69df45SOri Kam 		       hairpinq, allowed_max_hairpinq, pid);
11161c69df45SOri Kam 		return -1;
11171c69df45SOri Kam 	}
11181c69df45SOri Kam 	return 0;
11191c69df45SOri Kam }
11201c69df45SOri Kam 
1121af75078fSIntel static void
1122af75078fSIntel init_config(void)
1123af75078fSIntel {
1124ce8d5614SIntel 	portid_t pid;
1125af75078fSIntel 	struct rte_port *port;
1126af75078fSIntel 	struct rte_mempool *mbp;
1127af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1128af75078fSIntel 	lcoreid_t  lc_id;
11297acf894dSStephen Hurd 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1130b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
113152f38a20SJiayu Hu 	uint32_t gso_types;
113233f9630fSSunil Kumar Kori 	uint16_t data_size;
113333f9630fSSunil Kumar Kori 	bool warning = 0;
1134c73a9071SWei Dai 	int k;
11356f51deb9SIvan Ilchenko 	int ret;
1136af75078fSIntel 
11377acf894dSStephen Hurd 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1138487f9a59SYulong Pei 
1139af75078fSIntel 	/* Configuration of logical cores. */
1140af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1141af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1142fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1143af75078fSIntel 	if (fwd_lcores == NULL) {
1144ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1145ce8d5614SIntel 							"failed\n", nb_lcores);
1146af75078fSIntel 	}
1147af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1148af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1149af75078fSIntel 					       sizeof(struct fwd_lcore),
1150fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1151af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1152ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1153ce8d5614SIntel 								"failed\n");
1154af75078fSIntel 		}
1155af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1156af75078fSIntel 	}
1157af75078fSIntel 
11587d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1159ce8d5614SIntel 		port = &ports[pid];
11608b9bd0efSMoti Haimovsky 		/* Apply default TxRx configuration for all ports */
1161fd8c20aaSShahaf Shuler 		port->dev_conf.txmode = tx_mode;
1162384161e0SShahaf Shuler 		port->dev_conf.rxmode = rx_mode;
11636f51deb9SIvan Ilchenko 
11646f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
11656f51deb9SIvan Ilchenko 		if (ret != 0)
11666f51deb9SIvan Ilchenko 			rte_exit(EXIT_FAILURE,
11676f51deb9SIvan Ilchenko 				 "rte_eth_dev_info_get() failed\n");
11687c45f6c0SFerruh Yigit 
116907e5f7bdSShahaf Shuler 		if (!(port->dev_info.tx_offload_capa &
117007e5f7bdSShahaf Shuler 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
117107e5f7bdSShahaf Shuler 			port->dev_conf.txmode.offloads &=
117207e5f7bdSShahaf Shuler 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1173b6ea6408SIntel 		if (numa_support) {
1174b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
1175b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
1176b6ea6408SIntel 			else {
1177b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
117820a0286fSLiu Xiaofeng 
117929841336SPhil Yang 				/*
118029841336SPhil Yang 				 * if socket_id is invalid,
118129841336SPhil Yang 				 * set to the first available socket.
118229841336SPhil Yang 				 */
118320a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
118429841336SPhil Yang 					socket_id = socket_ids[0];
1185b6ea6408SIntel 				port_per_socket[socket_id]++;
1186b6ea6408SIntel 			}
1187b6ea6408SIntel 		}
1188b6ea6408SIntel 
1189c73a9071SWei Dai 		/* Apply Rx offloads configuration */
1190c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
1191c73a9071SWei Dai 			port->rx_conf[k].offloads =
1192c73a9071SWei Dai 				port->dev_conf.rxmode.offloads;
1193c73a9071SWei Dai 		/* Apply Tx offloads configuration */
1194c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
1195c73a9071SWei Dai 			port->tx_conf[k].offloads =
1196c73a9071SWei Dai 				port->dev_conf.txmode.offloads;
1197c73a9071SWei Dai 
1198ce8d5614SIntel 		/* set flag to initialize port/queue */
1199ce8d5614SIntel 		port->need_reconfig = 1;
1200ce8d5614SIntel 		port->need_reconfig_queues = 1;
1201c18feafaSDekel Peled 		port->tx_metadata = 0;
120233f9630fSSunil Kumar Kori 
120333f9630fSSunil Kumar Kori 		/* Check for maximum number of segments per MTU. Accordingly
120433f9630fSSunil Kumar Kori 		 * update the mbuf data size.
120533f9630fSSunil Kumar Kori 		 */
1206163fbaafSFerruh Yigit 		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1207163fbaafSFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
120833f9630fSSunil Kumar Kori 			data_size = rx_mode.max_rx_pkt_len /
120933f9630fSSunil Kumar Kori 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
121033f9630fSSunil Kumar Kori 
121133f9630fSSunil Kumar Kori 			if ((data_size + RTE_PKTMBUF_HEADROOM) >
121233f9630fSSunil Kumar Kori 							mbuf_data_size) {
121333f9630fSSunil Kumar Kori 				mbuf_data_size = data_size +
121433f9630fSSunil Kumar Kori 						 RTE_PKTMBUF_HEADROOM;
121533f9630fSSunil Kumar Kori 				warning = 1;
1216ce8d5614SIntel 			}
121733f9630fSSunil Kumar Kori 		}
121833f9630fSSunil Kumar Kori 	}
121933f9630fSSunil Kumar Kori 
122033f9630fSSunil Kumar Kori 	if (warning)
122133f9630fSSunil Kumar Kori 		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
122233f9630fSSunil Kumar Kori 			    mbuf_data_size);
1223ce8d5614SIntel 
12243ab64341SOlivier Matz 	/*
12253ab64341SOlivier Matz 	 * Create pools of mbuf.
12263ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbuf in
12273ab64341SOlivier Matz 	 * socket 0 memory by default.
12283ab64341SOlivier Matz 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
12293ab64341SOlivier Matz 	 *
12303ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
12313ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
12323ab64341SOlivier Matz 	 */
12333ab64341SOlivier Matz 	if (param_total_num_mbufs)
12343ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
12353ab64341SOlivier Matz 	else {
12363ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
12373ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
12383ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
12393ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
12403ab64341SOlivier Matz 	}
12413ab64341SOlivier Matz 
1242b6ea6408SIntel 	if (numa_support) {
1243b6ea6408SIntel 		uint8_t i;
1244ce8d5614SIntel 
1245c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
1246401b744dSShahaf Shuler 			mempools[i] = mbuf_pool_create(mbuf_data_size,
1247401b744dSShahaf Shuler 						       nb_mbuf_per_pool,
1248c9cafcc8SShahaf Shuler 						       socket_ids[i]);
12493ab64341SOlivier Matz 	} else {
12503ab64341SOlivier Matz 		if (socket_num == UMA_NO_CONFIG)
1251401b744dSShahaf Shuler 			mempools[0] = mbuf_pool_create(mbuf_data_size,
1252401b744dSShahaf Shuler 						       nb_mbuf_per_pool, 0);
12533ab64341SOlivier Matz 		else
1254401b744dSShahaf Shuler 			mempools[socket_num] = mbuf_pool_create
1255401b744dSShahaf Shuler 							(mbuf_data_size,
1256401b744dSShahaf Shuler 							 nb_mbuf_per_pool,
12573ab64341SOlivier Matz 							 socket_num);
12583ab64341SOlivier Matz 	}
1259b6ea6408SIntel 
1260b6ea6408SIntel 	init_port_config();
12615886ae07SAdrien Mazarguil 
126252f38a20SJiayu Hu 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1263aaacd052SJiayu Hu 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
12645886ae07SAdrien Mazarguil 	/*
12655886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
12665886ae07SAdrien Mazarguil 	 */
12675886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
12688fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
12698fd8bebcSAdrien Mazarguil 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
12708fd8bebcSAdrien Mazarguil 
12715886ae07SAdrien Mazarguil 		if (mbp == NULL)
12725886ae07SAdrien Mazarguil 			mbp = mbuf_pool_find(0);
12735886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
127452f38a20SJiayu Hu 		/* initialize GSO context */
127552f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
127652f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
127752f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
127835b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
127935b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
128052f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
12815886ae07SAdrien Mazarguil 	}
12825886ae07SAdrien Mazarguil 
1283ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
1284ce8d5614SIntel 	if (init_fwd_streams() < 0)
1285ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
12860c0db76fSBernard Iremonger 
12870c0db76fSBernard Iremonger 	fwd_config_setup();
1288b7091f1dSJiayu Hu 
1289b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1290b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1291b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1292b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1293b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1294b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1295b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1296b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1297b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1298b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1299b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1300b7091f1dSJiayu Hu 		}
1301b7091f1dSJiayu Hu 	}
13020ad778b3SJasvinder Singh 
13030ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
13040ad778b3SJasvinder Singh 	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
13050ad778b3SJasvinder Singh 		RTE_ETH_FOREACH_DEV(pid) {
13060ad778b3SJasvinder Singh 			port = &ports[pid];
13070ad778b3SJasvinder Singh 			const char *driver = port->dev_info.driver_name;
13080ad778b3SJasvinder Singh 
13090ad778b3SJasvinder Singh 			if (strcmp(driver, "net_softnic") == 0)
13100ad778b3SJasvinder Singh 				port->softport.fwd_lcore_arg = fwd_lcores;
13110ad778b3SJasvinder Singh 		}
13120ad778b3SJasvinder Singh 	}
13130ad778b3SJasvinder Singh #endif
13140ad778b3SJasvinder Singh 
1315ce8d5614SIntel }
1316ce8d5614SIntel 
13172950a769SDeclan Doherty 
13182950a769SDeclan Doherty void
1319a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
13202950a769SDeclan Doherty {
13212950a769SDeclan Doherty 	struct rte_port *port;
13226f51deb9SIvan Ilchenko 	int ret;
13232950a769SDeclan Doherty 
13242950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
13252950a769SDeclan Doherty 	port = &ports[new_port_id];
13266f51deb9SIvan Ilchenko 
13276f51deb9SIvan Ilchenko 	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
13286f51deb9SIvan Ilchenko 	if (ret != 0)
13296f51deb9SIvan Ilchenko 		return;
13302950a769SDeclan Doherty 
13312950a769SDeclan Doherty 	/* set flag to initialize port/queue */
13322950a769SDeclan Doherty 	port->need_reconfig = 1;
13332950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1334a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
13352950a769SDeclan Doherty 
13362950a769SDeclan Doherty 	init_port_config();
13372950a769SDeclan Doherty }
13382950a769SDeclan Doherty 
13392950a769SDeclan Doherty 
1340ce8d5614SIntel int
1341ce8d5614SIntel init_fwd_streams(void)
1342ce8d5614SIntel {
1343ce8d5614SIntel 	portid_t pid;
1344ce8d5614SIntel 	struct rte_port *port;
1345ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
13465a8fb55cSReshma Pattan 	queueid_t q;
1347ce8d5614SIntel 
1348ce8d5614SIntel 	/* set socket id according to numa or not */
13497d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1350ce8d5614SIntel 		port = &ports[pid];
1351ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
1352ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
1353ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
1354ce8d5614SIntel 				port->dev_info.max_rx_queues);
1355ce8d5614SIntel 			return -1;
1356ce8d5614SIntel 		}
1357ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
1358ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
1359ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
1360ce8d5614SIntel 				port->dev_info.max_tx_queues);
1361ce8d5614SIntel 			return -1;
1362ce8d5614SIntel 		}
136320a0286fSLiu Xiaofeng 		if (numa_support) {
136420a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
136520a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
136620a0286fSLiu Xiaofeng 			else {
1367b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
136820a0286fSLiu Xiaofeng 
136929841336SPhil Yang 				/*
137029841336SPhil Yang 				 * if socket_id is invalid,
137129841336SPhil Yang 				 * set to the first available socket.
137229841336SPhil Yang 				 */
137320a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
137429841336SPhil Yang 					port->socket_id = socket_ids[0];
137520a0286fSLiu Xiaofeng 			}
137620a0286fSLiu Xiaofeng 		}
1377b6ea6408SIntel 		else {
1378b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1379af75078fSIntel 				port->socket_id = 0;
1380b6ea6408SIntel 			else
1381b6ea6408SIntel 				port->socket_id = socket_num;
1382b6ea6408SIntel 		}
1383af75078fSIntel 	}
1384af75078fSIntel 
13855a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
13865a8fb55cSReshma Pattan 	if (q == 0) {
13875a8fb55cSReshma Pattan 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
13885a8fb55cSReshma Pattan 		return -1;
13895a8fb55cSReshma Pattan 	}
13905a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1391ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1392ce8d5614SIntel 		return 0;
1393ce8d5614SIntel 	/* clear the old */
1394ce8d5614SIntel 	if (fwd_streams != NULL) {
1395ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1396ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1397ce8d5614SIntel 				continue;
1398ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1399ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1400af75078fSIntel 		}
1401ce8d5614SIntel 		rte_free(fwd_streams);
1402ce8d5614SIntel 		fwd_streams = NULL;
1403ce8d5614SIntel 	}
1404ce8d5614SIntel 
1405ce8d5614SIntel 	/* init new */
1406ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
14071f84c469SMatan Azrad 	if (nb_fwd_streams) {
1408ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
14091f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
14101f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1411ce8d5614SIntel 		if (fwd_streams == NULL)
14121f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
14131f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
14141f84c469SMatan Azrad 				 nb_fwd_streams);
1415ce8d5614SIntel 
1416af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
14171f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
14181f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
14191f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1420ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
14211f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
14221f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
14231f84c469SMatan Azrad 		}
1424af75078fSIntel 	}
1425ce8d5614SIntel 
1426ce8d5614SIntel 	return 0;
1427af75078fSIntel }
1428af75078fSIntel 
1429af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1430af75078fSIntel static void
1431af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1432af75078fSIntel {
1433af75078fSIntel 	unsigned int total_burst;
1434af75078fSIntel 	unsigned int nb_burst;
1435af75078fSIntel 	unsigned int burst_stats[3];
1436af75078fSIntel 	uint16_t pktnb_stats[3];
1437af75078fSIntel 	uint16_t nb_pkt;
1438af75078fSIntel 	int burst_percent[3];
1439af75078fSIntel 
1440af75078fSIntel 	/*
1441af75078fSIntel 	 * First compute the total number of packet bursts and the
1442af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1443af75078fSIntel 	 */
1444af75078fSIntel 	total_burst = 0;
1445af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1446af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1447af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1448af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1449af75078fSIntel 		if (nb_burst == 0)
1450af75078fSIntel 			continue;
1451af75078fSIntel 		total_burst += nb_burst;
1452af75078fSIntel 		if (nb_burst > burst_stats[0]) {
1453af75078fSIntel 			burst_stats[1] = burst_stats[0];
1454af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
1455af75078fSIntel 			burst_stats[0] = nb_burst;
1456af75078fSIntel 			pktnb_stats[0] = nb_pkt;
1457fe613657SDaniel Shelepov 		} else if (nb_burst > burst_stats[1]) {
1458fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1459fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
1460af75078fSIntel 		}
1461af75078fSIntel 	}
1462af75078fSIntel 	if (total_burst == 0)
1463af75078fSIntel 		return;
1464af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1465af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1466af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
1467af75078fSIntel 	if (burst_stats[0] == total_burst) {
1468af75078fSIntel 		printf("]\n");
1469af75078fSIntel 		return;
1470af75078fSIntel 	}
1471af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1472af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
1473af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
1474af75078fSIntel 		return;
1475af75078fSIntel 	}
1476af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1477af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1478af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1479af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1480af75078fSIntel 		return;
1481af75078fSIntel 	}
1482af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1483af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1484af75078fSIntel }
1485af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1486af75078fSIntel 
1487af75078fSIntel static void
1488af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1489af75078fSIntel {
1490af75078fSIntel 	struct fwd_stream *fs;
1491af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1492af75078fSIntel 
1493af75078fSIntel 	fs = fwd_streams[stream_id];
1494af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1495af75078fSIntel 	    (fs->fwd_dropped == 0))
1496af75078fSIntel 		return;
1497af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1498af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1499af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1500af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1501c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1502c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1503af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1504af75078fSIntel 
1505af75078fSIntel 	/* if checksum mode */
1506af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1507c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1508c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1509c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
151058d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
151158d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
151294d65546SDavid Marchand 	} else {
151394d65546SDavid Marchand 		printf("\n");
1514af75078fSIntel 	}
1515af75078fSIntel 
1516af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1517af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1518af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1519af75078fSIntel #endif
1520af75078fSIntel }
1521af75078fSIntel 
/*
 * Display forwarding statistics: a bordered section per forwarding port
 * (hardware counters are shown as deltas against the snapshot saved in
 * ports[].stats by fwd_stats_reset()), followed by totals accumulated
 * over all forwarding ports. When there are more streams than ports,
 * per-stream details are printed as well.
 */
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	/* Per-port aggregation of the software (per-stream) counters. */
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles = 0;
#endif
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	/* Fold every stream's counters into its RX/TX port's totals. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		/* Multiple streams per port: show per-stream detail too. */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles += fs->core_cycles;
#endif
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		uint8_t j;

		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		/* Report HW counters relative to the last reset snapshot. */
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf  += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		/* Two layouts: narrow one when queue-stats mapping is off. */
		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				       stats.ierrors);
				printf("  RX-nombufs: %-14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		} else {
			printf("  RX-packets:             %14"PRIu64
			       "    RX-dropped:%14"PRIu64
			       "    RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       "    Bad-l4csum:%14"PRIu64
				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs:             %14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets:             %14"PRIu64
			       "    TX-dropped:%14"PRIu64
			       "    TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		if (ports_stats[pt_id].rx_stream)
			pkt_burst_stats_display("RX",
				&ports_stats[pt_id].rx_stream->rx_burst_stats);
		if (ports_stats[pt_id].tx_stream)
			pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
#endif

		/* Per-queue counters for queues mapped to stats registers. */
		if (port->rx_queue_stats_mapping_enabled) {
			printf("\n");
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       "     RX-errors:%14"PRIu64
				       "    RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
			}
			printf("\n");
		}
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       "                                 TX-bytes:%14"
				       PRIu64"\n",
				       j, stats.q_opackets[j],
				       stats.q_obytes[j]);
			}
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	/* Totals accumulated over all forwarding ports. */
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	/* Average CPU cost per received packet over the whole run. */
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
}
170853324971SDavid Marchand 
170953324971SDavid Marchand void
171053324971SDavid Marchand fwd_stats_reset(void)
171153324971SDavid Marchand {
171253324971SDavid Marchand 	streamid_t sm_id;
171353324971SDavid Marchand 	portid_t pt_id;
171453324971SDavid Marchand 	int i;
171553324971SDavid Marchand 
171653324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
171753324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
171853324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
171953324971SDavid Marchand 	}
172053324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
172153324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
172253324971SDavid Marchand 
172353324971SDavid Marchand 		fs->rx_packets = 0;
172453324971SDavid Marchand 		fs->tx_packets = 0;
172553324971SDavid Marchand 		fs->fwd_dropped = 0;
172653324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
172753324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
172853324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
172953324971SDavid Marchand 
173053324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
173153324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
173253324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
173353324971SDavid Marchand #endif
173453324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
173553324971SDavid Marchand 		fs->core_cycles = 0;
173653324971SDavid Marchand #endif
173753324971SDavid Marchand 	}
173853324971SDavid Marchand }
173953324971SDavid Marchand 
1740af75078fSIntel static void
17417741e4cfSIntel flush_fwd_rx_queues(void)
1742af75078fSIntel {
1743af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1744af75078fSIntel 	portid_t  rxp;
17457741e4cfSIntel 	portid_t port_id;
1746af75078fSIntel 	queueid_t rxq;
1747af75078fSIntel 	uint16_t  nb_rx;
1748af75078fSIntel 	uint16_t  i;
1749af75078fSIntel 	uint8_t   j;
1750f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1751594302c7SJames Poole 	uint64_t timer_period;
1752f487715fSReshma Pattan 
1753f487715fSReshma Pattan 	/* convert to number of cycles */
1754594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1755af75078fSIntel 
1756af75078fSIntel 	for (j = 0; j < 2; j++) {
17577741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1758af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
17597741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1760f487715fSReshma Pattan 				/**
1761f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1762f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1763f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1764f487715fSReshma Pattan 				* after 1sec timer expiry.
1765f487715fSReshma Pattan 				*/
1766f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1767af75078fSIntel 				do {
17687741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1769013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1770af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1771af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1772f487715fSReshma Pattan 
1773f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1774f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1775f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1776f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1777f487715fSReshma Pattan 					(timer_tsc < timer_period));
1778f487715fSReshma Pattan 				timer_tsc = 0;
1779af75078fSIntel 			}
1780af75078fSIntel 		}
1781af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1782af75078fSIntel 	}
1783af75078fSIntel }
1784af75078fSIntel 
/*
 * Forwarding loop of one logical core: repeatedly invoke the engine's
 * packet_fwd callback on every stream assigned to this lcore until
 * fc->stopped is set. When compiled in, also drives the periodic bitrate
 * and latency statistics updates from their designated lcores.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* This lcore's slice of the global stream array. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Bitrate stats: computed once per second, and only on
		 * the lcore selected for bitrate accounting. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Latency stats: updated only on the designated lcore. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1827af75078fSIntel 
1828af75078fSIntel static int
1829af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1830af75078fSIntel {
1831af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1832af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1833af75078fSIntel 	return 0;
1834af75078fSIntel }
1835af75078fSIntel 
1836af75078fSIntel /*
1837af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1838af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1839af75078fSIntel  */
1840af75078fSIntel static int
1841af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1842af75078fSIntel {
1843af75078fSIntel 	struct fwd_lcore *fwd_lc;
1844af75078fSIntel 	struct fwd_lcore tmp_lcore;
1845af75078fSIntel 
1846af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1847af75078fSIntel 	tmp_lcore = *fwd_lc;
1848af75078fSIntel 	tmp_lcore.stopped = 1;
1849af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1850af75078fSIntel 	return 0;
1851af75078fSIntel }
1852af75078fSIntel 
1853af75078fSIntel /*
1854af75078fSIntel  * Launch packet forwarding:
1855af75078fSIntel  *     - Setup per-port forwarding context.
1856af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1857af75078fSIntel  */
1858af75078fSIntel static void
1859af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1860af75078fSIntel {
1861af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1862af75078fSIntel 	unsigned int i;
1863af75078fSIntel 	unsigned int lc_id;
1864af75078fSIntel 	int diag;
1865af75078fSIntel 
1866af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1867af75078fSIntel 	if (port_fwd_begin != NULL) {
1868af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1869af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1870af75078fSIntel 	}
1871af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1872af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1873af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1874af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1875af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1876af75078fSIntel 						     fwd_lcores[i], lc_id);
1877af75078fSIntel 			if (diag != 0)
1878af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1879af75078fSIntel 				       lc_id, diag);
1880af75078fSIntel 		}
1881af75078fSIntel 	}
1882af75078fSIntel }
1883af75078fSIntel 
/*
 * Launch packet forwarding configuration.
 *
 * Validates that the current forwarding engine is compatible with the
 * configured Rx/Tx queue counts, refuses to start if any port is stopped
 * or forwarding is already running, optionally runs initial tx_only
 * bursts (with_tx_first times), then launches the engine's forwarding
 * function on every forwarding lcore.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;

	/* rxonly needs at least one Rx queue; txonly at least one Tx queue. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	/* Every other engine both receives and transmits, so it needs
	 * Rx and Tx queues.
	 */
	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	if(dcb_test) {
		/* In DCB mode every forwarding port must have been
		 * configured for DCB, and more than one lcore is required.
		 */
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain any stale packets left in the Rx queues unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	if (with_tx_first) {
		/* Run the tx_only engine with_tx_first times before the
		 * real forwarding starts (e.g. to prime peer flow tables).
		 */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1970af75078fSIntel 
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait
 * for all of them to finish, run the engine's per-port cleanup hook and
 * display the accumulated statistics.
 */
void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	/* test_done != 0 means no forwarding run is active. */
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Give the forwarding engine a chance to clean up each port. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	printf("\nDone.\n");
	test_done = 1;
}
2001af75078fSIntel 
2002cfae07fdSOuyang Changchun void
2003cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2004cfae07fdSOuyang Changchun {
2005492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
2006cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
2007cfae07fdSOuyang Changchun }
2008cfae07fdSOuyang Changchun 
2009cfae07fdSOuyang Changchun void
2010cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2011cfae07fdSOuyang Changchun {
2012492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
2013cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
2014cfae07fdSOuyang Changchun }
2015cfae07fdSOuyang Changchun 
/*
 * Check whether every ethdev port is started.
 *
 * Bonding slave ports (slave_flag set) are skipped since they are
 * controlled by their master device.
 * Returns 1 when no non-slave port is left unstarted, 0 otherwise.
 */
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}
2033ce8d5614SIntel 
2034148f963fSBruce Richardson int
20356018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
20366018eb8cSShahaf Shuler {
20376018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
20386018eb8cSShahaf Shuler 
20396018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
20406018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
20416018eb8cSShahaf Shuler 		return 0;
20426018eb8cSShahaf Shuler 	return 1;
20436018eb8cSShahaf Shuler }
20446018eb8cSShahaf Shuler 
20456018eb8cSShahaf Shuler int
2046edab33b1STetsuya Mukawa all_ports_stopped(void)
2047edab33b1STetsuya Mukawa {
2048edab33b1STetsuya Mukawa 	portid_t pi;
2049edab33b1STetsuya Mukawa 
20507d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
20516018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2052edab33b1STetsuya Mukawa 			return 0;
2053edab33b1STetsuya Mukawa 	}
2054edab33b1STetsuya Mukawa 
2055edab33b1STetsuya Mukawa 	return 1;
2056edab33b1STetsuya Mukawa }
2057edab33b1STetsuya Mukawa 
2058edab33b1STetsuya Mukawa int
2059edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2060edab33b1STetsuya Mukawa {
2061edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2062edab33b1STetsuya Mukawa 		return 0;
2063edab33b1STetsuya Mukawa 
2064edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2065edab33b1STetsuya Mukawa 		return 0;
2066edab33b1STetsuya Mukawa 
2067edab33b1STetsuya Mukawa 	return 1;
2068edab33b1STetsuya Mukawa }
2069edab33b1STetsuya Mukawa 
/* Configure the Rx and Tx hairpin queues for the selected port.
 *
 * Tx hairpin queues occupy queue ids [nb_txq, nb_txq + nb_hairpinq) and
 * Rx hairpin queues [nb_rxq, nb_rxq + nb_hairpinq); the i-th Tx hairpin
 * queue is peered with the i-th Rx hairpin queue of the same port.
 * On failure the port status is moved back to STOPPED and the queues
 * are marked for reconfiguration. Returns 0 on success, -1 on error.
 */
static int
setup_hairpin_queues(portid_t pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];

	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up a Tx hairpin queue: restore port state. */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_txq;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up an Rx hairpin queue: restore port state. */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
21261c69df45SOri Kam 
/*
 * Start one port (or all ports with pid == RTE_PORT_ALL).
 *
 * For each selected port: move the status from STOPPED to HANDLING,
 * (re)configure the device and its Rx/Tx/hairpin queues if flagged,
 * then call rte_eth_dev_start() and mark the port STARTED.
 * Returns 0 on success, -1 on a configuration error (the port is then
 * flagged for reconfiguration on the next attempt).
 */
int
start_port(portid_t pid)
{
	/* -1: no port matched; 0: matched but none started; 1: started. */
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Take ownership of the port: STOPPED -> HANDLING. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* Hairpin queues require device capability support. */
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				printf("Port %d doesn't support hairpin "
				       "queues\n", pi);
				return -1;
			}
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
						     nb_txq + nb_hairpinq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Honor a per-port NUMA override if set. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Failed to set up a Tx queue: restore port
				 * state and return.
				 */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Failed to set up an Rx queue: restore port
				 * state and return.
				 */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup hairpin queues */
			if (setup_hairpin_queues(pi) != 0)
				return -1;
		}
		configure_rxtx_dump_callbacks(verbose_level);
		if (clear_ptypes) {
			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
					NULL, 0);
			if (diag < 0)
				printf(
				"Port %d: Failed to disable Ptype parsing\n",
				pi);
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Failed to start: restore the STOPPED state. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
2318ce8d5614SIntel 
/*
 * Stop one port (or all ports with pid == RTE_PORT_ALL).
 *
 * Ports still in the forwarding configuration while a run is active,
 * and bonding slave ports, are refused. For each remaining port the
 * status moves STARTED -> HANDLING -> STOPPED around rte_eth_dev_stop().
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* Stopping ports ends any DCB test configuration. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Skip ports that are not currently STARTED. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
2367ce8d5614SIntel 
2368ce6959bfSWisam Jaddo static void
23694f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2370ce6959bfSWisam Jaddo {
23714f1de450SThomas Monjalon 	portid_t i;
23724f1de450SThomas Monjalon 	portid_t new_total = 0;
2373ce6959bfSWisam Jaddo 
23744f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
23754f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
23764f1de450SThomas Monjalon 			array[new_total] = array[i];
23774f1de450SThomas Monjalon 			new_total++;
2378ce6959bfSWisam Jaddo 		}
23794f1de450SThomas Monjalon 	*total = new_total;
23804f1de450SThomas Monjalon }
23814f1de450SThomas Monjalon 
/* Drop detached/invalid port ids from the global port lists and keep
 * the configured port count in sync with the forwarding list.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
2389ce6959bfSWisam Jaddo 
/*
 * Close one port (or all ports with pid == RTE_PORT_ALL).
 *
 * Refuses ports that are still forwarding or act as bonding slaves.
 * Flushes any flow rules before rte_eth_dev_close() and prunes the
 * global port lists afterwards.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* CLOSED -> CLOSED cmpset is a pure status check. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		/* Remove any installed flow rules before closing. */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		remove_invalid_ports();

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
2441ce8d5614SIntel 
/*
 * Reset one port (or all ports with pid == RTE_PORT_ALL) via
 * rte_eth_dev_reset().
 *
 * All targeted ports must be stopped first; ports still forwarding or
 * acting as bonding slaves are skipped. Ports reset successfully are
 * flagged for full reconfiguration on the next start.
 */
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
		printf("Can not reset port(s), please stop port(s) first.\n");
		return;
	}

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			/* force reconfiguration on the next start */
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}
248897f1e196SWei Dai 
/*
 * Hot-plug a new device described by "identifier" (a devargs string)
 * and set up its port(s).
 *
 * Two setup modes exist: when setup_on_probe_event is set, new ports
 * are picked up from the RTE_ETH_EVENT_NEW handler (which leaves them
 * in HANDLING state with need_setup set); otherwise the ports matching
 * the identifier are iterated and set up directly.
 */
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}
2525c9cce428SThomas Monjalon 
/*
 * Finish setting up a freshly attached port: pick its NUMA socket,
 * reconfigure it, enable promiscuous mode (best effort), and register
 * it in the global port lists in the STOPPED state.
 */
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;
	int ret;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	/* Promiscuous failure is reported but not fatal. */
	ret = rte_eth_promiscuous_enable(pi);
	if (ret != 0)
		printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
			pi, rte_strerror(-ret));

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
2551edab33b1STetsuya Mukawa 
/*
 * Detach the underlying device of "port_id" via rte_dev_remove().
 *
 * The port must be stopped. Any flow rules are flushed first; after
 * removal, every sibling port of the same device is forced to the
 * CLOSED state and the global port lists are pruned.
 */
void
detach_port_device(portid_t port_id)
{
	struct rte_device *dev;
	portid_t sibling;

	printf("Removing a device...\n");

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		/* A stopped-but-not-closed port can still be detached. */
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		printf("Port was not closed\n");
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	remove_invalid_ports();

	printf("Device of port %u is detached\n", port_id);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}
26005f4ec54fSChen Jing D(Mark) 
2601af75078fSIntel void
260255e51c96SNithin Dabilpuram detach_device(char *identifier)
260355e51c96SNithin Dabilpuram {
260455e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
260555e51c96SNithin Dabilpuram 	struct rte_devargs da;
260655e51c96SNithin Dabilpuram 	portid_t port_id;
260755e51c96SNithin Dabilpuram 
260855e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
260955e51c96SNithin Dabilpuram 
261055e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
261155e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
261255e51c96SNithin Dabilpuram 		printf("cannot parse identifier\n");
261355e51c96SNithin Dabilpuram 		if (da.args)
261455e51c96SNithin Dabilpuram 			free(da.args);
261555e51c96SNithin Dabilpuram 		return;
261655e51c96SNithin Dabilpuram 	}
261755e51c96SNithin Dabilpuram 
261855e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
261955e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
262055e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
262155e51c96SNithin Dabilpuram 				printf("Port %u not stopped\n", port_id);
2622149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
262355e51c96SNithin Dabilpuram 				return;
262455e51c96SNithin Dabilpuram 			}
262555e51c96SNithin Dabilpuram 
262655e51c96SNithin Dabilpuram 			/* sibling ports are forced to be closed */
262755e51c96SNithin Dabilpuram 			if (ports[port_id].flow_list)
262855e51c96SNithin Dabilpuram 				port_flow_flush(port_id);
262955e51c96SNithin Dabilpuram 			ports[port_id].port_status = RTE_PORT_CLOSED;
263055e51c96SNithin Dabilpuram 			printf("Port %u is now closed\n", port_id);
263155e51c96SNithin Dabilpuram 		}
263255e51c96SNithin Dabilpuram 	}
263355e51c96SNithin Dabilpuram 
263455e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
263555e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
263655e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
263755e51c96SNithin Dabilpuram 		return;
263855e51c96SNithin Dabilpuram 	}
263955e51c96SNithin Dabilpuram 
264055e51c96SNithin Dabilpuram 	remove_invalid_ports();
264155e51c96SNithin Dabilpuram 
264255e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
264355e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
264455e51c96SNithin Dabilpuram 	printf("Done\n");
264555e51c96SNithin Dabilpuram }
264655e51c96SNithin Dabilpuram 
264755e51c96SNithin Dabilpuram void
2648af75078fSIntel pmd_test_exit(void)
2649af75078fSIntel {
2650af75078fSIntel 	portid_t pt_id;
2651fb73e096SJeff Guo 	int ret;
2652401b744dSShahaf Shuler 	int i;
2653af75078fSIntel 
26548210ec25SPablo de Lara 	if (test_done == 0)
26558210ec25SPablo de Lara 		stop_packet_forwarding();
26568210ec25SPablo de Lara 
26573a0968c8SShahaf Shuler 	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
26583a0968c8SShahaf Shuler 		if (mempools[i]) {
26593a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
26603a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
26613a0968c8SShahaf Shuler 						     NULL);
26623a0968c8SShahaf Shuler 		}
26633a0968c8SShahaf Shuler 	}
2664d3a274ceSZhihong Wang 	if (ports != NULL) {
2665d3a274ceSZhihong Wang 		no_link_check = 1;
26667d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
266708fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
2668af75078fSIntel 			fflush(stdout);
2669d3a274ceSZhihong Wang 			stop_port(pt_id);
267008fd782bSCristian Dumitrescu 		}
267108fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
267208fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
267308fd782bSCristian Dumitrescu 			fflush(stdout);
2674d3a274ceSZhihong Wang 			close_port(pt_id);
2675af75078fSIntel 		}
2676d3a274ceSZhihong Wang 	}
2677fb73e096SJeff Guo 
2678fb73e096SJeff Guo 	if (hot_plug) {
2679fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
26802049c511SJeff Guo 		if (ret) {
2681fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2682fb73e096SJeff Guo 				"fail to stop device event monitor.");
26832049c511SJeff Guo 			return;
26842049c511SJeff Guo 		}
2685fb73e096SJeff Guo 
26862049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
2687cc1bf307SJeff Guo 			dev_event_callback, NULL);
26882049c511SJeff Guo 		if (ret < 0) {
2689fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
26902049c511SJeff Guo 				"fail to unregister device event callback.\n");
26912049c511SJeff Guo 			return;
26922049c511SJeff Guo 		}
26932049c511SJeff Guo 
26942049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
26952049c511SJeff Guo 		if (ret) {
26962049c511SJeff Guo 			RTE_LOG(ERR, EAL,
26972049c511SJeff Guo 				"fail to disable hotplug handling.\n");
26982049c511SJeff Guo 			return;
26992049c511SJeff Guo 		}
2700fb73e096SJeff Guo 	}
2701401b744dSShahaf Shuler 	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2702401b744dSShahaf Shuler 		if (mempools[i])
2703401b744dSShahaf Shuler 			rte_mempool_free(mempools[i]);
2704401b744dSShahaf Shuler 	}
2705fb73e096SJeff Guo 
2706d3a274ceSZhihong Wang 	printf("\nBye...\n");
2707af75078fSIntel }
2708af75078fSIntel 
2709af75078fSIntel typedef void (*cmd_func_t)(void);
2710af75078fSIntel struct pmd_test_command {
2711af75078fSIntel 	const char *cmd_name;
2712af75078fSIntel 	cmd_func_t cmd_func;
2713af75078fSIntel };
2714af75078fSIntel 
2715af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2716af75078fSIntel 
2717ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2718af75078fSIntel static void
2719edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2720af75078fSIntel {
2721ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2722ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2723f8244c63SZhiyong Yang 	portid_t portid;
2724f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2725ce8d5614SIntel 	struct rte_eth_link link;
2726e661a08bSIgor Romanov 	int ret;
2727ce8d5614SIntel 
2728ce8d5614SIntel 	printf("Checking link statuses...\n");
2729ce8d5614SIntel 	fflush(stdout);
2730ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2731ce8d5614SIntel 		all_ports_up = 1;
27327d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2733ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2734ce8d5614SIntel 				continue;
2735ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2736e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
2737e661a08bSIgor Romanov 			if (ret < 0) {
2738e661a08bSIgor Romanov 				all_ports_up = 0;
2739e661a08bSIgor Romanov 				if (print_flag == 1)
2740e661a08bSIgor Romanov 					printf("Port %u link get failed: %s\n",
2741e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
2742e661a08bSIgor Romanov 				continue;
2743e661a08bSIgor Romanov 			}
2744ce8d5614SIntel 			/* print link status if flag set */
2745ce8d5614SIntel 			if (print_flag == 1) {
2746ce8d5614SIntel 				if (link.link_status)
2747f8244c63SZhiyong Yang 					printf(
2748f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2749f8244c63SZhiyong Yang 					portid, link.link_speed,
2750ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2751ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2752ce8d5614SIntel 				else
2753f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2754ce8d5614SIntel 				continue;
2755ce8d5614SIntel 			}
2756ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
275709419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2758ce8d5614SIntel 				all_ports_up = 0;
2759ce8d5614SIntel 				break;
2760ce8d5614SIntel 			}
2761ce8d5614SIntel 		}
2762ce8d5614SIntel 		/* after finally printing all link status, get out */
2763ce8d5614SIntel 		if (print_flag == 1)
2764ce8d5614SIntel 			break;
2765ce8d5614SIntel 
2766ce8d5614SIntel 		if (all_ports_up == 0) {
2767ce8d5614SIntel 			fflush(stdout);
2768ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2769ce8d5614SIntel 		}
2770ce8d5614SIntel 
2771ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2772ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2773ce8d5614SIntel 			print_flag = 1;
2774ce8d5614SIntel 		}
27758ea656f8SGaetan Rivet 
27768ea656f8SGaetan Rivet 		if (lsc_interrupt)
27778ea656f8SGaetan Rivet 			break;
2778ce8d5614SIntel 	}
2779af75078fSIntel }
2780af75078fSIntel 
2781cc1bf307SJeff Guo /*
2782cc1bf307SJeff Guo  * This callback is for remove a port for a device. It has limitation because
2783cc1bf307SJeff Guo  * it is not for multiple port removal for a device.
2784cc1bf307SJeff Guo  * TODO: the device detach invoke will plan to be removed from user side to
2785cc1bf307SJeff Guo  * eal. And convert all PMDs to free port resources on ether device closing.
2786cc1bf307SJeff Guo  */
2787284c908cSGaetan Rivet static void
2788cc1bf307SJeff Guo rmv_port_callback(void *arg)
2789284c908cSGaetan Rivet {
27903b97888aSMatan Azrad 	int need_to_start = 0;
27910da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
279228caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
2793284c908cSGaetan Rivet 
2794284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2795284c908cSGaetan Rivet 
27963b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
27973b97888aSMatan Azrad 		need_to_start = 1;
27983b97888aSMatan Azrad 		stop_packet_forwarding();
27993b97888aSMatan Azrad 	}
28000da2a62bSMatan Azrad 	no_link_check = 1;
2801284c908cSGaetan Rivet 	stop_port(port_id);
28020da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
2803284c908cSGaetan Rivet 	close_port(port_id);
2804f8e5baa2SThomas Monjalon 	detach_port_device(port_id);
28053b97888aSMatan Azrad 	if (need_to_start)
28063b97888aSMatan Azrad 		start_packet_forwarding(0);
2807284c908cSGaetan Rivet }
2808284c908cSGaetan Rivet 
280976ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
2810d6af1a13SBernard Iremonger static int
2811f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2812d6af1a13SBernard Iremonger 		  void *ret_param)
281376ad4a2dSGaetan Rivet {
281476ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
2815d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
281676ad4a2dSGaetan Rivet 
281776ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
2818f431e010SHerakliusz Lipiec 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
281976ad4a2dSGaetan Rivet 			port_id, __func__, type);
282076ad4a2dSGaetan Rivet 		fflush(stderr);
28213af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2822f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
282397b5d8b5SThomas Monjalon 			eth_event_desc[type]);
282476ad4a2dSGaetan Rivet 		fflush(stdout);
282576ad4a2dSGaetan Rivet 	}
2826284c908cSGaetan Rivet 
2827284c908cSGaetan Rivet 	switch (type) {
28284f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
28294f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
28304f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
28314f1ed78eSThomas Monjalon 		break;
2832284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
28334f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
28344f1ed78eSThomas Monjalon 			break;
2835284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
2836cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
2837284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
2838284c908cSGaetan Rivet 		break;
2839284c908cSGaetan Rivet 	default:
2840284c908cSGaetan Rivet 		break;
2841284c908cSGaetan Rivet 	}
2842d6af1a13SBernard Iremonger 	return 0;
284376ad4a2dSGaetan Rivet }
284476ad4a2dSGaetan Rivet 
284597b5d8b5SThomas Monjalon static int
284697b5d8b5SThomas Monjalon register_eth_event_callback(void)
284797b5d8b5SThomas Monjalon {
284897b5d8b5SThomas Monjalon 	int ret;
284997b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
285097b5d8b5SThomas Monjalon 
285197b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
285297b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
285397b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
285497b5d8b5SThomas Monjalon 				event,
285597b5d8b5SThomas Monjalon 				eth_event_callback,
285697b5d8b5SThomas Monjalon 				NULL);
285797b5d8b5SThomas Monjalon 		if (ret != 0) {
285897b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
285997b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
286097b5d8b5SThomas Monjalon 			return -1;
286197b5d8b5SThomas Monjalon 		}
286297b5d8b5SThomas Monjalon 	}
286397b5d8b5SThomas Monjalon 
286497b5d8b5SThomas Monjalon 	return 0;
286597b5d8b5SThomas Monjalon }
286697b5d8b5SThomas Monjalon 
2867fb73e096SJeff Guo /* This function is used by the interrupt thread */
2868fb73e096SJeff Guo static void
2869cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2870fb73e096SJeff Guo 			     __rte_unused void *arg)
2871fb73e096SJeff Guo {
28722049c511SJeff Guo 	uint16_t port_id;
28732049c511SJeff Guo 	int ret;
28742049c511SJeff Guo 
2875fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
2876fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
2877fb73e096SJeff Guo 			__func__, type);
2878fb73e096SJeff Guo 		fflush(stderr);
2879fb73e096SJeff Guo 	}
2880fb73e096SJeff Guo 
2881fb73e096SJeff Guo 	switch (type) {
2882fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
2883cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2884fb73e096SJeff Guo 			device_name);
28852049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
28862049c511SJeff Guo 		if (ret) {
28872049c511SJeff Guo 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
28882049c511SJeff Guo 				device_name);
28892049c511SJeff Guo 			return;
28902049c511SJeff Guo 		}
2891cc1bf307SJeff Guo 		/*
2892cc1bf307SJeff Guo 		 * Because the user's callback is invoked in eal interrupt
2893cc1bf307SJeff Guo 		 * callback, the interrupt callback need to be finished before
2894cc1bf307SJeff Guo 		 * it can be unregistered when detaching device. So finish
2895cc1bf307SJeff Guo 		 * callback soon and use a deferred removal to detach device
2896cc1bf307SJeff Guo 		 * is need. It is a workaround, once the device detaching be
2897cc1bf307SJeff Guo 		 * moved into the eal in the future, the deferred removal could
2898cc1bf307SJeff Guo 		 * be deleted.
2899cc1bf307SJeff Guo 		 */
2900cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
2901cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
2902cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
2903cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
2904fb73e096SJeff Guo 		break;
2905fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
2906fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2907fb73e096SJeff Guo 			device_name);
2908fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
2909fb73e096SJeff Guo 		 * begin to attach port.
2910fb73e096SJeff Guo 		 */
2911fb73e096SJeff Guo 		break;
2912fb73e096SJeff Guo 	default:
2913fb73e096SJeff Guo 		break;
2914fb73e096SJeff Guo 	}
2915fb73e096SJeff Guo }
2916fb73e096SJeff Guo 
2917013af9b6SIntel static int
291828caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2919af75078fSIntel {
2920013af9b6SIntel 	uint16_t i;
2921af75078fSIntel 	int diag;
2922013af9b6SIntel 	uint8_t mapping_found = 0;
2923af75078fSIntel 
2924013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2925013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2926013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2927013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2928013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2929013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2930013af9b6SIntel 			if (diag != 0)
2931013af9b6SIntel 				return diag;
2932013af9b6SIntel 			mapping_found = 1;
2933af75078fSIntel 		}
2934013af9b6SIntel 	}
2935013af9b6SIntel 	if (mapping_found)
2936013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2937013af9b6SIntel 	return 0;
2938013af9b6SIntel }
2939013af9b6SIntel 
2940013af9b6SIntel static int
294128caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2942013af9b6SIntel {
2943013af9b6SIntel 	uint16_t i;
2944013af9b6SIntel 	int diag;
2945013af9b6SIntel 	uint8_t mapping_found = 0;
2946013af9b6SIntel 
2947013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2948013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2949013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2950013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2951013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2952013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2953013af9b6SIntel 			if (diag != 0)
2954013af9b6SIntel 				return diag;
2955013af9b6SIntel 			mapping_found = 1;
2956013af9b6SIntel 		}
2957013af9b6SIntel 	}
2958013af9b6SIntel 	if (mapping_found)
2959013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2960013af9b6SIntel 	return 0;
2961013af9b6SIntel }
2962013af9b6SIntel 
2963013af9b6SIntel static void
296428caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2965013af9b6SIntel {
2966013af9b6SIntel 	int diag = 0;
2967013af9b6SIntel 
2968013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2969af75078fSIntel 	if (diag != 0) {
2970013af9b6SIntel 		if (diag == -ENOTSUP) {
2971013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2972013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2973013af9b6SIntel 		}
2974013af9b6SIntel 		else
2975013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2976013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2977013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2978af75078fSIntel 					pi, diag);
2979af75078fSIntel 	}
2980013af9b6SIntel 
2981013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2982af75078fSIntel 	if (diag != 0) {
2983013af9b6SIntel 		if (diag == -ENOTSUP) {
2984013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2985013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2986013af9b6SIntel 		}
2987013af9b6SIntel 		else
2988013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2989013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2990013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2991af75078fSIntel 					pi, diag);
2992af75078fSIntel 	}
2993af75078fSIntel }
2994af75078fSIntel 
2995f2c5125aSPablo de Lara static void
2996f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2997f2c5125aSPablo de Lara {
2998d44f8a48SQi Zhang 	uint16_t qid;
29995e91aeefSWei Zhao 	uint64_t offloads;
3000f2c5125aSPablo de Lara 
3001d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
30025e91aeefSWei Zhao 		offloads = port->rx_conf[qid].offloads;
3003d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
3004575e0fd1SWei Zhao 		if (offloads != 0)
3005575e0fd1SWei Zhao 			port->rx_conf[qid].offloads = offloads;
3006d44f8a48SQi Zhang 
3007d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3008f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3009d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3010f2c5125aSPablo de Lara 
3011f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3012d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3013f2c5125aSPablo de Lara 
3014f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3015d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3016f2c5125aSPablo de Lara 
3017f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3018d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3019f2c5125aSPablo de Lara 
3020f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3021d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
3022f2c5125aSPablo de Lara 
3023d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3024d44f8a48SQi Zhang 	}
3025d44f8a48SQi Zhang 
3026d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
30275e91aeefSWei Zhao 		offloads = port->tx_conf[qid].offloads;
3028d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
3029575e0fd1SWei Zhao 		if (offloads != 0)
3030575e0fd1SWei Zhao 			port->tx_conf[qid].offloads = offloads;
3031d44f8a48SQi Zhang 
3032d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3033f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3034d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3035f2c5125aSPablo de Lara 
3036f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3037d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3038f2c5125aSPablo de Lara 
3039f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3040d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3041f2c5125aSPablo de Lara 
3042f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3043d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3044f2c5125aSPablo de Lara 
3045f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3046d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3047d44f8a48SQi Zhang 
3048d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
3049d44f8a48SQi Zhang 	}
3050f2c5125aSPablo de Lara }
3051f2c5125aSPablo de Lara 
3052013af9b6SIntel void
3053013af9b6SIntel init_port_config(void)
3054013af9b6SIntel {
3055013af9b6SIntel 	portid_t pid;
3056013af9b6SIntel 	struct rte_port *port;
30576f51deb9SIvan Ilchenko 	int ret;
3058013af9b6SIntel 
30597d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
3060013af9b6SIntel 		port = &ports[pid];
3061013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
30626f51deb9SIvan Ilchenko 
30636f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
30646f51deb9SIvan Ilchenko 		if (ret != 0)
30656f51deb9SIvan Ilchenko 			return;
30666f51deb9SIvan Ilchenko 
30673ce690d3SBruce Richardson 		if (nb_rxq > 1) {
3068013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
306990892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3070422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
3071af75078fSIntel 		} else {
3072013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3073013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3074af75078fSIntel 		}
30753ce690d3SBruce Richardson 
30765f592039SJingjing Wu 		if (port->dcb_flag == 0) {
30773ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
30783ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
30793ce690d3SBruce Richardson 			else
30803ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
30813ce690d3SBruce Richardson 		}
30823ce690d3SBruce Richardson 
3083f2c5125aSPablo de Lara 		rxtx_port_config(port);
3084013af9b6SIntel 
3085a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3086a5279d25SIgor Romanov 		if (ret != 0)
3087a5279d25SIgor Romanov 			return;
3088013af9b6SIntel 
3089013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
309050c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3091e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
30927b7e5ba7SIntel #endif
30938ea656f8SGaetan Rivet 
30948ea656f8SGaetan Rivet 		if (lsc_interrupt &&
30958ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
30968ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
30978ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
3098284c908cSGaetan Rivet 		if (rmv_interrupt &&
3099284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
3100284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
3101284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
3102013af9b6SIntel 	}
3103013af9b6SIntel }
3104013af9b6SIntel 
310541b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
310641b05095SBernard Iremonger {
310741b05095SBernard Iremonger 	struct rte_port *port;
310841b05095SBernard Iremonger 
310941b05095SBernard Iremonger 	port = &ports[slave_pid];
311041b05095SBernard Iremonger 	port->slave_flag = 1;
311141b05095SBernard Iremonger }
311241b05095SBernard Iremonger 
311341b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
311441b05095SBernard Iremonger {
311541b05095SBernard Iremonger 	struct rte_port *port;
311641b05095SBernard Iremonger 
311741b05095SBernard Iremonger 	port = &ports[slave_pid];
311841b05095SBernard Iremonger 	port->slave_flag = 0;
311941b05095SBernard Iremonger }
312041b05095SBernard Iremonger 
31210e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
31220e545d30SBernard Iremonger {
31230e545d30SBernard Iremonger 	struct rte_port *port;
31240e545d30SBernard Iremonger 
31250e545d30SBernard Iremonger 	port = &ports[slave_pid];
3126b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
3127b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3128b8b8b344SMatan Azrad 		return 1;
3129b8b8b344SMatan Azrad 	return 0;
31300e545d30SBernard Iremonger }
31310e545d30SBernard Iremonger 
3132013af9b6SIntel const uint16_t vlan_tags[] = {
3133013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
3134013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
3135013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
3136013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
3137013af9b6SIntel };
3138013af9b6SIntel 
3139013af9b6SIntel static  int
3140ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
31411a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
31421a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
31431a572499SJingjing Wu 		 uint8_t pfc_en)
3144013af9b6SIntel {
3145013af9b6SIntel 	uint8_t i;
3146ac7c491cSKonstantin Ananyev 	int32_t rc;
3147ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
3148af75078fSIntel 
3149af75078fSIntel 	/*
3150013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3151013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
3152af75078fSIntel 	 */
31531a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
31541a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
31551a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
31561a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
31571a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3158013af9b6SIntel 
3159547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
31601a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
31611a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
31621a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
31631a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
31641a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
31651a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3166013af9b6SIntel 
31671a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
31681a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
31691a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
31701a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
31711a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
3172af75078fSIntel 		}
3173013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3174f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3175f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3176013af9b6SIntel 		}
3177013af9b6SIntel 
3178013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
317932e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
318032e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
31811a572499SJingjing Wu 	} else {
31821a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
31831a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
31841a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
31851a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
3186013af9b6SIntel 
3187ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3188ac7c491cSKonstantin Ananyev 		if (rc != 0)
3189ac7c491cSKonstantin Ananyev 			return rc;
3190ac7c491cSKonstantin Ananyev 
31911a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
31921a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
31931a572499SJingjing Wu 
3194bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3195bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
3196bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
3197013af9b6SIntel 		}
3198ac7c491cSKonstantin Ananyev 
31991a572499SJingjing Wu 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
3200ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
320132e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
32021a572499SJingjing Wu 	}
32031a572499SJingjing Wu 
32041a572499SJingjing Wu 	if (pfc_en)
32051a572499SJingjing Wu 		eth_conf->dcb_capability_en =
32061a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3207013af9b6SIntel 	else
3208013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3209013af9b6SIntel 
3210013af9b6SIntel 	return 0;
3211013af9b6SIntel }
3212013af9b6SIntel 
3213013af9b6SIntel int
32141a572499SJingjing Wu init_port_dcb_config(portid_t pid,
32151a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
32161a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
32171a572499SJingjing Wu 		     uint8_t pfc_en)
3218013af9b6SIntel {
3219013af9b6SIntel 	struct rte_eth_conf port_conf;
3220013af9b6SIntel 	struct rte_port *rte_port;
3221013af9b6SIntel 	int retval;
3222013af9b6SIntel 	uint16_t i;
3223013af9b6SIntel 
32242a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3225013af9b6SIntel 
3226013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3227013af9b6SIntel 	/* Enter DCB configuration status */
3228013af9b6SIntel 	dcb_config = 1;
3229013af9b6SIntel 
3230d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3231d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
3232d5354e89SYanglong Wu 
3233013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
3234ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3235013af9b6SIntel 	if (retval < 0)
3236013af9b6SIntel 		return retval;
32370074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3238013af9b6SIntel 
32392f203d44SQi Zhang 	/* re-configure the device . */
32402b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
32412b0e0ebaSChenbo Xia 	if (retval < 0)
32422b0e0ebaSChenbo Xia 		return retval;
32436f51deb9SIvan Ilchenko 
32446f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
32456f51deb9SIvan Ilchenko 	if (retval != 0)
32466f51deb9SIvan Ilchenko 		return retval;
32472a977b89SWenzhuo Lu 
32482a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
32492a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
32502a977b89SWenzhuo Lu 	 */
32512a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
32522a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
32532a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
32542a977b89SWenzhuo Lu 			" for port %d.", pid);
32552a977b89SWenzhuo Lu 		return -1;
32562a977b89SWenzhuo Lu 	}
32572a977b89SWenzhuo Lu 
32582a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
32592a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
32602a977b89SWenzhuo Lu 	 */
32612a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
326286ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
326386ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
326486ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
326586ef65eeSBernard Iremonger 		} else {
32662a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
32672a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
326886ef65eeSBernard Iremonger 		}
32692a977b89SWenzhuo Lu 	} else {
32702a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
32712a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
32722a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
32732a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
32742a977b89SWenzhuo Lu 		} else {
32752a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
32762a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
32772a977b89SWenzhuo Lu 
32782a977b89SWenzhuo Lu 		}
32792a977b89SWenzhuo Lu 	}
32802a977b89SWenzhuo Lu 	rx_free_thresh = 64;
32812a977b89SWenzhuo Lu 
3282013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3283013af9b6SIntel 
3284f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3285013af9b6SIntel 	/* VLAN filter */
32860074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
32871a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3288013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3289013af9b6SIntel 
3290a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3291a5279d25SIgor Romanov 	if (retval != 0)
3292a5279d25SIgor Romanov 		return retval;
3293a5279d25SIgor Romanov 
3294013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
3295013af9b6SIntel 
32967741e4cfSIntel 	rte_port->dcb_flag = 1;
32977741e4cfSIntel 
3298013af9b6SIntel 	return 0;
3299af75078fSIntel }
3300af75078fSIntel 
3301ffc468ffSTetsuya Mukawa static void
3302ffc468ffSTetsuya Mukawa init_port(void)
3303ffc468ffSTetsuya Mukawa {
3304ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3305ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3306ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3307ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3308ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3309ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3310ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3311ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3312ffc468ffSTetsuya Mukawa 	}
331329841336SPhil Yang 
331429841336SPhil Yang 	/* Initialize ports NUMA structures */
331529841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
331629841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
331729841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3318ffc468ffSTetsuya Mukawa }
3319ffc468ffSTetsuya Mukawa 
3320d3a274ceSZhihong Wang static void
3321d3a274ceSZhihong Wang force_quit(void)
3322d3a274ceSZhihong Wang {
3323d3a274ceSZhihong Wang 	pmd_test_exit();
3324d3a274ceSZhihong Wang 	prompt_exit();
3325d3a274ceSZhihong Wang }
3326d3a274ceSZhihong Wang 
3327d3a274ceSZhihong Wang static void
3328cfea1f30SPablo de Lara print_stats(void)
3329cfea1f30SPablo de Lara {
3330cfea1f30SPablo de Lara 	uint8_t i;
3331cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3332cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3333cfea1f30SPablo de Lara 
3334cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3335cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3336cfea1f30SPablo de Lara 
3337cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3338cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3339cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3340683d1e82SIgor Romanov 
3341683d1e82SIgor Romanov 	fflush(stdout);
3342cfea1f30SPablo de Lara }
3343cfea1f30SPablo de Lara 
3344cfea1f30SPablo de Lara static void
3345d3a274ceSZhihong Wang signal_handler(int signum)
3346d3a274ceSZhihong Wang {
3347d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
3348d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
3349d3a274ceSZhihong Wang 				signum);
3350102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
3351102b7329SReshma Pattan 		/* uninitialize packet capture framework */
3352102b7329SReshma Pattan 		rte_pdump_uninit();
3353102b7329SReshma Pattan #endif
335462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
33558b36297dSAmit Gupta 		if (latencystats_enabled != 0)
335662d3216dSReshma Pattan 			rte_latencystats_uninit();
335762d3216dSReshma Pattan #endif
3358d3a274ceSZhihong Wang 		force_quit();
3359d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
3360d9a191a0SPhil Yang 		f_quit = 1;
3361d3a274ceSZhihong Wang 		/* exit with the expected status */
3362d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
3363d3a274ceSZhihong Wang 		kill(getpid(), signum);
3364d3a274ceSZhihong Wang 	}
3365d3a274ceSZhihong Wang }
3366d3a274ceSZhihong Wang 
3367af75078fSIntel int
3368af75078fSIntel main(int argc, char** argv)
3369af75078fSIntel {
3370af75078fSIntel 	int diag;
3371f8244c63SZhiyong Yang 	portid_t port_id;
33724918a357SXiaoyun Li 	uint16_t count;
3373fb73e096SJeff Guo 	int ret;
3374af75078fSIntel 
3375d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
3376d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
3377d3a274ceSZhihong Wang 
3378285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
3379285fd101SOlivier Matz 	if (testpmd_logtype < 0)
338016267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type");
3381285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3382285fd101SOlivier Matz 
33839201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
33849201806eSStephen Hemminger 	if (diag < 0)
338516267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
338616267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
33879201806eSStephen Hemminger 
3388a87ab9f7SStephen Hemminger 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
338916267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE,
339016267ceeSStephen Hemminger 			 "Secondary process type not supported.\n");
3391a87ab9f7SStephen Hemminger 
339297b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
339397b5d8b5SThomas Monjalon 	if (ret != 0)
339416267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
339597b5d8b5SThomas Monjalon 
33964aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP
33974aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
3398e9436f54STiwei Bie 	rte_pdump_init();
33994aa0d012SAnatoly Burakov #endif
34004aa0d012SAnatoly Burakov 
34014918a357SXiaoyun Li 	count = 0;
34024918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
34034918a357SXiaoyun Li 		ports_ids[count] = port_id;
34044918a357SXiaoyun Li 		count++;
34054918a357SXiaoyun Li 	}
34064918a357SXiaoyun Li 	nb_ports = (portid_t) count;
34074aa0d012SAnatoly Burakov 	if (nb_ports == 0)
34084aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
34094aa0d012SAnatoly Burakov 
34104aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
34114aa0d012SAnatoly Burakov 	init_port();
34124aa0d012SAnatoly Burakov 
34134aa0d012SAnatoly Burakov 	set_def_fwd_config();
34144aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
341516267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
341616267ceeSStephen Hemminger 			 "Check the core mask argument\n");
34174aa0d012SAnatoly Burakov 
3418e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
3419e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE
3420e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
3421e505d84cSAnatoly Burakov #endif
3422e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS
3423e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
3424e505d84cSAnatoly Burakov #endif
3425e505d84cSAnatoly Burakov 
3426fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
34275fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
3428fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
3429fb7b8b32SAnatoly Burakov #else
3430fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
3431fb7b8b32SAnatoly Burakov #endif
3432fb7b8b32SAnatoly Burakov 
3433e505d84cSAnatoly Burakov 	argc -= diag;
3434e505d84cSAnatoly Burakov 	argv += diag;
3435e505d84cSAnatoly Burakov 	if (argc > 1)
3436e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
3437e505d84cSAnatoly Burakov 
3438e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3439285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
34401c036b16SEelco Chaudron 			strerror(errno));
34411c036b16SEelco Chaudron 	}
34421c036b16SEelco Chaudron 
344399cabef0SPablo de Lara 	if (tx_first && interactive)
344499cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
344599cabef0SPablo de Lara 				"interactive mode.\n");
34468820cba4SDavid Hunt 
34478820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
34488820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
34498820cba4SDavid Hunt 				" using tx_first. Disabling.\n");
34508820cba4SDavid Hunt 		lsc_interrupt = 0;
34518820cba4SDavid Hunt 	}
34528820cba4SDavid Hunt 
34535a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
34545a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
34555a8fb55cSReshma Pattan 
34565a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3457af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3458af75078fSIntel 		       "but nb_txq=%d will prevent to fully test it.\n",
3459af75078fSIntel 		       nb_rxq, nb_txq);
3460af75078fSIntel 
3461af75078fSIntel 	init_config();
3462fb73e096SJeff Guo 
3463fb73e096SJeff Guo 	if (hot_plug) {
34642049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
3465fb73e096SJeff Guo 		if (ret) {
34662049c511SJeff Guo 			RTE_LOG(ERR, EAL,
34672049c511SJeff Guo 				"fail to enable hotplug handling.");
3468fb73e096SJeff Guo 			return -1;
3469fb73e096SJeff Guo 		}
3470fb73e096SJeff Guo 
34712049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
34722049c511SJeff Guo 		if (ret) {
34732049c511SJeff Guo 			RTE_LOG(ERR, EAL,
34742049c511SJeff Guo 				"fail to start device event monitoring.");
34752049c511SJeff Guo 			return -1;
34762049c511SJeff Guo 		}
34772049c511SJeff Guo 
34782049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
3479cc1bf307SJeff Guo 			dev_event_callback, NULL);
34802049c511SJeff Guo 		if (ret) {
34812049c511SJeff Guo 			RTE_LOG(ERR, EAL,
34822049c511SJeff Guo 				"fail  to register device event callback\n");
34832049c511SJeff Guo 			return -1;
34842049c511SJeff Guo 		}
3485fb73e096SJeff Guo 	}
3486fb73e096SJeff Guo 
34876937d210SStephen Hemminger 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3488148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3489af75078fSIntel 
3490ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
349134fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
349234fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
349334fc1051SIvan Ilchenko 		if (ret != 0)
349434fc1051SIvan Ilchenko 			printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
349534fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
349634fc1051SIvan Ilchenko 	}
3497af75078fSIntel 
34987e4441c8SRemy Horton 	/* Init metrics library */
34997e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
35007e4441c8SRemy Horton 
350162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
350262d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
350362d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
350462d3216dSReshma Pattan 		if (ret)
350562d3216dSReshma Pattan 			printf("Warning: latencystats init()"
350662d3216dSReshma Pattan 				" returned error %d\n",	ret);
350762d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
350862d3216dSReshma Pattan 			latencystats_lcore_id);
350962d3216dSReshma Pattan 	}
351062d3216dSReshma Pattan #endif
351162d3216dSReshma Pattan 
35127e4441c8SRemy Horton 	/* Setup bitrate stats */
35137e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
3514e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
35157e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
35167e4441c8SRemy Horton 		if (bitrate_data == NULL)
3517e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
3518e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
35197e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
3520e25e6c70SRemy Horton 	}
35217e4441c8SRemy Horton #endif
35227e4441c8SRemy Horton 
35230d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
352481ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
352581ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
352681ef862bSAllain Legacy 
3527ca7feb22SCyril Chemparathy 	if (interactive == 1) {
3528ca7feb22SCyril Chemparathy 		if (auto_start) {
3529ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
3530ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
3531ca7feb22SCyril Chemparathy 		}
3532af75078fSIntel 		prompt();
35330de738cfSJiayu Hu 		pmd_test_exit();
3534ca7feb22SCyril Chemparathy 	} else
35350d56cb81SThomas Monjalon #endif
35360d56cb81SThomas Monjalon 	{
3537af75078fSIntel 		char c;
3538af75078fSIntel 		int rc;
3539af75078fSIntel 
3540d9a191a0SPhil Yang 		f_quit = 0;
3541d9a191a0SPhil Yang 
3542af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
354399cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
3544cfea1f30SPablo de Lara 		if (stats_period != 0) {
3545cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3546cfea1f30SPablo de Lara 			uint64_t timer_period;
3547cfea1f30SPablo de Lara 
3548cfea1f30SPablo de Lara 			/* Convert to number of cycles */
3549cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
3550cfea1f30SPablo de Lara 
3551d9a191a0SPhil Yang 			while (f_quit == 0) {
3552cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
3553cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
3554cfea1f30SPablo de Lara 
3555cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
3556cfea1f30SPablo de Lara 					print_stats();
3557cfea1f30SPablo de Lara 					/* Reset the timer */
3558cfea1f30SPablo de Lara 					diff_time = 0;
3559cfea1f30SPablo de Lara 				}
3560cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
3561cfea1f30SPablo de Lara 				prev_time = cur_time;
3562cfea1f30SPablo de Lara 				sleep(1);
3563cfea1f30SPablo de Lara 			}
3564cfea1f30SPablo de Lara 		}
3565cfea1f30SPablo de Lara 
3566af75078fSIntel 		printf("Press enter to exit\n");
3567af75078fSIntel 		rc = read(0, &c, 1);
3568d3a274ceSZhihong Wang 		pmd_test_exit();
3569af75078fSIntel 		if (rc < 0)
3570af75078fSIntel 			return 1;
3571af75078fSIntel 	}
3572af75078fSIntel 
3573af75078fSIntel 	return 0;
3574af75078fSIntel }
3575