xref: /dpdk/app/test-pmd/testpmd.c (revision 86fa5de1d8f34f0d5849d16edb33f7633c604367)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h>
31af75078fSIntel #include <rte_memory.h>
32af75078fSIntel #include <rte_memcpy.h>
33af75078fSIntel #include <rte_launch.h>
34af75078fSIntel #include <rte_eal.h>
35284c908cSGaetan Rivet #include <rte_alarm.h>
36af75078fSIntel #include <rte_per_lcore.h>
37af75078fSIntel #include <rte_lcore.h>
38af75078fSIntel #include <rte_atomic.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
577e4441c8SRemy Horton #include <rte_metrics.h>
587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
597e4441c8SRemy Horton #include <rte_bitrate.h>
607e4441c8SRemy Horton #endif
6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6262d3216dSReshma Pattan #include <rte_latencystats.h>
6362d3216dSReshma Pattan #endif
64af75078fSIntel 
65af75078fSIntel #include "testpmd.h"
66af75078fSIntel 
67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
70c7f5dba7SAnatoly Burakov #else
71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
72c7f5dba7SAnatoly Burakov #endif
73c7f5dba7SAnatoly Burakov 
74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
77c7f5dba7SAnatoly Burakov #else
78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
79c7f5dba7SAnatoly Burakov #endif
80c7f5dba7SAnatoly Burakov 
81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
140af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
169af75078fSIntel /*
170af75078fSIntel  * Forwarding engines.
171af75078fSIntel  */
172af75078fSIntel struct fwd_engine * fwd_engines[] = {
173af75078fSIntel 	&io_fwd_engine,
174af75078fSIntel 	&mac_fwd_engine,
175d47388f1SCyril Chemparathy 	&mac_swap_engine,
176e9e23a61SCyril Chemparathy 	&flow_gen_engine,
177af75078fSIntel 	&rx_only_engine,
178af75078fSIntel 	&tx_only_engine,
179af75078fSIntel 	&csum_fwd_engine,
180168dfa61SIvan Boule 	&icmp_echo_engine,
1813c156061SJens Freimann 	&noisy_vnf_engine,
1820ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
1830ad778b3SJasvinder Singh 	&softnic_fwd_engine,
1845b590fbeSJasvinder Singh #endif
185af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
186af75078fSIntel 	&ieee1588_fwd_engine,
187af75078fSIntel #endif
188af75078fSIntel 	NULL,
189af75078fSIntel };
190af75078fSIntel 
191af75078fSIntel struct fwd_config cur_fwd_config;
192af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193bf56fce1SZhihong Wang uint32_t retry_enabled;
194bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
196af75078fSIntel 
197af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
199c8798818SIntel                                       * specified on command-line. */
200cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
201d9a191a0SPhil Yang 
202d9a191a0SPhil Yang /*
203d9a191a0SPhil Yang  * In container, it cannot terminate the process which running with 'stats-period'
204d9a191a0SPhil Yang  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
205d9a191a0SPhil Yang  */
206d9a191a0SPhil Yang uint8_t f_quit;
207d9a191a0SPhil Yang 
208af75078fSIntel /*
209af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
210af75078fSIntel  */
211af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
214af75078fSIntel };
215af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
216af75078fSIntel 
21779bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
21879bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
21979bec05bSKonstantin Ananyev 
220af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
221e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
222af75078fSIntel 
223900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
224900550deSIntel uint8_t dcb_config = 0;
225900550deSIntel 
226900550deSIntel /* Whether the dcb is in testing status */
227900550deSIntel uint8_t dcb_test = 0;
228900550deSIntel 
229af75078fSIntel /*
230af75078fSIntel  * Configurable number of RX/TX queues.
231af75078fSIntel  */
232af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
233af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
234af75078fSIntel 
235af75078fSIntel /*
236af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2378599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
238af75078fSIntel  */
2398599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2408599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
241af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
242af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
243af75078fSIntel 
244f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
245af75078fSIntel /*
246af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
247af75078fSIntel  */
248af75078fSIntel 
249f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
250f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
251f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
252af75078fSIntel 
253f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
254f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
255f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
256af75078fSIntel 
257af75078fSIntel /*
258af75078fSIntel  * Configurable value of RX free threshold.
259af75078fSIntel  */
260f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
261af75078fSIntel 
262af75078fSIntel /*
263ce8d5614SIntel  * Configurable value of RX drop enable.
264ce8d5614SIntel  */
265f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
266ce8d5614SIntel 
267ce8d5614SIntel /*
268af75078fSIntel  * Configurable value of TX free threshold.
269af75078fSIntel  */
270f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
271af75078fSIntel 
272af75078fSIntel /*
273af75078fSIntel  * Configurable value of TX RS bit threshold.
274af75078fSIntel  */
275f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
276af75078fSIntel 
277af75078fSIntel /*
2783c156061SJens Freimann  * Configurable value of buffered packets before sending.
2793c156061SJens Freimann  */
2803c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2813c156061SJens Freimann 
2823c156061SJens Freimann /*
2833c156061SJens Freimann  * Configurable value of packet buffer timeout.
2843c156061SJens Freimann  */
2853c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2863c156061SJens Freimann 
2873c156061SJens Freimann /*
2883c156061SJens Freimann  * Configurable value for size of VNF internal memory area
2893c156061SJens Freimann  * used for simulating noisy neighbour behaviour
2903c156061SJens Freimann  */
2913c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
2923c156061SJens Freimann 
2933c156061SJens Freimann /*
2943c156061SJens Freimann  * Configurable value of number of random writes done in
2953c156061SJens Freimann  * VNF simulation memory area.
2963c156061SJens Freimann  */
2973c156061SJens Freimann uint64_t noisy_lkup_num_writes;
2983c156061SJens Freimann 
2993c156061SJens Freimann /*
3003c156061SJens Freimann  * Configurable value of number of random reads done in
3013c156061SJens Freimann  * VNF simulation memory area.
3023c156061SJens Freimann  */
3033c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3043c156061SJens Freimann 
3053c156061SJens Freimann /*
3063c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3073c156061SJens Freimann  * VNF simulation memory area.
3083c156061SJens Freimann  */
3093c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3103c156061SJens Freimann 
3113c156061SJens Freimann /*
312af75078fSIntel  * Receive Side Scaling (RSS) configuration.
313af75078fSIntel  */
3148a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
315af75078fSIntel 
316af75078fSIntel /*
317af75078fSIntel  * Port topology configuration
318af75078fSIntel  */
319af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
320af75078fSIntel 
3217741e4cfSIntel /*
3227741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3237741e4cfSIntel  */
3247741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3257741e4cfSIntel 
326af75078fSIntel /*
3277ee3e944SVasily Philipov  * Flow API isolated mode.
3287ee3e944SVasily Philipov  */
3297ee3e944SVasily Philipov uint8_t flow_isolate_all;
3307ee3e944SVasily Philipov 
3317ee3e944SVasily Philipov /*
332bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
333bc202406SDavid Marchand  */
334bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
335bc202406SDavid Marchand 
336bc202406SDavid Marchand /*
3378ea656f8SGaetan Rivet  * Enable link status change notification
3388ea656f8SGaetan Rivet  */
3398ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3408ea656f8SGaetan Rivet 
3418ea656f8SGaetan Rivet /*
342284c908cSGaetan Rivet  * Enable device removal notification.
343284c908cSGaetan Rivet  */
344284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
345284c908cSGaetan Rivet 
346fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
347fb73e096SJeff Guo 
348284c908cSGaetan Rivet /*
3493af72783SGaetan Rivet  * Display or mask ether events
3503af72783SGaetan Rivet  * Default to all events except VF_MBOX
3513af72783SGaetan Rivet  */
3523af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3533af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3543af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3553af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
356badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3573af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3583af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
359e505d84cSAnatoly Burakov /*
360e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
361e505d84cSAnatoly Burakov  */
362e505d84cSAnatoly Burakov int do_mlockall = 0;
3633af72783SGaetan Rivet 
3643af72783SGaetan Rivet /*
3657b7e5ba7SIntel  * NIC bypass mode configuration options.
3667b7e5ba7SIntel  */
3677b7e5ba7SIntel 
36850c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3697b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
370e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
3717b7e5ba7SIntel #endif
3727b7e5ba7SIntel 
373e261265eSRadu Nicolau 
37462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
37562d3216dSReshma Pattan 
37662d3216dSReshma Pattan /*
37762d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
37862d3216dSReshma Pattan  */
37962d3216dSReshma Pattan uint8_t latencystats_enabled;
38062d3216dSReshma Pattan 
38162d3216dSReshma Pattan /*
38262d3216dSReshma Pattan  * Lcore ID to serive latency statistics.
38362d3216dSReshma Pattan  */
38462d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
38562d3216dSReshma Pattan 
38662d3216dSReshma Pattan #endif
38762d3216dSReshma Pattan 
3887b7e5ba7SIntel /*
389af75078fSIntel  * Ethernet device configuration.
390af75078fSIntel  */
391af75078fSIntel struct rte_eth_rxmode rx_mode = {
392af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
393af75078fSIntel };
394af75078fSIntel 
39507e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
39607e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
39707e5f7bdSShahaf Shuler };
398fd8c20aaSShahaf Shuler 
399af75078fSIntel struct rte_fdir_conf fdir_conf = {
400af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
401af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
402af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
403d9d5e6f2SJingjing Wu 	.mask = {
40426f579aaSWei Zhao 		.vlan_tci_mask = 0xFFEF,
405d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
406d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
407d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
408d9d5e6f2SJingjing Wu 		},
409d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
410d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
411d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
412d9d5e6f2SJingjing Wu 		},
413d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
414d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
41547b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
41647b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
41747b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
418d9d5e6f2SJingjing Wu 	},
419af75078fSIntel 	.drop_queue = 127,
420af75078fSIntel };
421af75078fSIntel 
4222950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
423af75078fSIntel 
424ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
425ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
426ed30d9b6SIntel 
427ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
428ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
429ed30d9b6SIntel 
430ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
431ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
432ed30d9b6SIntel 
433a4fd5eeeSElza Mathew /*
434a4fd5eeeSElza Mathew  * Display zero values by default for xstats
435a4fd5eeeSElza Mathew  */
436a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
437a4fd5eeeSElza Mathew 
438c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
439c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4407acf894dSStephen Hurd 
441e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4427e4441c8SRemy Horton /* Bitrate statistics */
4437e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
444e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
445e25e6c70SRemy Horton uint8_t bitrate_enabled;
446e25e6c70SRemy Horton #endif
4477e4441c8SRemy Horton 
448b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
449b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
450b40f8d78SJiayu Hu 
4511960be7dSNelio Laranjeiro struct vxlan_encap_conf vxlan_encap_conf = {
4521960be7dSNelio Laranjeiro 	.select_ipv4 = 1,
4531960be7dSNelio Laranjeiro 	.select_vlan = 0,
4541960be7dSNelio Laranjeiro 	.vni = "\x00\x00\x00",
4551960be7dSNelio Laranjeiro 	.udp_src = 0,
4561960be7dSNelio Laranjeiro 	.udp_dst = RTE_BE16(4789),
4571960be7dSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
4581960be7dSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
4591960be7dSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
4601960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
4611960be7dSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
4621960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
4631960be7dSNelio Laranjeiro 	.vlan_tci = 0,
4641960be7dSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
4651960be7dSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
4661960be7dSNelio Laranjeiro };
4671960be7dSNelio Laranjeiro 
468dcd962fcSNelio Laranjeiro struct nvgre_encap_conf nvgre_encap_conf = {
469dcd962fcSNelio Laranjeiro 	.select_ipv4 = 1,
470dcd962fcSNelio Laranjeiro 	.select_vlan = 0,
471dcd962fcSNelio Laranjeiro 	.tni = "\x00\x00\x00",
472dcd962fcSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
473dcd962fcSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
474dcd962fcSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
475dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
476dcd962fcSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
477dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
478dcd962fcSNelio Laranjeiro 	.vlan_tci = 0,
479dcd962fcSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
480dcd962fcSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
481dcd962fcSNelio Laranjeiro };
482dcd962fcSNelio Laranjeiro 
483ed30d9b6SIntel /* Forward function declarations */
484c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
48528caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
48628caa76aSZhiyong Yang 						   struct rte_port *port);
487edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
488f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
48976ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
490d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
49189ecd110SJeff Guo static void eth_dev_event_callback(const char *device_name,
492fb73e096SJeff Guo 				enum rte_dev_event_type type,
493fb73e096SJeff Guo 				void *param);
494ce8d5614SIntel 
495ce8d5614SIntel /*
496ce8d5614SIntel  * Check if all the ports are started.
497ce8d5614SIntel  * If yes, return positive value. If not, return zero.
498ce8d5614SIntel  */
499ce8d5614SIntel static int all_ports_started(void);
500ed30d9b6SIntel 
50152f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
50252f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
50352f38a20SJiayu Hu 
504af75078fSIntel /*
50598a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
506c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
507c9cafcc8SShahaf Shuler  */
508c9cafcc8SShahaf Shuler int
509c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
510c9cafcc8SShahaf Shuler {
511c9cafcc8SShahaf Shuler 	unsigned int i;
512c9cafcc8SShahaf Shuler 
513c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
514c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
515c9cafcc8SShahaf Shuler 			return 0;
516c9cafcc8SShahaf Shuler 	}
517c9cafcc8SShahaf Shuler 	return 1;
518c9cafcc8SShahaf Shuler }
519c9cafcc8SShahaf Shuler 
520c9cafcc8SShahaf Shuler /*
521af75078fSIntel  * Setup default configuration.
522af75078fSIntel  */
523af75078fSIntel static void
524af75078fSIntel set_default_fwd_lcores_config(void)
525af75078fSIntel {
526af75078fSIntel 	unsigned int i;
527af75078fSIntel 	unsigned int nb_lc;
5287acf894dSStephen Hurd 	unsigned int sock_num;
529af75078fSIntel 
530af75078fSIntel 	nb_lc = 0;
531af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
532dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
533dbfb8ec7SPhil Yang 			continue;
534c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
535c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
536c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
537c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
538c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
539c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
540c9cafcc8SShahaf Shuler 			}
541c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5427acf894dSStephen Hurd 		}
543f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
544f54fe5eeSStephen Hurd 			continue;
545f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
546af75078fSIntel 	}
547af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
548af75078fSIntel 	nb_cfg_lcores = nb_lcores;
549af75078fSIntel 	nb_fwd_lcores = 1;
550af75078fSIntel }
551af75078fSIntel 
552af75078fSIntel static void
553af75078fSIntel set_def_peer_eth_addrs(void)
554af75078fSIntel {
555af75078fSIntel 	portid_t i;
556af75078fSIntel 
557af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
558af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
559af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
560af75078fSIntel 	}
561af75078fSIntel }
562af75078fSIntel 
563af75078fSIntel static void
564af75078fSIntel set_default_fwd_ports_config(void)
565af75078fSIntel {
566af75078fSIntel 	portid_t pt_id;
56765a7360cSMatan Azrad 	int i = 0;
568af75078fSIntel 
569effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
57065a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
571af75078fSIntel 
572effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
573effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
574effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
575effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
576effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
577effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
578effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
579effdb8bbSPhil Yang 			}
580effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
581effdb8bbSPhil Yang 		}
582effdb8bbSPhil Yang 	}
583effdb8bbSPhil Yang 
584af75078fSIntel 	nb_cfg_ports = nb_ports;
585af75078fSIntel 	nb_fwd_ports = nb_ports;
586af75078fSIntel }
587af75078fSIntel 
588af75078fSIntel void
589af75078fSIntel set_def_fwd_config(void)
590af75078fSIntel {
591af75078fSIntel 	set_default_fwd_lcores_config();
592af75078fSIntel 	set_def_peer_eth_addrs();
593af75078fSIntel 	set_default_fwd_ports_config();
594af75078fSIntel }
595af75078fSIntel 
596c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
597c7f5dba7SAnatoly Burakov static int
598c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
599c7f5dba7SAnatoly Burakov {
600c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
601c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
602c7f5dba7SAnatoly Burakov 
603c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
604c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
605c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
606c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
607c7f5dba7SAnatoly Burakov 	 */
608c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
609c7f5dba7SAnatoly Burakov 
610c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
611c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
612c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
613c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
614c7f5dba7SAnatoly Burakov 		return -1;
615c7f5dba7SAnatoly Burakov 	}
616c7f5dba7SAnatoly Burakov 
617c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
618c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
619c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
620c7f5dba7SAnatoly Burakov 
621c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
622c7f5dba7SAnatoly Burakov 
623c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
624c7f5dba7SAnatoly Burakov 
625c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
626c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
627c7f5dba7SAnatoly Burakov 		return -1;
628c7f5dba7SAnatoly Burakov 	}
629c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
630c7f5dba7SAnatoly Burakov 
631c7f5dba7SAnatoly Burakov 	return 0;
632c7f5dba7SAnatoly Burakov }
633c7f5dba7SAnatoly Burakov 
634c7f5dba7SAnatoly Burakov static inline uint32_t
635c7f5dba7SAnatoly Burakov bsf64(uint64_t v)
636c7f5dba7SAnatoly Burakov {
637c7f5dba7SAnatoly Burakov 	return (uint32_t)__builtin_ctzll(v);
638c7f5dba7SAnatoly Burakov }
639c7f5dba7SAnatoly Burakov 
640c7f5dba7SAnatoly Burakov static inline uint32_t
641c7f5dba7SAnatoly Burakov log2_u64(uint64_t v)
642c7f5dba7SAnatoly Burakov {
643c7f5dba7SAnatoly Burakov 	if (v == 0)
644c7f5dba7SAnatoly Burakov 		return 0;
645c7f5dba7SAnatoly Burakov 	v = rte_align64pow2(v);
646c7f5dba7SAnatoly Burakov 	return bsf64(v);
647c7f5dba7SAnatoly Burakov }
648c7f5dba7SAnatoly Burakov 
649c7f5dba7SAnatoly Burakov static int
650c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
651c7f5dba7SAnatoly Burakov {
652c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
653c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
654c7f5dba7SAnatoly Burakov 	 */
655c7f5dba7SAnatoly Burakov 	int log2 = log2_u64(page_sz);
656c7f5dba7SAnatoly Burakov 
657c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
658c7f5dba7SAnatoly Burakov }
659c7f5dba7SAnatoly Burakov 
660c7f5dba7SAnatoly Burakov static void *
661c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
662c7f5dba7SAnatoly Burakov {
663c7f5dba7SAnatoly Burakov 	void *addr;
664c7f5dba7SAnatoly Burakov 	int flags;
665c7f5dba7SAnatoly Burakov 
666c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
667c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
668c7f5dba7SAnatoly Burakov 	if (huge)
669c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
670c7f5dba7SAnatoly Burakov 
671c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
672c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
673c7f5dba7SAnatoly Burakov 		return NULL;
674c7f5dba7SAnatoly Burakov 
675c7f5dba7SAnatoly Burakov 	return addr;
676c7f5dba7SAnatoly Burakov }
677c7f5dba7SAnatoly Burakov 
678c7f5dba7SAnatoly Burakov struct extmem_param {
679c7f5dba7SAnatoly Burakov 	void *addr;
680c7f5dba7SAnatoly Burakov 	size_t len;
681c7f5dba7SAnatoly Burakov 	size_t pgsz;
682c7f5dba7SAnatoly Burakov 	rte_iova_t *iova_table;
683c7f5dba7SAnatoly Burakov 	unsigned int iova_table_len;
684c7f5dba7SAnatoly Burakov };
685c7f5dba7SAnatoly Burakov 
686c7f5dba7SAnatoly Burakov static int
687c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
688c7f5dba7SAnatoly Burakov 		bool huge)
689c7f5dba7SAnatoly Burakov {
690c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
691c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
692c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
693c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
694c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
695c7f5dba7SAnatoly Burakov 	void *addr;
696c7f5dba7SAnatoly Burakov 	int ret;
697c7f5dba7SAnatoly Burakov 
698c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
699c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
700c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
701c7f5dba7SAnatoly Burakov 			continue;
702c7f5dba7SAnatoly Burakov 
703c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
704c7f5dba7SAnatoly Burakov 
705c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
706c7f5dba7SAnatoly Burakov 		if (!huge)
707c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
708c7f5dba7SAnatoly Burakov 
709c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
710c7f5dba7SAnatoly Burakov 		if (ret < 0) {
711c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
712c7f5dba7SAnatoly Burakov 			return -1;
713c7f5dba7SAnatoly Burakov 		}
714c7f5dba7SAnatoly Burakov 
715c7f5dba7SAnatoly Burakov 		/* allocate our memory */
716c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
717c7f5dba7SAnatoly Burakov 
718c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
719c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
720c7f5dba7SAnatoly Burakov 		 * try another one.
721c7f5dba7SAnatoly Burakov 		 */
722c7f5dba7SAnatoly Burakov 		if (addr == NULL)
723c7f5dba7SAnatoly Burakov 			continue;
724c7f5dba7SAnatoly Burakov 
725c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
726c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
727c7f5dba7SAnatoly Burakov 
728c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
729c7f5dba7SAnatoly Burakov 
730c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
731c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
732c7f5dba7SAnatoly Burakov 			goto fail;
733c7f5dba7SAnatoly Burakov 		}
734c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
735c7f5dba7SAnatoly Burakov 		if (!huge)
736c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
737c7f5dba7SAnatoly Burakov 
738c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
739c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
740c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
741c7f5dba7SAnatoly Burakov 			size_t offset;
742c7f5dba7SAnatoly Burakov 			void *cur;
743c7f5dba7SAnatoly Burakov 
744c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
745c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
746c7f5dba7SAnatoly Burakov 
747c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
748c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
749c7f5dba7SAnatoly Burakov 
750c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
751c7f5dba7SAnatoly Burakov 
752c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
753c7f5dba7SAnatoly Burakov 		}
754c7f5dba7SAnatoly Burakov 
755c7f5dba7SAnatoly Burakov 		break;
756c7f5dba7SAnatoly Burakov 	}
757c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
758c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
759c7f5dba7SAnatoly Burakov 		return -1;
760c7f5dba7SAnatoly Burakov 
761c7f5dba7SAnatoly Burakov 	param->addr = addr;
762c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
763c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
764c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
765c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
766c7f5dba7SAnatoly Burakov 
767c7f5dba7SAnatoly Burakov 	return 0;
768c7f5dba7SAnatoly Burakov fail:
769c7f5dba7SAnatoly Burakov 	if (iovas)
770c7f5dba7SAnatoly Burakov 		free(iovas);
771c7f5dba7SAnatoly Burakov 	if (addr)
772c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
773c7f5dba7SAnatoly Burakov 
774c7f5dba7SAnatoly Burakov 	return -1;
775c7f5dba7SAnatoly Burakov }
776c7f5dba7SAnatoly Burakov 
777c7f5dba7SAnatoly Burakov static int
778c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
779c7f5dba7SAnatoly Burakov {
780c7f5dba7SAnatoly Burakov 	struct extmem_param param;
781c7f5dba7SAnatoly Burakov 	int socket_id, ret;
782c7f5dba7SAnatoly Burakov 
783c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
784c7f5dba7SAnatoly Burakov 
785c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
786c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
787c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
788c7f5dba7SAnatoly Burakov 		/* create our heap */
789c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
790c7f5dba7SAnatoly Burakov 		if (ret < 0) {
791c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
792c7f5dba7SAnatoly Burakov 			return -1;
793c7f5dba7SAnatoly Burakov 		}
794c7f5dba7SAnatoly Burakov 	}
795c7f5dba7SAnatoly Burakov 
796c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
797c7f5dba7SAnatoly Burakov 	if (ret < 0) {
798c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
799c7f5dba7SAnatoly Burakov 		return -1;
800c7f5dba7SAnatoly Burakov 	}
801c7f5dba7SAnatoly Burakov 
802c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
803c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
804c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
805c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
806c7f5dba7SAnatoly Burakov 
807c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
808c7f5dba7SAnatoly Burakov 
809c7f5dba7SAnatoly Burakov 	/* not needed any more */
810c7f5dba7SAnatoly Burakov 	free(param.iova_table);
811c7f5dba7SAnatoly Burakov 
812c7f5dba7SAnatoly Burakov 	if (ret < 0) {
813c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
814c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
815c7f5dba7SAnatoly Burakov 		return -1;
816c7f5dba7SAnatoly Burakov 	}
817c7f5dba7SAnatoly Burakov 
818c7f5dba7SAnatoly Burakov 	/* success */
819c7f5dba7SAnatoly Burakov 
820c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
821c7f5dba7SAnatoly Burakov 			param.len >> 20);
822c7f5dba7SAnatoly Burakov 
823c7f5dba7SAnatoly Burakov 	return 0;
824c7f5dba7SAnatoly Burakov }
825c7f5dba7SAnatoly Burakov 
826af75078fSIntel /*
827af75078fSIntel  * Configuration initialisation done once at init time.
828af75078fSIntel  */
829af75078fSIntel static void
830af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
831af75078fSIntel 		 unsigned int socket_id)
832af75078fSIntel {
833af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
834bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
835af75078fSIntel 	uint32_t mb_size;
836af75078fSIntel 
837dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
838af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
839148f963fSBruce Richardson 
840285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
841d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
842d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
843d1eb542eSOlivier Matz 
844c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
845c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
846c7f5dba7SAnatoly Burakov 		{
847c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
848c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
849c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
850c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
851c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
852c7f5dba7SAnatoly Burakov 			break;
853c7f5dba7SAnatoly Burakov 		}
854c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
855c7f5dba7SAnatoly Burakov 		{
856b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
857c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
858148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
859148f963fSBruce Richardson 				socket_id, 0);
86024427bb9SOlivier Matz 			if (rte_mp == NULL)
86124427bb9SOlivier Matz 				goto err;
862b19a0c75SOlivier Matz 
863b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
864b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
865b19a0c75SOlivier Matz 				rte_mp = NULL;
86624427bb9SOlivier Matz 				goto err;
867b19a0c75SOlivier Matz 			}
868b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
869b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
870c7f5dba7SAnatoly Burakov 			break;
871c7f5dba7SAnatoly Burakov 		}
872c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
873c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
874c7f5dba7SAnatoly Burakov 		{
875c7f5dba7SAnatoly Burakov 			int heap_socket;
876c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
877c7f5dba7SAnatoly Burakov 
878c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
879c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
880c7f5dba7SAnatoly Burakov 
881c7f5dba7SAnatoly Burakov 			heap_socket =
882c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
883c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
884c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
885c7f5dba7SAnatoly Burakov 
8860e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
8870e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
888ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
889c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
890c7f5dba7SAnatoly Burakov 					heap_socket);
891c7f5dba7SAnatoly Burakov 			break;
892c7f5dba7SAnatoly Burakov 		}
893c7f5dba7SAnatoly Burakov 	default:
894c7f5dba7SAnatoly Burakov 		{
895c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
896c7f5dba7SAnatoly Burakov 		}
897bece7b6cSChristian Ehrhardt 	}
898148f963fSBruce Richardson 
89924427bb9SOlivier Matz err:
900af75078fSIntel 	if (rte_mp == NULL) {
901d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
902d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
903d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
904148f963fSBruce Richardson 	} else if (verbose_level > 0) {
905591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
906af75078fSIntel 	}
907af75078fSIntel }
908af75078fSIntel 
90920a0286fSLiu Xiaofeng /*
91020a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
91120a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
91220a0286fSLiu Xiaofeng  */
91320a0286fSLiu Xiaofeng static int
91420a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
91520a0286fSLiu Xiaofeng {
91620a0286fSLiu Xiaofeng 	static int warning_once = 0;
91720a0286fSLiu Xiaofeng 
918c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
91920a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
92020a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
92120a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
92220a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
92320a0286fSLiu Xiaofeng 			       " --numa.\n");
92420a0286fSLiu Xiaofeng 		warning_once = 1;
92520a0286fSLiu Xiaofeng 		return -1;
92620a0286fSLiu Xiaofeng 	}
92720a0286fSLiu Xiaofeng 	return 0;
92820a0286fSLiu Xiaofeng }
92920a0286fSLiu Xiaofeng 
9303f7311baSWei Dai /*
9313f7311baSWei Dai  * Get the allowed maximum number of RX queues.
9323f7311baSWei Dai  * *pid return the port id which has minimal value of
9333f7311baSWei Dai  * max_rx_queues in all ports.
9343f7311baSWei Dai  */
9353f7311baSWei Dai queueid_t
9363f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
9373f7311baSWei Dai {
9383f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
9393f7311baSWei Dai 	portid_t pi;
9403f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
9413f7311baSWei Dai 
9423f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
9433f7311baSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
9443f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
9453f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
9463f7311baSWei Dai 			*pid = pi;
9473f7311baSWei Dai 		}
9483f7311baSWei Dai 	}
9493f7311baSWei Dai 	return allowed_max_rxq;
9503f7311baSWei Dai }
9513f7311baSWei Dai 
9523f7311baSWei Dai /*
9533f7311baSWei Dai  * Check input rxq is valid or not.
9543f7311baSWei Dai  * If input rxq is not greater than any of maximum number
9553f7311baSWei Dai  * of RX queues of all ports, it is valid.
9563f7311baSWei Dai  * if valid, return 0, else return -1
9573f7311baSWei Dai  */
9583f7311baSWei Dai int
9593f7311baSWei Dai check_nb_rxq(queueid_t rxq)
9603f7311baSWei Dai {
9613f7311baSWei Dai 	queueid_t allowed_max_rxq;
9623f7311baSWei Dai 	portid_t pid = 0;
9633f7311baSWei Dai 
9643f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
9653f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
9663f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
9673f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
9683f7311baSWei Dai 		       rxq,
9693f7311baSWei Dai 		       allowed_max_rxq,
9703f7311baSWei Dai 		       pid);
9713f7311baSWei Dai 		return -1;
9723f7311baSWei Dai 	}
9733f7311baSWei Dai 	return 0;
9743f7311baSWei Dai }
9753f7311baSWei Dai 
97636db4f6cSWei Dai /*
97736db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
97836db4f6cSWei Dai  * *pid return the port id which has minimal value of
97936db4f6cSWei Dai  * max_tx_queues in all ports.
98036db4f6cSWei Dai  */
98136db4f6cSWei Dai queueid_t
98236db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
98336db4f6cSWei Dai {
98436db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
98536db4f6cSWei Dai 	portid_t pi;
98636db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
98736db4f6cSWei Dai 
98836db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
98936db4f6cSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
99036db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
99136db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
99236db4f6cSWei Dai 			*pid = pi;
99336db4f6cSWei Dai 		}
99436db4f6cSWei Dai 	}
99536db4f6cSWei Dai 	return allowed_max_txq;
99636db4f6cSWei Dai }
99736db4f6cSWei Dai 
99836db4f6cSWei Dai /*
99936db4f6cSWei Dai  * Check input txq is valid or not.
100036db4f6cSWei Dai  * If input txq is not greater than any of maximum number
100136db4f6cSWei Dai  * of TX queues of all ports, it is valid.
100236db4f6cSWei Dai  * if valid, return 0, else return -1
100336db4f6cSWei Dai  */
100436db4f6cSWei Dai int
100536db4f6cSWei Dai check_nb_txq(queueid_t txq)
100636db4f6cSWei Dai {
100736db4f6cSWei Dai 	queueid_t allowed_max_txq;
100836db4f6cSWei Dai 	portid_t pid = 0;
100936db4f6cSWei Dai 
101036db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
101136db4f6cSWei Dai 	if (txq > allowed_max_txq) {
101236db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
101336db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
101436db4f6cSWei Dai 		       txq,
101536db4f6cSWei Dai 		       allowed_max_txq,
101636db4f6cSWei Dai 		       pid);
101736db4f6cSWei Dai 		return -1;
101836db4f6cSWei Dai 	}
101936db4f6cSWei Dai 	return 0;
102036db4f6cSWei Dai }
102136db4f6cSWei Dai 
1022af75078fSIntel static void
1023af75078fSIntel init_config(void)
1024af75078fSIntel {
1025ce8d5614SIntel 	portid_t pid;
1026af75078fSIntel 	struct rte_port *port;
1027af75078fSIntel 	struct rte_mempool *mbp;
1028af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1029af75078fSIntel 	lcoreid_t  lc_id;
10307acf894dSStephen Hurd 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1031b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
103252f38a20SJiayu Hu 	uint32_t gso_types;
1033c73a9071SWei Dai 	int k;
1034af75078fSIntel 
10357acf894dSStephen Hurd 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1036487f9a59SYulong Pei 
1037af75078fSIntel 	/* Configuration of logical cores. */
1038af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1039af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1040fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1041af75078fSIntel 	if (fwd_lcores == NULL) {
1042ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1043ce8d5614SIntel 							"failed\n", nb_lcores);
1044af75078fSIntel 	}
1045af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1046af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1047af75078fSIntel 					       sizeof(struct fwd_lcore),
1048fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1049af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1050ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1051ce8d5614SIntel 								"failed\n");
1052af75078fSIntel 		}
1053af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1054af75078fSIntel 	}
1055af75078fSIntel 
10567d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1057ce8d5614SIntel 		port = &ports[pid];
10588b9bd0efSMoti Haimovsky 		/* Apply default TxRx configuration for all ports */
1059fd8c20aaSShahaf Shuler 		port->dev_conf.txmode = tx_mode;
1060384161e0SShahaf Shuler 		port->dev_conf.rxmode = rx_mode;
1061ce8d5614SIntel 		rte_eth_dev_info_get(pid, &port->dev_info);
10627c45f6c0SFerruh Yigit 
106307e5f7bdSShahaf Shuler 		if (!(port->dev_info.tx_offload_capa &
106407e5f7bdSShahaf Shuler 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
106507e5f7bdSShahaf Shuler 			port->dev_conf.txmode.offloads &=
106607e5f7bdSShahaf Shuler 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1067c18feafaSDekel Peled 		if (!(port->dev_info.tx_offload_capa &
1068c18feafaSDekel Peled 			DEV_TX_OFFLOAD_MATCH_METADATA))
1069c18feafaSDekel Peled 			port->dev_conf.txmode.offloads &=
1070c18feafaSDekel Peled 				~DEV_TX_OFFLOAD_MATCH_METADATA;
1071b6ea6408SIntel 		if (numa_support) {
1072b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
1073b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
1074b6ea6408SIntel 			else {
1075b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
107620a0286fSLiu Xiaofeng 
107729841336SPhil Yang 				/*
107829841336SPhil Yang 				 * if socket_id is invalid,
107929841336SPhil Yang 				 * set to the first available socket.
108029841336SPhil Yang 				 */
108120a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
108229841336SPhil Yang 					socket_id = socket_ids[0];
1083b6ea6408SIntel 				port_per_socket[socket_id]++;
1084b6ea6408SIntel 			}
1085b6ea6408SIntel 		}
1086b6ea6408SIntel 
1087c73a9071SWei Dai 		/* Apply Rx offloads configuration */
1088c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
1089c73a9071SWei Dai 			port->rx_conf[k].offloads =
1090c73a9071SWei Dai 				port->dev_conf.rxmode.offloads;
1091c73a9071SWei Dai 		/* Apply Tx offloads configuration */
1092c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
1093c73a9071SWei Dai 			port->tx_conf[k].offloads =
1094c73a9071SWei Dai 				port->dev_conf.txmode.offloads;
1095c73a9071SWei Dai 
1096ce8d5614SIntel 		/* set flag to initialize port/queue */
1097ce8d5614SIntel 		port->need_reconfig = 1;
1098ce8d5614SIntel 		port->need_reconfig_queues = 1;
1099c18feafaSDekel Peled 		port->tx_metadata = 0;
1100ce8d5614SIntel 	}
1101ce8d5614SIntel 
11023ab64341SOlivier Matz 	/*
11033ab64341SOlivier Matz 	 * Create pools of mbuf.
11043ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbuf in
11053ab64341SOlivier Matz 	 * socket 0 memory by default.
11063ab64341SOlivier Matz 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
11073ab64341SOlivier Matz 	 *
11083ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
11093ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
11103ab64341SOlivier Matz 	 */
11113ab64341SOlivier Matz 	if (param_total_num_mbufs)
11123ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
11133ab64341SOlivier Matz 	else {
11143ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
11153ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
11163ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
11173ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
11183ab64341SOlivier Matz 	}
11193ab64341SOlivier Matz 
1120b6ea6408SIntel 	if (numa_support) {
1121b6ea6408SIntel 		uint8_t i;
1122ce8d5614SIntel 
1123c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
1124c9cafcc8SShahaf Shuler 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1125c9cafcc8SShahaf Shuler 					 socket_ids[i]);
11263ab64341SOlivier Matz 	} else {
11273ab64341SOlivier Matz 		if (socket_num == UMA_NO_CONFIG)
11283ab64341SOlivier Matz 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
11293ab64341SOlivier Matz 		else
11303ab64341SOlivier Matz 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
11313ab64341SOlivier Matz 						 socket_num);
11323ab64341SOlivier Matz 	}
1133b6ea6408SIntel 
1134b6ea6408SIntel 	init_port_config();
11355886ae07SAdrien Mazarguil 
113652f38a20SJiayu Hu 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1137aaacd052SJiayu Hu 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
11385886ae07SAdrien Mazarguil 	/*
11395886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
11405886ae07SAdrien Mazarguil 	 */
11415886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
11428fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
11438fd8bebcSAdrien Mazarguil 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
11448fd8bebcSAdrien Mazarguil 
11455886ae07SAdrien Mazarguil 		if (mbp == NULL)
11465886ae07SAdrien Mazarguil 			mbp = mbuf_pool_find(0);
11475886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
114852f38a20SJiayu Hu 		/* initialize GSO context */
114952f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
115052f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
115152f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
115252f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
115352f38a20SJiayu Hu 			ETHER_CRC_LEN;
115452f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
11555886ae07SAdrien Mazarguil 	}
11565886ae07SAdrien Mazarguil 
1157ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
1158ce8d5614SIntel 	if (init_fwd_streams() < 0)
1159ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
11600c0db76fSBernard Iremonger 
11610c0db76fSBernard Iremonger 	fwd_config_setup();
1162b7091f1dSJiayu Hu 
1163b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1164b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1165b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1166b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1167b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1168b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1169b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1170b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1171b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1172b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1173b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1174b7091f1dSJiayu Hu 		}
1175b7091f1dSJiayu Hu 	}
11760ad778b3SJasvinder Singh 
11770ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
11780ad778b3SJasvinder Singh 	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
11790ad778b3SJasvinder Singh 		RTE_ETH_FOREACH_DEV(pid) {
11800ad778b3SJasvinder Singh 			port = &ports[pid];
11810ad778b3SJasvinder Singh 			const char *driver = port->dev_info.driver_name;
11820ad778b3SJasvinder Singh 
11830ad778b3SJasvinder Singh 			if (strcmp(driver, "net_softnic") == 0)
11840ad778b3SJasvinder Singh 				port->softport.fwd_lcore_arg = fwd_lcores;
11850ad778b3SJasvinder Singh 		}
11860ad778b3SJasvinder Singh 	}
11870ad778b3SJasvinder Singh #endif
11880ad778b3SJasvinder Singh 
1189ce8d5614SIntel }
1190ce8d5614SIntel 
11912950a769SDeclan Doherty 
11922950a769SDeclan Doherty void
1193a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
11942950a769SDeclan Doherty {
11952950a769SDeclan Doherty 	struct rte_port *port;
11962950a769SDeclan Doherty 
11972950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
11982950a769SDeclan Doherty 	port = &ports[new_port_id];
11992950a769SDeclan Doherty 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
12002950a769SDeclan Doherty 
12012950a769SDeclan Doherty 	/* set flag to initialize port/queue */
12022950a769SDeclan Doherty 	port->need_reconfig = 1;
12032950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1204a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
12052950a769SDeclan Doherty 
12062950a769SDeclan Doherty 	init_port_config();
12072950a769SDeclan Doherty }
12082950a769SDeclan Doherty 
12092950a769SDeclan Doherty 
1210ce8d5614SIntel int
1211ce8d5614SIntel init_fwd_streams(void)
1212ce8d5614SIntel {
1213ce8d5614SIntel 	portid_t pid;
1214ce8d5614SIntel 	struct rte_port *port;
1215ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
12165a8fb55cSReshma Pattan 	queueid_t q;
1217ce8d5614SIntel 
1218ce8d5614SIntel 	/* set socket id according to numa or not */
12197d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1220ce8d5614SIntel 		port = &ports[pid];
1221ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
1222ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
1223ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
1224ce8d5614SIntel 				port->dev_info.max_rx_queues);
1225ce8d5614SIntel 			return -1;
1226ce8d5614SIntel 		}
1227ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
1228ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
1229ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
1230ce8d5614SIntel 				port->dev_info.max_tx_queues);
1231ce8d5614SIntel 			return -1;
1232ce8d5614SIntel 		}
123320a0286fSLiu Xiaofeng 		if (numa_support) {
123420a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
123520a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
123620a0286fSLiu Xiaofeng 			else {
1237b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
123820a0286fSLiu Xiaofeng 
123929841336SPhil Yang 				/*
124029841336SPhil Yang 				 * if socket_id is invalid,
124129841336SPhil Yang 				 * set to the first available socket.
124229841336SPhil Yang 				 */
124320a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
124429841336SPhil Yang 					port->socket_id = socket_ids[0];
124520a0286fSLiu Xiaofeng 			}
124620a0286fSLiu Xiaofeng 		}
1247b6ea6408SIntel 		else {
1248b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1249af75078fSIntel 				port->socket_id = 0;
1250b6ea6408SIntel 			else
1251b6ea6408SIntel 				port->socket_id = socket_num;
1252b6ea6408SIntel 		}
1253af75078fSIntel 	}
1254af75078fSIntel 
12555a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
12565a8fb55cSReshma Pattan 	if (q == 0) {
12575a8fb55cSReshma Pattan 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
12585a8fb55cSReshma Pattan 		return -1;
12595a8fb55cSReshma Pattan 	}
12605a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1261ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1262ce8d5614SIntel 		return 0;
1263ce8d5614SIntel 	/* clear the old */
1264ce8d5614SIntel 	if (fwd_streams != NULL) {
1265ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1266ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1267ce8d5614SIntel 				continue;
1268ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1269ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1270af75078fSIntel 		}
1271ce8d5614SIntel 		rte_free(fwd_streams);
1272ce8d5614SIntel 		fwd_streams = NULL;
1273ce8d5614SIntel 	}
1274ce8d5614SIntel 
1275ce8d5614SIntel 	/* init new */
1276ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
12771f84c469SMatan Azrad 	if (nb_fwd_streams) {
1278ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
12791f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
12801f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1281ce8d5614SIntel 		if (fwd_streams == NULL)
12821f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
12831f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
12841f84c469SMatan Azrad 				 nb_fwd_streams);
1285ce8d5614SIntel 
1286af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
12871f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
12881f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
12891f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1290ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
12911f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
12921f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
12931f84c469SMatan Azrad 		}
1294af75078fSIntel 	}
1295ce8d5614SIntel 
1296ce8d5614SIntel 	return 0;
1297af75078fSIntel }
1298af75078fSIntel 
1299af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1300af75078fSIntel static void
1301af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1302af75078fSIntel {
1303af75078fSIntel 	unsigned int total_burst;
1304af75078fSIntel 	unsigned int nb_burst;
1305af75078fSIntel 	unsigned int burst_stats[3];
1306af75078fSIntel 	uint16_t pktnb_stats[3];
1307af75078fSIntel 	uint16_t nb_pkt;
1308af75078fSIntel 	int burst_percent[3];
1309af75078fSIntel 
1310af75078fSIntel 	/*
1311af75078fSIntel 	 * First compute the total number of packet bursts and the
1312af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1313af75078fSIntel 	 */
1314af75078fSIntel 	total_burst = 0;
1315af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1316af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1317af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1318af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1319af75078fSIntel 		if (nb_burst == 0)
1320af75078fSIntel 			continue;
1321af75078fSIntel 		total_burst += nb_burst;
1322af75078fSIntel 		if (nb_burst > burst_stats[0]) {
1323af75078fSIntel 			burst_stats[1] = burst_stats[0];
1324af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
1325af75078fSIntel 			burst_stats[0] = nb_burst;
1326af75078fSIntel 			pktnb_stats[0] = nb_pkt;
1327fe613657SDaniel Shelepov 		} else if (nb_burst > burst_stats[1]) {
1328fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1329fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
1330af75078fSIntel 		}
1331af75078fSIntel 	}
1332af75078fSIntel 	if (total_burst == 0)
1333af75078fSIntel 		return;
1334af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1335af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1336af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
1337af75078fSIntel 	if (burst_stats[0] == total_burst) {
1338af75078fSIntel 		printf("]\n");
1339af75078fSIntel 		return;
1340af75078fSIntel 	}
1341af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1342af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
1343af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
1344af75078fSIntel 		return;
1345af75078fSIntel 	}
1346af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1347af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1348af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1349af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1350af75078fSIntel 		return;
1351af75078fSIntel 	}
1352af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1353af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1354af75078fSIntel }
1355af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1356af75078fSIntel 
1357af75078fSIntel static void
1358af75078fSIntel fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1359af75078fSIntel {
1360af75078fSIntel 	struct rte_port *port;
1361013af9b6SIntel 	uint8_t i;
1362af75078fSIntel 
1363af75078fSIntel 	static const char *fwd_stats_border = "----------------------";
1364af75078fSIntel 
1365af75078fSIntel 	port = &ports[port_id];
1366af75078fSIntel 	printf("\n  %s Forward statistics for port %-2d %s\n",
1367af75078fSIntel 	       fwd_stats_border, port_id, fwd_stats_border);
1368013af9b6SIntel 
1369013af9b6SIntel 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1370af75078fSIntel 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1371af75078fSIntel 		       "%-"PRIu64"\n",
137270bdb186SIvan Boule 		       stats->ipackets, stats->imissed,
137370bdb186SIvan Boule 		       (uint64_t) (stats->ipackets + stats->imissed));
1374af75078fSIntel 
1375af75078fSIntel 		if (cur_fwd_eng == &csum_fwd_engine)
137658d475b7SJerin Jacob 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
137758d475b7SJerin Jacob 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
137858d475b7SJerin Jacob 			       port->rx_bad_outer_l4_csum);
137986057c99SIgor Ryzhov 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1380f72a0fa6SStephen Hemminger 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
138170bdb186SIvan Boule 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
138270bdb186SIvan Boule 		}
1383af75078fSIntel 
1384af75078fSIntel 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1385af75078fSIntel 		       "%-"PRIu64"\n",
1386af75078fSIntel 		       stats->opackets, port->tx_dropped,
1387af75078fSIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
1388013af9b6SIntel 	}
1389013af9b6SIntel 	else {
1390013af9b6SIntel 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
1391013af9b6SIntel 		       "%14"PRIu64"\n",
139270bdb186SIvan Boule 		       stats->ipackets, stats->imissed,
139370bdb186SIvan Boule 		       (uint64_t) (stats->ipackets + stats->imissed));
1394013af9b6SIntel 
1395013af9b6SIntel 		if (cur_fwd_eng == &csum_fwd_engine)
139658d475b7SJerin Jacob 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"    Bad-outer-l4csum: %-14"PRIu64"\n",
139758d475b7SJerin Jacob 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
139858d475b7SJerin Jacob 			       port->rx_bad_outer_l4_csum);
139986057c99SIgor Ryzhov 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1400f72a0fa6SStephen Hemminger 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
140170bdb186SIvan Boule 			printf("  RX-nombufs:             %14"PRIu64"\n",
140270bdb186SIvan Boule 			       stats->rx_nombuf);
140370bdb186SIvan Boule 		}
1404013af9b6SIntel 
1405013af9b6SIntel 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1406013af9b6SIntel 		       "%14"PRIu64"\n",
1407013af9b6SIntel 		       stats->opackets, port->tx_dropped,
1408013af9b6SIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
1409013af9b6SIntel 	}
1410e659b6b4SIvan Boule 
1411af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1412af75078fSIntel 	if (port->rx_stream)
1413013af9b6SIntel 		pkt_burst_stats_display("RX",
1414013af9b6SIntel 			&port->rx_stream->rx_burst_stats);
1415af75078fSIntel 	if (port->tx_stream)
1416013af9b6SIntel 		pkt_burst_stats_display("TX",
1417013af9b6SIntel 			&port->tx_stream->tx_burst_stats);
1418af75078fSIntel #endif
1419af75078fSIntel 
1420013af9b6SIntel 	if (port->rx_queue_stats_mapping_enabled) {
1421013af9b6SIntel 		printf("\n");
1422013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1423013af9b6SIntel 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1424013af9b6SIntel 			       "     RX-errors:%14"PRIu64
1425013af9b6SIntel 			       "    RX-bytes:%14"PRIu64"\n",
1426013af9b6SIntel 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1427013af9b6SIntel 		}
1428013af9b6SIntel 		printf("\n");
1429013af9b6SIntel 	}
1430013af9b6SIntel 	if (port->tx_queue_stats_mapping_enabled) {
1431013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1432013af9b6SIntel 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1433013af9b6SIntel 			       "                                 TX-bytes:%14"PRIu64"\n",
1434013af9b6SIntel 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1435013af9b6SIntel 		}
1436013af9b6SIntel 	}
1437013af9b6SIntel 
1438af75078fSIntel 	printf("  %s--------------------------------%s\n",
1439af75078fSIntel 	       fwd_stats_border, fwd_stats_border);
1440af75078fSIntel }
1441af75078fSIntel 
1442af75078fSIntel static void
1443af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1444af75078fSIntel {
1445af75078fSIntel 	struct fwd_stream *fs;
1446af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1447af75078fSIntel 
1448af75078fSIntel 	fs = fwd_streams[stream_id];
1449af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1450af75078fSIntel 	    (fs->fwd_dropped == 0))
1451af75078fSIntel 		return;
1452af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1453af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1454af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1455af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1456af75078fSIntel 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1457af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1458af75078fSIntel 
1459af75078fSIntel 	/* if checksum mode */
1460af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1461013af9b6SIntel 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
146258d475b7SJerin Jacob 			"%-14u Rx- bad outer L4 checksum: %-14u\n",
146358d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
146458d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
1465af75078fSIntel 	}
1466af75078fSIntel 
1467af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1468af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1469af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1470af75078fSIntel #endif
1471af75078fSIntel }
1472af75078fSIntel 
1473af75078fSIntel static void
14747741e4cfSIntel flush_fwd_rx_queues(void)
1475af75078fSIntel {
1476af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1477af75078fSIntel 	portid_t  rxp;
14787741e4cfSIntel 	portid_t port_id;
1479af75078fSIntel 	queueid_t rxq;
1480af75078fSIntel 	uint16_t  nb_rx;
1481af75078fSIntel 	uint16_t  i;
1482af75078fSIntel 	uint8_t   j;
1483f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1484594302c7SJames Poole 	uint64_t timer_period;
1485f487715fSReshma Pattan 
1486f487715fSReshma Pattan 	/* convert to number of cycles */
1487594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1488af75078fSIntel 
1489af75078fSIntel 	for (j = 0; j < 2; j++) {
14907741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1491af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
14927741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1493f487715fSReshma Pattan 				/**
1494f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1495f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1496f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1497f487715fSReshma Pattan 				* after 1sec timer expiry.
1498f487715fSReshma Pattan 				*/
1499f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1500af75078fSIntel 				do {
15017741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1502013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1503af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1504af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1505f487715fSReshma Pattan 
1506f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1507f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1508f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1509f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1510f487715fSReshma Pattan 					(timer_tsc < timer_period));
1511f487715fSReshma Pattan 				timer_tsc = 0;
1512af75078fSIntel 			}
1513af75078fSIntel 		}
1514af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1515af75078fSIntel 	}
1516af75078fSIntel }
1517af75078fSIntel 
1518af75078fSIntel static void
1519af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1520af75078fSIntel {
1521af75078fSIntel 	struct fwd_stream **fsm;
1522af75078fSIntel 	streamid_t nb_fs;
1523af75078fSIntel 	streamid_t sm_id;
15247e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
15257e4441c8SRemy Horton 	uint64_t tics_per_1sec;
15267e4441c8SRemy Horton 	uint64_t tics_datum;
15277e4441c8SRemy Horton 	uint64_t tics_current;
15284918a357SXiaoyun Li 	uint16_t i, cnt_ports;
1529af75078fSIntel 
15304918a357SXiaoyun Li 	cnt_ports = nb_ports;
15317e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
15327e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
15337e4441c8SRemy Horton #endif
1534af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
1535af75078fSIntel 	nb_fs = fc->stream_nb;
1536af75078fSIntel 	do {
1537af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1538af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
15397e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
1540e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
1541e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
15427e4441c8SRemy Horton 			tics_current = rte_rdtsc();
15437e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
15447e4441c8SRemy Horton 				/* Periodic bitrate calculation */
15454918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
1546e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
15474918a357SXiaoyun Li 						ports_ids[i]);
15487e4441c8SRemy Horton 				tics_datum = tics_current;
15497e4441c8SRemy Horton 			}
1550e25e6c70SRemy Horton 		}
15517e4441c8SRemy Horton #endif
155262d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
155365eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
155465eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
155562d3216dSReshma Pattan 			rte_latencystats_update();
155662d3216dSReshma Pattan #endif
155762d3216dSReshma Pattan 
1558af75078fSIntel 	} while (! fc->stopped);
1559af75078fSIntel }
1560af75078fSIntel 
1561af75078fSIntel static int
1562af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1563af75078fSIntel {
1564af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1565af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1566af75078fSIntel 	return 0;
1567af75078fSIntel }
1568af75078fSIntel 
1569af75078fSIntel /*
1570af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1571af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1572af75078fSIntel  */
1573af75078fSIntel static int
1574af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1575af75078fSIntel {
1576af75078fSIntel 	struct fwd_lcore *fwd_lc;
1577af75078fSIntel 	struct fwd_lcore tmp_lcore;
1578af75078fSIntel 
1579af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1580af75078fSIntel 	tmp_lcore = *fwd_lc;
1581af75078fSIntel 	tmp_lcore.stopped = 1;
1582af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1583af75078fSIntel 	return 0;
1584af75078fSIntel }
1585af75078fSIntel 
1586af75078fSIntel /*
1587af75078fSIntel  * Launch packet forwarding:
1588af75078fSIntel  *     - Setup per-port forwarding context.
1589af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1590af75078fSIntel  */
1591af75078fSIntel static void
1592af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1593af75078fSIntel {
1594af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1595af75078fSIntel 	unsigned int i;
1596af75078fSIntel 	unsigned int lc_id;
1597af75078fSIntel 	int diag;
1598af75078fSIntel 
1599af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1600af75078fSIntel 	if (port_fwd_begin != NULL) {
1601af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1602af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1603af75078fSIntel 	}
1604af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1605af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1606af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1607af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1608af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1609af75078fSIntel 						     fwd_lcores[i], lc_id);
1610af75078fSIntel 			if (diag != 0)
1611af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1612af75078fSIntel 				       lc_id, diag);
1613af75078fSIntel 		}
1614af75078fSIntel 	}
1615af75078fSIntel }
1616af75078fSIntel 
1617af75078fSIntel /*
1618af75078fSIntel  * Launch packet forwarding configuration.
1619af75078fSIntel  */
1620af75078fSIntel void
1621af75078fSIntel start_packet_forwarding(int with_tx_first)
1622af75078fSIntel {
1623af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1624af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1625af75078fSIntel 	struct rte_port *port;
1626af75078fSIntel 	unsigned int i;
1627af75078fSIntel 	portid_t   pt_id;
1628af75078fSIntel 	streamid_t sm_id;
1629af75078fSIntel 
16305a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
16315a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
16325a8fb55cSReshma Pattan 
16335a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
16345a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
16355a8fb55cSReshma Pattan 
16365a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
16375a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
16385a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
16395a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
16405a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
16415a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
16425a8fb55cSReshma Pattan 
1643ce8d5614SIntel 	if (all_ports_started() == 0) {
1644ce8d5614SIntel 		printf("Not all ports were started\n");
1645ce8d5614SIntel 		return;
1646ce8d5614SIntel 	}
1647af75078fSIntel 	if (test_done == 0) {
1648af75078fSIntel 		printf("Packet forwarding already started\n");
1649af75078fSIntel 		return;
1650af75078fSIntel 	}
1651edf87b4aSBernard Iremonger 
1652edf87b4aSBernard Iremonger 
16537741e4cfSIntel 	if(dcb_test) {
16547741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
16557741e4cfSIntel 			pt_id = fwd_ports_ids[i];
16567741e4cfSIntel 			port = &ports[pt_id];
16577741e4cfSIntel 			if (!port->dcb_flag) {
16587741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
16597741e4cfSIntel                                        "be configured in this mode.\n");
1660013af9b6SIntel 				return;
1661013af9b6SIntel 			}
16627741e4cfSIntel 		}
16637741e4cfSIntel 		if (nb_fwd_lcores == 1) {
16647741e4cfSIntel 			printf("In DCB mode,the nb forwarding cores "
16657741e4cfSIntel                                "should be larger than 1.\n");
16667741e4cfSIntel 			return;
16677741e4cfSIntel 		}
16687741e4cfSIntel 	}
1669af75078fSIntel 	test_done = 0;
16707741e4cfSIntel 
167147a767b2SMatan Azrad 	fwd_config_setup();
167247a767b2SMatan Azrad 
16737741e4cfSIntel 	if(!no_flush_rx)
16747741e4cfSIntel 		flush_fwd_rx_queues();
16757741e4cfSIntel 
1676933617d8SZhihong Wang 	pkt_fwd_config_display(&cur_fwd_config);
1677af75078fSIntel 	rxtx_config_display();
1678af75078fSIntel 
1679af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1680af75078fSIntel 		pt_id = fwd_ports_ids[i];
1681af75078fSIntel 		port = &ports[pt_id];
1682af75078fSIntel 		rte_eth_stats_get(pt_id, &port->stats);
1683af75078fSIntel 		port->tx_dropped = 0;
1684013af9b6SIntel 
1685013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
1686af75078fSIntel 	}
1687af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1688af75078fSIntel 		fwd_streams[sm_id]->rx_packets = 0;
1689af75078fSIntel 		fwd_streams[sm_id]->tx_packets = 0;
1690af75078fSIntel 		fwd_streams[sm_id]->fwd_dropped = 0;
1691af75078fSIntel 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1692af75078fSIntel 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
169358d475b7SJerin Jacob 		fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1694af75078fSIntel 
1695af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1696af75078fSIntel 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1697af75078fSIntel 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1698af75078fSIntel 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1699af75078fSIntel 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1700af75078fSIntel #endif
1701af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1702af75078fSIntel 		fwd_streams[sm_id]->core_cycles = 0;
1703af75078fSIntel #endif
1704af75078fSIntel 	}
1705af75078fSIntel 	if (with_tx_first) {
1706af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1707af75078fSIntel 		if (port_fwd_begin != NULL) {
1708af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1709af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
1710af75078fSIntel 		}
1711acbf77a6SZhihong Wang 		while (with_tx_first--) {
1712acbf77a6SZhihong Wang 			launch_packet_forwarding(
1713acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
1714af75078fSIntel 			rte_eal_mp_wait_lcore();
1715acbf77a6SZhihong Wang 		}
1716af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
1717af75078fSIntel 		if (port_fwd_end != NULL) {
1718af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1719af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
1720af75078fSIntel 		}
1721af75078fSIntel 	}
1722af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
1723af75078fSIntel }
1724af75078fSIntel 
1725af75078fSIntel void
1726af75078fSIntel stop_packet_forwarding(void)
1727af75078fSIntel {
1728af75078fSIntel 	struct rte_eth_stats stats;
1729af75078fSIntel 	struct rte_port *port;
1730af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1731af75078fSIntel 	int i;
1732af75078fSIntel 	portid_t   pt_id;
1733af75078fSIntel 	streamid_t sm_id;
1734af75078fSIntel 	lcoreid_t  lc_id;
1735af75078fSIntel 	uint64_t total_recv;
1736af75078fSIntel 	uint64_t total_xmit;
1737af75078fSIntel 	uint64_t total_rx_dropped;
1738af75078fSIntel 	uint64_t total_tx_dropped;
1739af75078fSIntel 	uint64_t total_rx_nombuf;
1740af75078fSIntel 	uint64_t tx_dropped;
1741af75078fSIntel 	uint64_t rx_bad_ip_csum;
1742af75078fSIntel 	uint64_t rx_bad_l4_csum;
1743af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1744af75078fSIntel 	uint64_t fwd_cycles;
1745af75078fSIntel #endif
1746b7091f1dSJiayu Hu 
1747af75078fSIntel 	static const char *acc_stats_border = "+++++++++++++++";
1748af75078fSIntel 
1749af75078fSIntel 	if (test_done) {
1750af75078fSIntel 		printf("Packet forwarding not started\n");
1751af75078fSIntel 		return;
1752af75078fSIntel 	}
1753af75078fSIntel 	printf("Telling cores to stop...");
1754af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1755af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1756af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1757af75078fSIntel 	rte_eal_mp_wait_lcore();
1758af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1759af75078fSIntel 	if (port_fwd_end != NULL) {
1760af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1761af75078fSIntel 			pt_id = fwd_ports_ids[i];
1762af75078fSIntel 			(*port_fwd_end)(pt_id);
1763af75078fSIntel 		}
1764af75078fSIntel 	}
1765af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1766af75078fSIntel 	fwd_cycles = 0;
1767af75078fSIntel #endif
1768af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1769af75078fSIntel 		if (cur_fwd_config.nb_fwd_streams >
1770af75078fSIntel 		    cur_fwd_config.nb_fwd_ports) {
1771af75078fSIntel 			fwd_stream_stats_display(sm_id);
1772af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1773af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1774af75078fSIntel 		} else {
1775af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1776af75078fSIntel 				fwd_streams[sm_id];
1777af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1778af75078fSIntel 				fwd_streams[sm_id];
1779af75078fSIntel 		}
1780af75078fSIntel 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1781af75078fSIntel 		tx_dropped = (uint64_t) (tx_dropped +
1782af75078fSIntel 					 fwd_streams[sm_id]->fwd_dropped);
1783af75078fSIntel 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1784af75078fSIntel 
1785013af9b6SIntel 		rx_bad_ip_csum =
1786013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1787af75078fSIntel 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1788af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1789013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1790013af9b6SIntel 							rx_bad_ip_csum;
1791af75078fSIntel 
1792013af9b6SIntel 		rx_bad_l4_csum =
1793013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1794af75078fSIntel 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1795af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1796013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1797013af9b6SIntel 							rx_bad_l4_csum;
1798af75078fSIntel 
179958d475b7SJerin Jacob 		ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
180058d475b7SJerin Jacob 				fwd_streams[sm_id]->rx_bad_outer_l4_csum;
180158d475b7SJerin Jacob 
1802af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1803af75078fSIntel 		fwd_cycles = (uint64_t) (fwd_cycles +
1804af75078fSIntel 					 fwd_streams[sm_id]->core_cycles);
1805af75078fSIntel #endif
1806af75078fSIntel 	}
1807af75078fSIntel 	total_recv = 0;
1808af75078fSIntel 	total_xmit = 0;
1809af75078fSIntel 	total_rx_dropped = 0;
1810af75078fSIntel 	total_tx_dropped = 0;
1811af75078fSIntel 	total_rx_nombuf  = 0;
18127741e4cfSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1813af75078fSIntel 		pt_id = fwd_ports_ids[i];
1814af75078fSIntel 
1815af75078fSIntel 		port = &ports[pt_id];
1816af75078fSIntel 		rte_eth_stats_get(pt_id, &stats);
1817af75078fSIntel 		stats.ipackets -= port->stats.ipackets;
1818af75078fSIntel 		port->stats.ipackets = 0;
1819af75078fSIntel 		stats.opackets -= port->stats.opackets;
1820af75078fSIntel 		port->stats.opackets = 0;
1821af75078fSIntel 		stats.ibytes   -= port->stats.ibytes;
1822af75078fSIntel 		port->stats.ibytes = 0;
1823af75078fSIntel 		stats.obytes   -= port->stats.obytes;
1824af75078fSIntel 		port->stats.obytes = 0;
182570bdb186SIvan Boule 		stats.imissed  -= port->stats.imissed;
182670bdb186SIvan Boule 		port->stats.imissed = 0;
1827af75078fSIntel 		stats.oerrors  -= port->stats.oerrors;
1828af75078fSIntel 		port->stats.oerrors = 0;
1829af75078fSIntel 		stats.rx_nombuf -= port->stats.rx_nombuf;
1830af75078fSIntel 		port->stats.rx_nombuf = 0;
1831af75078fSIntel 
1832af75078fSIntel 		total_recv += stats.ipackets;
1833af75078fSIntel 		total_xmit += stats.opackets;
183470bdb186SIvan Boule 		total_rx_dropped += stats.imissed;
1835af75078fSIntel 		total_tx_dropped += port->tx_dropped;
1836af75078fSIntel 		total_rx_nombuf  += stats.rx_nombuf;
1837af75078fSIntel 
1838af75078fSIntel 		fwd_port_stats_display(pt_id, &stats);
1839af75078fSIntel 	}
1840b7091f1dSJiayu Hu 
1841af75078fSIntel 	printf("\n  %s Accumulated forward statistics for all ports"
1842af75078fSIntel 	       "%s\n",
1843af75078fSIntel 	       acc_stats_border, acc_stats_border);
1844af75078fSIntel 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1845af75078fSIntel 	       "%-"PRIu64"\n"
1846af75078fSIntel 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1847af75078fSIntel 	       "%-"PRIu64"\n",
1848af75078fSIntel 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1849af75078fSIntel 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1850af75078fSIntel 	if (total_rx_nombuf > 0)
1851af75078fSIntel 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1852af75078fSIntel 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1853af75078fSIntel 	       "%s\n",
1854af75078fSIntel 	       acc_stats_border, acc_stats_border);
1855af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1856af75078fSIntel 	if (total_recv > 0)
1857af75078fSIntel 		printf("\n  CPU cycles/packet=%u (total cycles="
1858af75078fSIntel 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1859af75078fSIntel 		       (unsigned int)(fwd_cycles / total_recv),
1860af75078fSIntel 		       fwd_cycles, total_recv);
1861af75078fSIntel #endif
1862af75078fSIntel 	printf("\nDone.\n");
1863af75078fSIntel 	test_done = 1;
1864af75078fSIntel }
1865af75078fSIntel 
1866cfae07fdSOuyang Changchun void
1867cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1868cfae07fdSOuyang Changchun {
1869492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1870cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
1871cfae07fdSOuyang Changchun }
1872cfae07fdSOuyang Changchun 
1873cfae07fdSOuyang Changchun void
1874cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1875cfae07fdSOuyang Changchun {
1876492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
1877cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
1878cfae07fdSOuyang Changchun }
1879cfae07fdSOuyang Changchun 
1880ce8d5614SIntel static int
1881ce8d5614SIntel all_ports_started(void)
1882ce8d5614SIntel {
1883ce8d5614SIntel 	portid_t pi;
1884ce8d5614SIntel 	struct rte_port *port;
1885ce8d5614SIntel 
18867d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1887ce8d5614SIntel 		port = &ports[pi];
1888ce8d5614SIntel 		/* Check if there is a port which is not started */
188941b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
189041b05095SBernard Iremonger 			(port->slave_flag == 0))
1891ce8d5614SIntel 			return 0;
1892ce8d5614SIntel 	}
1893ce8d5614SIntel 
1894ce8d5614SIntel 	/* No port is not started */
1895ce8d5614SIntel 	return 1;
1896ce8d5614SIntel }
1897ce8d5614SIntel 
1898148f963fSBruce Richardson int
18996018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
19006018eb8cSShahaf Shuler {
19016018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
19026018eb8cSShahaf Shuler 
19036018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
19046018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
19056018eb8cSShahaf Shuler 		return 0;
19066018eb8cSShahaf Shuler 	return 1;
19076018eb8cSShahaf Shuler }
19086018eb8cSShahaf Shuler 
19096018eb8cSShahaf Shuler int
1910edab33b1STetsuya Mukawa all_ports_stopped(void)
1911edab33b1STetsuya Mukawa {
1912edab33b1STetsuya Mukawa 	portid_t pi;
1913edab33b1STetsuya Mukawa 
19147d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
19156018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
1916edab33b1STetsuya Mukawa 			return 0;
1917edab33b1STetsuya Mukawa 	}
1918edab33b1STetsuya Mukawa 
1919edab33b1STetsuya Mukawa 	return 1;
1920edab33b1STetsuya Mukawa }
1921edab33b1STetsuya Mukawa 
1922edab33b1STetsuya Mukawa int
1923edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
1924edab33b1STetsuya Mukawa {
1925edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1926edab33b1STetsuya Mukawa 		return 0;
1927edab33b1STetsuya Mukawa 
1928edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1929edab33b1STetsuya Mukawa 		return 0;
1930edab33b1STetsuya Mukawa 
1931edab33b1STetsuya Mukawa 	return 1;
1932edab33b1STetsuya Mukawa }
1933edab33b1STetsuya Mukawa 
1934edab33b1STetsuya Mukawa int
1935ce8d5614SIntel start_port(portid_t pid)
1936ce8d5614SIntel {
193792d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
1938ce8d5614SIntel 	portid_t pi;
1939ce8d5614SIntel 	queueid_t qi;
1940ce8d5614SIntel 	struct rte_port *port;
19412950a769SDeclan Doherty 	struct ether_addr mac_addr;
194276ad4a2dSGaetan Rivet 	enum rte_eth_event_type event_type;
1943ce8d5614SIntel 
19444468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
19454468635fSMichael Qiu 		return 0;
19464468635fSMichael Qiu 
1947ce8d5614SIntel 	if(dcb_config)
1948ce8d5614SIntel 		dcb_test = 1;
19497d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1950edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1951ce8d5614SIntel 			continue;
1952ce8d5614SIntel 
195392d2703eSMichael Qiu 		need_check_link_status = 0;
1954ce8d5614SIntel 		port = &ports[pi];
1955ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1956ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1957ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
1958ce8d5614SIntel 			continue;
1959ce8d5614SIntel 		}
1960ce8d5614SIntel 
1961ce8d5614SIntel 		if (port->need_reconfig > 0) {
1962ce8d5614SIntel 			port->need_reconfig = 0;
1963ce8d5614SIntel 
19647ee3e944SVasily Philipov 			if (flow_isolate_all) {
19657ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
19667ee3e944SVasily Philipov 				if (ret) {
19677ee3e944SVasily Philipov 					printf("Failed to apply isolated"
19687ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
19697ee3e944SVasily Philipov 					return -1;
19707ee3e944SVasily Philipov 				}
19717ee3e944SVasily Philipov 			}
1972b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
19735706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
197420a0286fSLiu Xiaofeng 					port->socket_id);
1975ce8d5614SIntel 			/* configure port */
1976ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1977ce8d5614SIntel 						&(port->dev_conf));
1978ce8d5614SIntel 			if (diag != 0) {
1979ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1980ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1981ce8d5614SIntel 					printf("Port %d can not be set back "
1982ce8d5614SIntel 							"to stopped\n", pi);
1983ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
1984ce8d5614SIntel 				/* try to reconfigure port next time */
1985ce8d5614SIntel 				port->need_reconfig = 1;
1986148f963fSBruce Richardson 				return -1;
1987ce8d5614SIntel 			}
1988ce8d5614SIntel 		}
1989ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
1990ce8d5614SIntel 			port->need_reconfig_queues = 0;
1991ce8d5614SIntel 			/* setup tx queues */
1992ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
1993b6ea6408SIntel 				if ((numa_support) &&
1994b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
1995b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1996d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
1997d44f8a48SQi Zhang 						txring_numa[pi],
1998d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
1999b6ea6408SIntel 				else
2000b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2001d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2002d44f8a48SQi Zhang 						port->socket_id,
2003d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2004b6ea6408SIntel 
2005ce8d5614SIntel 				if (diag == 0)
2006ce8d5614SIntel 					continue;
2007ce8d5614SIntel 
2008ce8d5614SIntel 				/* Fail to setup tx queue, return */
2009ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2010ce8d5614SIntel 							RTE_PORT_HANDLING,
2011ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2012ce8d5614SIntel 					printf("Port %d can not be set back "
2013ce8d5614SIntel 							"to stopped\n", pi);
2014d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
2015d44f8a48SQi Zhang 				       pi);
2016ce8d5614SIntel 				/* try to reconfigure queues next time */
2017ce8d5614SIntel 				port->need_reconfig_queues = 1;
2018148f963fSBruce Richardson 				return -1;
2019ce8d5614SIntel 			}
2020ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2021d44f8a48SQi Zhang 				/* setup rx queues */
2022b6ea6408SIntel 				if ((numa_support) &&
2023b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2024b6ea6408SIntel 					struct rte_mempool * mp =
2025b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
2026b6ea6408SIntel 					if (mp == NULL) {
2027b6ea6408SIntel 						printf("Failed to setup RX queue:"
2028b6ea6408SIntel 							"No mempool allocation"
2029b6ea6408SIntel 							" on the socket %d\n",
2030b6ea6408SIntel 							rxring_numa[pi]);
2031148f963fSBruce Richardson 						return -1;
2032b6ea6408SIntel 					}
2033b6ea6408SIntel 
2034b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2035d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2036d44f8a48SQi Zhang 					     rxring_numa[pi],
2037d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2038d44f8a48SQi Zhang 					     mp);
20391e1d6bddSBernard Iremonger 				} else {
20401e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
20411e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
20421e1d6bddSBernard Iremonger 					if (mp == NULL) {
20431e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
20441e1d6bddSBernard Iremonger 							"No mempool allocation"
20451e1d6bddSBernard Iremonger 							" on the socket %d\n",
20461e1d6bddSBernard Iremonger 							port->socket_id);
20471e1d6bddSBernard Iremonger 						return -1;
2048b6ea6408SIntel 					}
2049b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2050d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2051d44f8a48SQi Zhang 					     port->socket_id,
2052d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2053d44f8a48SQi Zhang 					     mp);
20541e1d6bddSBernard Iremonger 				}
2055ce8d5614SIntel 				if (diag == 0)
2056ce8d5614SIntel 					continue;
2057ce8d5614SIntel 
2058ce8d5614SIntel 				/* Fail to setup rx queue, return */
2059ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2060ce8d5614SIntel 							RTE_PORT_HANDLING,
2061ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2062ce8d5614SIntel 					printf("Port %d can not be set back "
2063ce8d5614SIntel 							"to stopped\n", pi);
2064d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2065d44f8a48SQi Zhang 				       pi);
2066ce8d5614SIntel 				/* try to reconfigure queues next time */
2067ce8d5614SIntel 				port->need_reconfig_queues = 1;
2068148f963fSBruce Richardson 				return -1;
2069ce8d5614SIntel 			}
2070ce8d5614SIntel 		}
2071b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
2072ce8d5614SIntel 		/* start port */
2073ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2074ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2075ce8d5614SIntel 
2076ce8d5614SIntel 			/* Fail to setup rx queue, return */
2077ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2078ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2079ce8d5614SIntel 				printf("Port %d can not be set back to "
2080ce8d5614SIntel 							"stopped\n", pi);
2081ce8d5614SIntel 			continue;
2082ce8d5614SIntel 		}
2083ce8d5614SIntel 
2084ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2085ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2086ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2087ce8d5614SIntel 
20882950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
2089d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
20902950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
20912950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
20922950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2093d8c89163SZijie Pan 
2094ce8d5614SIntel 		/* at least one port started, need checking link status */
2095ce8d5614SIntel 		need_check_link_status = 1;
2096ce8d5614SIntel 	}
2097ce8d5614SIntel 
20984fb82244SMatan Azrad 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
20994fb82244SMatan Azrad 	     event_type < RTE_ETH_EVENT_MAX;
21004fb82244SMatan Azrad 	     event_type++) {
21014fb82244SMatan Azrad 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
21024fb82244SMatan Azrad 						event_type,
21034fb82244SMatan Azrad 						eth_event_callback,
21044fb82244SMatan Azrad 						NULL);
21054fb82244SMatan Azrad 		if (diag) {
21064fb82244SMatan Azrad 			printf("Failed to setup even callback for event %d\n",
21074fb82244SMatan Azrad 				event_type);
21084fb82244SMatan Azrad 			return -1;
21094fb82244SMatan Azrad 		}
21104fb82244SMatan Azrad 	}
21114fb82244SMatan Azrad 
211292d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2113edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
211492d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2115ce8d5614SIntel 		printf("Please stop the ports first\n");
2116ce8d5614SIntel 
2117ce8d5614SIntel 	printf("Done\n");
2118148f963fSBruce Richardson 	return 0;
2119ce8d5614SIntel }
2120ce8d5614SIntel 
2121ce8d5614SIntel void
2122ce8d5614SIntel stop_port(portid_t pid)
2123ce8d5614SIntel {
2124ce8d5614SIntel 	portid_t pi;
2125ce8d5614SIntel 	struct rte_port *port;
2126ce8d5614SIntel 	int need_check_link_status = 0;
2127ce8d5614SIntel 
2128ce8d5614SIntel 	if (dcb_test) {
2129ce8d5614SIntel 		dcb_test = 0;
2130ce8d5614SIntel 		dcb_config = 0;
2131ce8d5614SIntel 	}
21324468635fSMichael Qiu 
21334468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
21344468635fSMichael Qiu 		return;
21354468635fSMichael Qiu 
2136ce8d5614SIntel 	printf("Stopping ports...\n");
2137ce8d5614SIntel 
21387d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
21394468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2140ce8d5614SIntel 			continue;
2141ce8d5614SIntel 
2142a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2143a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2144a8ef3e3aSBernard Iremonger 			continue;
2145a8ef3e3aSBernard Iremonger 		}
2146a8ef3e3aSBernard Iremonger 
21470e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
21480e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
21490e545d30SBernard Iremonger 			continue;
21500e545d30SBernard Iremonger 		}
21510e545d30SBernard Iremonger 
2152ce8d5614SIntel 		port = &ports[pi];
2153ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2154ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
2155ce8d5614SIntel 			continue;
2156ce8d5614SIntel 
2157ce8d5614SIntel 		rte_eth_dev_stop(pi);
2158ce8d5614SIntel 
2159ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2160ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2161ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
2162ce8d5614SIntel 		need_check_link_status = 1;
2163ce8d5614SIntel 	}
2164bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
2165edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
2166ce8d5614SIntel 
2167ce8d5614SIntel 	printf("Done\n");
2168ce8d5614SIntel }
2169ce8d5614SIntel 
2170ce6959bfSWisam Jaddo static void
21714f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2172ce6959bfSWisam Jaddo {
21734f1de450SThomas Monjalon 	portid_t i;
21744f1de450SThomas Monjalon 	portid_t new_total = 0;
2175ce6959bfSWisam Jaddo 
21764f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
21774f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
21784f1de450SThomas Monjalon 			array[new_total] = array[i];
21794f1de450SThomas Monjalon 			new_total++;
2180ce6959bfSWisam Jaddo 		}
21814f1de450SThomas Monjalon 	*total = new_total;
21824f1de450SThomas Monjalon }
21834f1de450SThomas Monjalon 
21844f1de450SThomas Monjalon static void
21854f1de450SThomas Monjalon remove_invalid_ports(void)
21864f1de450SThomas Monjalon {
21874f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
21884f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
21894f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
2190ce6959bfSWisam Jaddo }
2191ce6959bfSWisam Jaddo 
2192ce8d5614SIntel void
2193ce8d5614SIntel close_port(portid_t pid)
2194ce8d5614SIntel {
2195ce8d5614SIntel 	portid_t pi;
2196ce8d5614SIntel 	struct rte_port *port;
2197ce8d5614SIntel 
21984468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
21994468635fSMichael Qiu 		return;
22004468635fSMichael Qiu 
2201ce8d5614SIntel 	printf("Closing ports...\n");
2202ce8d5614SIntel 
22037d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
22044468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2205ce8d5614SIntel 			continue;
2206ce8d5614SIntel 
2207a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2208a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2209a8ef3e3aSBernard Iremonger 			continue;
2210a8ef3e3aSBernard Iremonger 		}
2211a8ef3e3aSBernard Iremonger 
22120e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
22130e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
22140e545d30SBernard Iremonger 			continue;
22150e545d30SBernard Iremonger 		}
22160e545d30SBernard Iremonger 
2217ce8d5614SIntel 		port = &ports[pi];
2218ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2219d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2220d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
2221d4e8ad64SMichael Qiu 			continue;
2222d4e8ad64SMichael Qiu 		}
2223d4e8ad64SMichael Qiu 
2224d4e8ad64SMichael Qiu 		if (rte_atomic16_cmpset(&(port->port_status),
2225ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2226ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2227ce8d5614SIntel 			continue;
2228ce8d5614SIntel 		}
2229ce8d5614SIntel 
2230938a184aSAdrien Mazarguil 		if (port->flow_list)
2231938a184aSAdrien Mazarguil 			port_flow_flush(pi);
2232ce8d5614SIntel 		rte_eth_dev_close(pi);
2233ce8d5614SIntel 
22344f1de450SThomas Monjalon 		remove_invalid_ports();
223523ea57a2SThomas Monjalon 
2236ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2237ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2238b38bb262SPablo de Lara 			printf("Port %d cannot be set to closed\n", pi);
2239ce8d5614SIntel 	}
2240ce8d5614SIntel 
2241ce8d5614SIntel 	printf("Done\n");
2242ce8d5614SIntel }
2243ce8d5614SIntel 
2244edab33b1STetsuya Mukawa void
224597f1e196SWei Dai reset_port(portid_t pid)
224697f1e196SWei Dai {
224797f1e196SWei Dai 	int diag;
224897f1e196SWei Dai 	portid_t pi;
224997f1e196SWei Dai 	struct rte_port *port;
225097f1e196SWei Dai 
225197f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
225297f1e196SWei Dai 		return;
225397f1e196SWei Dai 
225497f1e196SWei Dai 	printf("Resetting ports...\n");
225597f1e196SWei Dai 
225697f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
225797f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
225897f1e196SWei Dai 			continue;
225997f1e196SWei Dai 
226097f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
226197f1e196SWei Dai 			printf("Please remove port %d from forwarding "
226297f1e196SWei Dai 			       "configuration.\n", pi);
226397f1e196SWei Dai 			continue;
226497f1e196SWei Dai 		}
226597f1e196SWei Dai 
226697f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
226797f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
226897f1e196SWei Dai 			       pi);
226997f1e196SWei Dai 			continue;
227097f1e196SWei Dai 		}
227197f1e196SWei Dai 
227297f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
227397f1e196SWei Dai 		if (diag == 0) {
227497f1e196SWei Dai 			port = &ports[pi];
227597f1e196SWei Dai 			port->need_reconfig = 1;
227697f1e196SWei Dai 			port->need_reconfig_queues = 1;
227797f1e196SWei Dai 		} else {
227897f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
227997f1e196SWei Dai 		}
228097f1e196SWei Dai 	}
228197f1e196SWei Dai 
228297f1e196SWei Dai 	printf("Done\n");
228397f1e196SWei Dai }
228497f1e196SWei Dai 
228597f1e196SWei Dai void
2286edab33b1STetsuya Mukawa attach_port(char *identifier)
2287ce8d5614SIntel {
2288ebf5e9b7SBernard Iremonger 	portid_t pi = 0;
2289c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2290ce8d5614SIntel 
2291edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2292edab33b1STetsuya Mukawa 
2293edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2294edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2295edab33b1STetsuya Mukawa 		return;
2296ce8d5614SIntel 	}
2297ce8d5614SIntel 
2298c9cce428SThomas Monjalon 	if (rte_dev_probe(identifier) != 0) {
2299c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2300edab33b1STetsuya Mukawa 		return;
2301c9cce428SThomas Monjalon 	}
2302c9cce428SThomas Monjalon 
2303*86fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2304*86fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
2305*86fa5de1SThomas Monjalon 			continue; /* port was already attached before */
2306c9cce428SThomas Monjalon 		setup_attached_port(pi);
2307c9cce428SThomas Monjalon 	}
2308*86fa5de1SThomas Monjalon }
2309c9cce428SThomas Monjalon 
2310c9cce428SThomas Monjalon static void
2311c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
2312c9cce428SThomas Monjalon {
2313c9cce428SThomas Monjalon 	unsigned int socket_id;
2314edab33b1STetsuya Mukawa 
2315931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
231629841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
2317931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
231829841336SPhil Yang 		socket_id = socket_ids[0];
2319931126baSBernard Iremonger 	reconfig(pi, socket_id);
2320edab33b1STetsuya Mukawa 	rte_eth_promiscuous_enable(pi);
2321edab33b1STetsuya Mukawa 
23224f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
23234f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
23244f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
2325edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2326edab33b1STetsuya Mukawa 
2327edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2328edab33b1STetsuya Mukawa 	printf("Done\n");
2329edab33b1STetsuya Mukawa }
2330edab33b1STetsuya Mukawa 
2331edab33b1STetsuya Mukawa void
2332f8e5baa2SThomas Monjalon detach_port_device(portid_t port_id)
23335f4ec54fSChen Jing D(Mark) {
2334f8e5baa2SThomas Monjalon 	struct rte_device *dev;
2335f8e5baa2SThomas Monjalon 	portid_t sibling;
2336f8e5baa2SThomas Monjalon 
2337c9cce428SThomas Monjalon 	printf("Removing a device...\n");
23385f4ec54fSChen Jing D(Mark) 
2339f8e5baa2SThomas Monjalon 	dev = rte_eth_devices[port_id].device;
2340f8e5baa2SThomas Monjalon 	if (dev == NULL) {
2341f8e5baa2SThomas Monjalon 		printf("Device already removed\n");
2342f8e5baa2SThomas Monjalon 		return;
2343f8e5baa2SThomas Monjalon 	}
2344f8e5baa2SThomas Monjalon 
234523ea57a2SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
23463f4a8370SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
23473f4a8370SThomas Monjalon 			printf("Port not stopped\n");
2348edab33b1STetsuya Mukawa 			return;
2349edab33b1STetsuya Mukawa 		}
23503f4a8370SThomas Monjalon 		printf("Port was not closed\n");
2351938a184aSAdrien Mazarguil 		if (ports[port_id].flow_list)
2352938a184aSAdrien Mazarguil 			port_flow_flush(port_id);
23533f4a8370SThomas Monjalon 	}
2354938a184aSAdrien Mazarguil 
2355f8e5baa2SThomas Monjalon 	if (rte_dev_remove(dev) != 0) {
2356f8e5baa2SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2357edab33b1STetsuya Mukawa 		return;
23583070419eSGaetan Rivet 	}
2359edab33b1STetsuya Mukawa 
2360f8e5baa2SThomas Monjalon 	for (sibling = 0; sibling < RTE_MAX_ETHPORTS; sibling++) {
2361f8e5baa2SThomas Monjalon 		if (rte_eth_devices[sibling].device != dev)
2362f8e5baa2SThomas Monjalon 			continue;
2363f8e5baa2SThomas Monjalon 		/* reset mapping between old ports and removed device */
2364f8e5baa2SThomas Monjalon 		rte_eth_devices[sibling].device = NULL;
2365f8e5baa2SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2366f8e5baa2SThomas Monjalon 			/* sibling ports are forced to be closed */
2367f8e5baa2SThomas Monjalon 			ports[sibling].port_status = RTE_PORT_CLOSED;
2368f8e5baa2SThomas Monjalon 			printf("Port %u is closed\n", sibling);
2369f8e5baa2SThomas Monjalon 		}
2370f8e5baa2SThomas Monjalon 	}
2371f8e5baa2SThomas Monjalon 
23724f1de450SThomas Monjalon 	remove_invalid_ports();
237303ce2c53SMatan Azrad 
2374f8e5baa2SThomas Monjalon 	printf("Device of port %u is detached\n", port_id);
2375f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
2376edab33b1STetsuya Mukawa 	printf("Done\n");
2377edab33b1STetsuya Mukawa 	return;
23785f4ec54fSChen Jing D(Mark) }
23795f4ec54fSChen Jing D(Mark) 
2380af75078fSIntel void
2381af75078fSIntel pmd_test_exit(void)
2382af75078fSIntel {
2383124909d7SZhiyong Yang 	struct rte_device *device;
2384af75078fSIntel 	portid_t pt_id;
2385fb73e096SJeff Guo 	int ret;
2386af75078fSIntel 
23878210ec25SPablo de Lara 	if (test_done == 0)
23888210ec25SPablo de Lara 		stop_packet_forwarding();
23898210ec25SPablo de Lara 
2390d3a274ceSZhihong Wang 	if (ports != NULL) {
2391d3a274ceSZhihong Wang 		no_link_check = 1;
23927d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
2393d3a274ceSZhihong Wang 			printf("\nShutting down port %d...\n", pt_id);
2394af75078fSIntel 			fflush(stdout);
2395d3a274ceSZhihong Wang 			stop_port(pt_id);
2396d3a274ceSZhihong Wang 			close_port(pt_id);
2397124909d7SZhiyong Yang 
2398124909d7SZhiyong Yang 			/*
2399124909d7SZhiyong Yang 			 * This is a workaround to fix a virtio-user issue that
2400124909d7SZhiyong Yang 			 * requires to call clean-up routine to remove existing
2401124909d7SZhiyong Yang 			 * socket.
2402124909d7SZhiyong Yang 			 * This workaround valid only for testpmd, needs a fix
2403124909d7SZhiyong Yang 			 * valid for all applications.
2404124909d7SZhiyong Yang 			 * TODO: Implement proper resource cleanup
2405124909d7SZhiyong Yang 			 */
2406124909d7SZhiyong Yang 			device = rte_eth_devices[pt_id].device;
2407124909d7SZhiyong Yang 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2408f8e5baa2SThomas Monjalon 				detach_port_device(pt_id);
2409af75078fSIntel 		}
2410d3a274ceSZhihong Wang 	}
2411fb73e096SJeff Guo 
2412fb73e096SJeff Guo 	if (hot_plug) {
2413fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
24142049c511SJeff Guo 		if (ret) {
2415fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2416fb73e096SJeff Guo 				"fail to stop device event monitor.");
24172049c511SJeff Guo 			return;
24182049c511SJeff Guo 		}
2419fb73e096SJeff Guo 
24202049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
24212049c511SJeff Guo 			eth_dev_event_callback, NULL);
24222049c511SJeff Guo 		if (ret < 0) {
2423fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
24242049c511SJeff Guo 				"fail to unregister device event callback.\n");
24252049c511SJeff Guo 			return;
24262049c511SJeff Guo 		}
24272049c511SJeff Guo 
24282049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
24292049c511SJeff Guo 		if (ret) {
24302049c511SJeff Guo 			RTE_LOG(ERR, EAL,
24312049c511SJeff Guo 				"fail to disable hotplug handling.\n");
24322049c511SJeff Guo 			return;
24332049c511SJeff Guo 		}
2434fb73e096SJeff Guo 	}
2435fb73e096SJeff Guo 
2436d3a274ceSZhihong Wang 	printf("\nBye...\n");
2437af75078fSIntel }
2438af75078fSIntel 
2439af75078fSIntel typedef void (*cmd_func_t)(void);
2440af75078fSIntel struct pmd_test_command {
2441af75078fSIntel 	const char *cmd_name;
2442af75078fSIntel 	cmd_func_t cmd_func;
2443af75078fSIntel };
2444af75078fSIntel 
2445af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2446af75078fSIntel 
2447ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2448af75078fSIntel static void
2449edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2450af75078fSIntel {
2451ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2452ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2453f8244c63SZhiyong Yang 	portid_t portid;
2454f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2455ce8d5614SIntel 	struct rte_eth_link link;
2456ce8d5614SIntel 
2457ce8d5614SIntel 	printf("Checking link statuses...\n");
2458ce8d5614SIntel 	fflush(stdout);
2459ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2460ce8d5614SIntel 		all_ports_up = 1;
24617d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2462ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2463ce8d5614SIntel 				continue;
2464ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2465ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2466ce8d5614SIntel 			/* print link status if flag set */
2467ce8d5614SIntel 			if (print_flag == 1) {
2468ce8d5614SIntel 				if (link.link_status)
2469f8244c63SZhiyong Yang 					printf(
2470f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2471f8244c63SZhiyong Yang 					portid, link.link_speed,
2472ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2473ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2474ce8d5614SIntel 				else
2475f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2476ce8d5614SIntel 				continue;
2477ce8d5614SIntel 			}
2478ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
247909419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2480ce8d5614SIntel 				all_ports_up = 0;
2481ce8d5614SIntel 				break;
2482ce8d5614SIntel 			}
2483ce8d5614SIntel 		}
2484ce8d5614SIntel 		/* after finally printing all link status, get out */
2485ce8d5614SIntel 		if (print_flag == 1)
2486ce8d5614SIntel 			break;
2487ce8d5614SIntel 
2488ce8d5614SIntel 		if (all_ports_up == 0) {
2489ce8d5614SIntel 			fflush(stdout);
2490ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2491ce8d5614SIntel 		}
2492ce8d5614SIntel 
2493ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2494ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2495ce8d5614SIntel 			print_flag = 1;
2496ce8d5614SIntel 		}
24978ea656f8SGaetan Rivet 
24988ea656f8SGaetan Rivet 		if (lsc_interrupt)
24998ea656f8SGaetan Rivet 			break;
2500ce8d5614SIntel 	}
2501af75078fSIntel }
2502af75078fSIntel 
2503284c908cSGaetan Rivet static void
2504284c908cSGaetan Rivet rmv_event_callback(void *arg)
2505284c908cSGaetan Rivet {
25063b97888aSMatan Azrad 	int need_to_start = 0;
25070da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
250828caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
2509284c908cSGaetan Rivet 
2510284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2511284c908cSGaetan Rivet 
25123b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
25133b97888aSMatan Azrad 		need_to_start = 1;
25143b97888aSMatan Azrad 		stop_packet_forwarding();
25153b97888aSMatan Azrad 	}
25160da2a62bSMatan Azrad 	no_link_check = 1;
2517284c908cSGaetan Rivet 	stop_port(port_id);
25180da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
2519284c908cSGaetan Rivet 	close_port(port_id);
2520f8e5baa2SThomas Monjalon 	detach_port_device(port_id);
25213b97888aSMatan Azrad 	if (need_to_start)
25223b97888aSMatan Azrad 		start_packet_forwarding(0);
2523284c908cSGaetan Rivet }
2524284c908cSGaetan Rivet 
252576ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
2526d6af1a13SBernard Iremonger static int
2527f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2528d6af1a13SBernard Iremonger 		  void *ret_param)
252976ad4a2dSGaetan Rivet {
253076ad4a2dSGaetan Rivet 	static const char * const event_desc[] = {
253176ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
253276ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
253376ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
253476ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
253576ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2536badb87c1SAnoob Joseph 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
253776ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
253876ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
25394fb82244SMatan Azrad 		[RTE_ETH_EVENT_NEW] = "device probed",
25404fb82244SMatan Azrad 		[RTE_ETH_EVENT_DESTROY] = "device released",
254176ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MAX] = NULL,
254276ad4a2dSGaetan Rivet 	};
254376ad4a2dSGaetan Rivet 
254476ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
2545d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
254676ad4a2dSGaetan Rivet 
254776ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
2548f431e010SHerakliusz Lipiec 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
254976ad4a2dSGaetan Rivet 			port_id, __func__, type);
255076ad4a2dSGaetan Rivet 		fflush(stderr);
25513af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2552f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
255376ad4a2dSGaetan Rivet 			event_desc[type]);
255476ad4a2dSGaetan Rivet 		fflush(stdout);
255576ad4a2dSGaetan Rivet 	}
2556284c908cSGaetan Rivet 
25570e45c64dSMatan Azrad 	if (port_id_is_invalid(port_id, DISABLED_WARN))
25580e45c64dSMatan Azrad 		return 0;
25590e45c64dSMatan Azrad 
2560284c908cSGaetan Rivet 	switch (type) {
2561284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
2562284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
2563284c908cSGaetan Rivet 				rmv_event_callback, (void *)(intptr_t)port_id))
2564284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
2565284c908cSGaetan Rivet 		break;
2566284c908cSGaetan Rivet 	default:
2567284c908cSGaetan Rivet 		break;
2568284c908cSGaetan Rivet 	}
2569d6af1a13SBernard Iremonger 	return 0;
257076ad4a2dSGaetan Rivet }
257176ad4a2dSGaetan Rivet 
2572fb73e096SJeff Guo /* This function is used by the interrupt thread */
2573fb73e096SJeff Guo static void
257489ecd110SJeff Guo eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2575fb73e096SJeff Guo 			     __rte_unused void *arg)
2576fb73e096SJeff Guo {
25772049c511SJeff Guo 	uint16_t port_id;
25782049c511SJeff Guo 	int ret;
25792049c511SJeff Guo 
2580fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
2581fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
2582fb73e096SJeff Guo 			__func__, type);
2583fb73e096SJeff Guo 		fflush(stderr);
2584fb73e096SJeff Guo 	}
2585fb73e096SJeff Guo 
2586fb73e096SJeff Guo 	switch (type) {
2587fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
2588fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2589fb73e096SJeff Guo 			device_name);
25902049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
25912049c511SJeff Guo 		if (ret) {
25922049c511SJeff Guo 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
25932049c511SJeff Guo 				device_name);
25942049c511SJeff Guo 			return;
25952049c511SJeff Guo 		}
25962049c511SJeff Guo 		rmv_event_callback((void *)(intptr_t)port_id);
2597fb73e096SJeff Guo 		break;
2598fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
2599fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2600fb73e096SJeff Guo 			device_name);
2601fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
2602fb73e096SJeff Guo 		 * begin to attach port.
2603fb73e096SJeff Guo 		 */
2604fb73e096SJeff Guo 		break;
2605fb73e096SJeff Guo 	default:
2606fb73e096SJeff Guo 		break;
2607fb73e096SJeff Guo 	}
2608fb73e096SJeff Guo }
2609fb73e096SJeff Guo 
2610013af9b6SIntel static int
261128caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2612af75078fSIntel {
2613013af9b6SIntel 	uint16_t i;
2614af75078fSIntel 	int diag;
2615013af9b6SIntel 	uint8_t mapping_found = 0;
2616af75078fSIntel 
2617013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2618013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2619013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2620013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2621013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2622013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2623013af9b6SIntel 			if (diag != 0)
2624013af9b6SIntel 				return diag;
2625013af9b6SIntel 			mapping_found = 1;
2626af75078fSIntel 		}
2627013af9b6SIntel 	}
2628013af9b6SIntel 	if (mapping_found)
2629013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2630013af9b6SIntel 	return 0;
2631013af9b6SIntel }
2632013af9b6SIntel 
2633013af9b6SIntel static int
263428caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2635013af9b6SIntel {
2636013af9b6SIntel 	uint16_t i;
2637013af9b6SIntel 	int diag;
2638013af9b6SIntel 	uint8_t mapping_found = 0;
2639013af9b6SIntel 
2640013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2641013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2642013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2643013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2644013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2645013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2646013af9b6SIntel 			if (diag != 0)
2647013af9b6SIntel 				return diag;
2648013af9b6SIntel 			mapping_found = 1;
2649013af9b6SIntel 		}
2650013af9b6SIntel 	}
2651013af9b6SIntel 	if (mapping_found)
2652013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2653013af9b6SIntel 	return 0;
2654013af9b6SIntel }
2655013af9b6SIntel 
2656013af9b6SIntel static void
265728caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2658013af9b6SIntel {
2659013af9b6SIntel 	int diag = 0;
2660013af9b6SIntel 
2661013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2662af75078fSIntel 	if (diag != 0) {
2663013af9b6SIntel 		if (diag == -ENOTSUP) {
2664013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2665013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2666013af9b6SIntel 		}
2667013af9b6SIntel 		else
2668013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2669013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2670013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2671af75078fSIntel 					pi, diag);
2672af75078fSIntel 	}
2673013af9b6SIntel 
2674013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2675af75078fSIntel 	if (diag != 0) {
2676013af9b6SIntel 		if (diag == -ENOTSUP) {
2677013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2678013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2679013af9b6SIntel 		}
2680013af9b6SIntel 		else
2681013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2682013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2683013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2684af75078fSIntel 					pi, diag);
2685af75078fSIntel 	}
2686af75078fSIntel }
2687af75078fSIntel 
2688f2c5125aSPablo de Lara static void
2689f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2690f2c5125aSPablo de Lara {
2691d44f8a48SQi Zhang 	uint16_t qid;
2692f2c5125aSPablo de Lara 
2693d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
2694d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2695d44f8a48SQi Zhang 
2696d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
2697f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2698d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2699f2c5125aSPablo de Lara 
2700f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2701d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2702f2c5125aSPablo de Lara 
2703f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2704d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2705f2c5125aSPablo de Lara 
2706f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2707d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2708f2c5125aSPablo de Lara 
2709f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2710d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2711f2c5125aSPablo de Lara 
2712d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
2713d44f8a48SQi Zhang 	}
2714d44f8a48SQi Zhang 
2715d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
2716d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
2717d44f8a48SQi Zhang 
2718d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
2719f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2720d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2721f2c5125aSPablo de Lara 
2722f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2723d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2724f2c5125aSPablo de Lara 
2725f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2726d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2727f2c5125aSPablo de Lara 
2728f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2729d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2730f2c5125aSPablo de Lara 
2731f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2732d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2733d44f8a48SQi Zhang 
2734d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
2735d44f8a48SQi Zhang 	}
2736f2c5125aSPablo de Lara }
2737f2c5125aSPablo de Lara 
2738013af9b6SIntel void
2739013af9b6SIntel init_port_config(void)
2740013af9b6SIntel {
2741013af9b6SIntel 	portid_t pid;
2742013af9b6SIntel 	struct rte_port *port;
2743013af9b6SIntel 
27447d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
2745013af9b6SIntel 		port = &ports[pid];
2746013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
2747422515b9SAdrien Mazarguil 		rte_eth_dev_info_get(pid, &port->dev_info);
27483ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2749013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
275090892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2751422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
2752af75078fSIntel 		} else {
2753013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2754013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2755af75078fSIntel 		}
27563ce690d3SBruce Richardson 
27575f592039SJingjing Wu 		if (port->dcb_flag == 0) {
27583ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
27593ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
27603ce690d3SBruce Richardson 			else
27613ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
27623ce690d3SBruce Richardson 		}
27633ce690d3SBruce Richardson 
2764f2c5125aSPablo de Lara 		rxtx_port_config(port);
2765013af9b6SIntel 
2766013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
2767013af9b6SIntel 
2768013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
276950c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2770e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
27717b7e5ba7SIntel #endif
27728ea656f8SGaetan Rivet 
27738ea656f8SGaetan Rivet 		if (lsc_interrupt &&
27748ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
27758ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
27768ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
2777284c908cSGaetan Rivet 		if (rmv_interrupt &&
2778284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
2779284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
2780284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
2781013af9b6SIntel 	}
2782013af9b6SIntel }
2783013af9b6SIntel 
278441b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
278541b05095SBernard Iremonger {
278641b05095SBernard Iremonger 	struct rte_port *port;
278741b05095SBernard Iremonger 
278841b05095SBernard Iremonger 	port = &ports[slave_pid];
278941b05095SBernard Iremonger 	port->slave_flag = 1;
279041b05095SBernard Iremonger }
279141b05095SBernard Iremonger 
279241b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
279341b05095SBernard Iremonger {
279441b05095SBernard Iremonger 	struct rte_port *port;
279541b05095SBernard Iremonger 
279641b05095SBernard Iremonger 	port = &ports[slave_pid];
279741b05095SBernard Iremonger 	port->slave_flag = 0;
279841b05095SBernard Iremonger }
279941b05095SBernard Iremonger 
28000e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
28010e545d30SBernard Iremonger {
28020e545d30SBernard Iremonger 	struct rte_port *port;
28030e545d30SBernard Iremonger 
28040e545d30SBernard Iremonger 	port = &ports[slave_pid];
2805b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2806b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2807b8b8b344SMatan Azrad 		return 1;
2808b8b8b344SMatan Azrad 	return 0;
28090e545d30SBernard Iremonger }
28100e545d30SBernard Iremonger 
2811013af9b6SIntel const uint16_t vlan_tags[] = {
2812013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
2813013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
2814013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
2815013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
2816013af9b6SIntel };
2817013af9b6SIntel 
2818013af9b6SIntel static  int
2819ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
28201a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
28211a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
28221a572499SJingjing Wu 		 uint8_t pfc_en)
2823013af9b6SIntel {
2824013af9b6SIntel 	uint8_t i;
2825ac7c491cSKonstantin Ananyev 	int32_t rc;
2826ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
2827af75078fSIntel 
2828af75078fSIntel 	/*
2829013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2830013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
2831af75078fSIntel 	 */
28321a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
28331a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
28341a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
28351a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
28361a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2837013af9b6SIntel 
2838547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
28391a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
28401a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
28411a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
28421a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
28431a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
28441a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2845013af9b6SIntel 
28461a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
28471a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
28481a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
28491a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
28501a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2851af75078fSIntel 		}
2852013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2853f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2854f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2855013af9b6SIntel 		}
2856013af9b6SIntel 
2857013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
285832e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
285932e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
28601a572499SJingjing Wu 	} else {
28611a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
28621a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
28631a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
28641a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2865013af9b6SIntel 
2866ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2867ac7c491cSKonstantin Ananyev 		if (rc != 0)
2868ac7c491cSKonstantin Ananyev 			return rc;
2869ac7c491cSKonstantin Ananyev 
28701a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
28711a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
28721a572499SJingjing Wu 
2873bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2874bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
2875bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
2876013af9b6SIntel 		}
2877ac7c491cSKonstantin Ananyev 
28781a572499SJingjing Wu 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2879ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
288032e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
28811a572499SJingjing Wu 	}
28821a572499SJingjing Wu 
28831a572499SJingjing Wu 	if (pfc_en)
28841a572499SJingjing Wu 		eth_conf->dcb_capability_en =
28851a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2886013af9b6SIntel 	else
2887013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2888013af9b6SIntel 
2889013af9b6SIntel 	return 0;
2890013af9b6SIntel }
2891013af9b6SIntel 
2892013af9b6SIntel int
28931a572499SJingjing Wu init_port_dcb_config(portid_t pid,
28941a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
28951a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
28961a572499SJingjing Wu 		     uint8_t pfc_en)
2897013af9b6SIntel {
2898013af9b6SIntel 	struct rte_eth_conf port_conf;
2899013af9b6SIntel 	struct rte_port *rte_port;
2900013af9b6SIntel 	int retval;
2901013af9b6SIntel 	uint16_t i;
2902013af9b6SIntel 
29032a977b89SWenzhuo Lu 	rte_port = &ports[pid];
2904013af9b6SIntel 
2905013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2906013af9b6SIntel 	/* Enter DCB configuration status */
2907013af9b6SIntel 	dcb_config = 1;
2908013af9b6SIntel 
2909d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2910d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
2911d5354e89SYanglong Wu 
2912013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
2913ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2914013af9b6SIntel 	if (retval < 0)
2915013af9b6SIntel 		return retval;
29160074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2917013af9b6SIntel 
29182f203d44SQi Zhang 	/* re-configure the device . */
29192f203d44SQi Zhang 	rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
29202a977b89SWenzhuo Lu 
29212a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
29222a977b89SWenzhuo Lu 
29232a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
29242a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
29252a977b89SWenzhuo Lu 	 */
29262a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
29272a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
29282a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
29292a977b89SWenzhuo Lu 			" for port %d.", pid);
29302a977b89SWenzhuo Lu 		return -1;
29312a977b89SWenzhuo Lu 	}
29322a977b89SWenzhuo Lu 
29332a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
29342a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
29352a977b89SWenzhuo Lu 	 */
29362a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
293786ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
293886ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
293986ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
294086ef65eeSBernard Iremonger 		} else {
29412a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
29422a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
294386ef65eeSBernard Iremonger 		}
29442a977b89SWenzhuo Lu 	} else {
29452a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
29462a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
29472a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
29482a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
29492a977b89SWenzhuo Lu 		} else {
29502a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
29512a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
29522a977b89SWenzhuo Lu 
29532a977b89SWenzhuo Lu 		}
29542a977b89SWenzhuo Lu 	}
29552a977b89SWenzhuo Lu 	rx_free_thresh = 64;
29562a977b89SWenzhuo Lu 
2957013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2958013af9b6SIntel 
2959f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
2960013af9b6SIntel 	/* VLAN filter */
29610074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
29621a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2963013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
2964013af9b6SIntel 
2965013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2966013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
2967013af9b6SIntel 
29687741e4cfSIntel 	rte_port->dcb_flag = 1;
29697741e4cfSIntel 
2970013af9b6SIntel 	return 0;
2971af75078fSIntel }
2972af75078fSIntel 
2973ffc468ffSTetsuya Mukawa static void
2974ffc468ffSTetsuya Mukawa init_port(void)
2975ffc468ffSTetsuya Mukawa {
2976ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
2977ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
2978ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2979ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
2980ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
2981ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
2982ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
2983ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
2984ffc468ffSTetsuya Mukawa 	}
298529841336SPhil Yang 
298629841336SPhil Yang 	/* Initialize ports NUMA structures */
298729841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
298829841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
298929841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2990ffc468ffSTetsuya Mukawa }
2991ffc468ffSTetsuya Mukawa 
2992d3a274ceSZhihong Wang static void
2993d3a274ceSZhihong Wang force_quit(void)
2994d3a274ceSZhihong Wang {
2995d3a274ceSZhihong Wang 	pmd_test_exit();
2996d3a274ceSZhihong Wang 	prompt_exit();
2997d3a274ceSZhihong Wang }
2998d3a274ceSZhihong Wang 
2999d3a274ceSZhihong Wang static void
3000cfea1f30SPablo de Lara print_stats(void)
3001cfea1f30SPablo de Lara {
3002cfea1f30SPablo de Lara 	uint8_t i;
3003cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3004cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3005cfea1f30SPablo de Lara 
3006cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3007cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3008cfea1f30SPablo de Lara 
3009cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3010cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3011cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3012cfea1f30SPablo de Lara }
3013cfea1f30SPablo de Lara 
3014cfea1f30SPablo de Lara static void
3015d3a274ceSZhihong Wang signal_handler(int signum)
3016d3a274ceSZhihong Wang {
3017d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
3018d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
3019d3a274ceSZhihong Wang 				signum);
3020102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
3021102b7329SReshma Pattan 		/* uninitialize packet capture framework */
3022102b7329SReshma Pattan 		rte_pdump_uninit();
3023102b7329SReshma Pattan #endif
302462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
302562d3216dSReshma Pattan 		rte_latencystats_uninit();
302662d3216dSReshma Pattan #endif
3027d3a274ceSZhihong Wang 		force_quit();
3028d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
3029d9a191a0SPhil Yang 		f_quit = 1;
3030d3a274ceSZhihong Wang 		/* exit with the expected status */
3031d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
3032d3a274ceSZhihong Wang 		kill(getpid(), signum);
3033d3a274ceSZhihong Wang 	}
3034d3a274ceSZhihong Wang }
3035d3a274ceSZhihong Wang 
3036af75078fSIntel int
3037af75078fSIntel main(int argc, char** argv)
3038af75078fSIntel {
3039af75078fSIntel 	int diag;
3040f8244c63SZhiyong Yang 	portid_t port_id;
30414918a357SXiaoyun Li 	uint16_t count;
3042fb73e096SJeff Guo 	int ret;
3043af75078fSIntel 
3044d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
3045d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
3046d3a274ceSZhihong Wang 
3047af75078fSIntel 	diag = rte_eal_init(argc, argv);
3048af75078fSIntel 	if (diag < 0)
3049af75078fSIntel 		rte_panic("Cannot init EAL\n");
3050af75078fSIntel 
3051285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
3052285fd101SOlivier Matz 	if (testpmd_logtype < 0)
3053285fd101SOlivier Matz 		rte_panic("Cannot register log type");
3054285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3055285fd101SOlivier Matz 
30564aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP
30574aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
30584aa0d012SAnatoly Burakov 	rte_pdump_init(NULL);
30594aa0d012SAnatoly Burakov #endif
30604aa0d012SAnatoly Burakov 
30614918a357SXiaoyun Li 	count = 0;
30624918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
30634918a357SXiaoyun Li 		ports_ids[count] = port_id;
30644918a357SXiaoyun Li 		count++;
30654918a357SXiaoyun Li 	}
30664918a357SXiaoyun Li 	nb_ports = (portid_t) count;
30674aa0d012SAnatoly Burakov 	if (nb_ports == 0)
30684aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
30694aa0d012SAnatoly Burakov 
30704aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
30714aa0d012SAnatoly Burakov 	init_port();
30724aa0d012SAnatoly Burakov 
30734aa0d012SAnatoly Burakov 	set_def_fwd_config();
30744aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
30754aa0d012SAnatoly Burakov 		rte_panic("Empty set of forwarding logical cores - check the "
30764aa0d012SAnatoly Burakov 			  "core mask supplied in the command parameters\n");
30774aa0d012SAnatoly Burakov 
3078e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
3079e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE
3080e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
3081e505d84cSAnatoly Burakov #endif
3082e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS
3083e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
3084e505d84cSAnatoly Burakov #endif
3085e505d84cSAnatoly Burakov 
3086fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
3087fb7b8b32SAnatoly Burakov #ifdef RTE_EXEC_ENV_BSDAPP
3088fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
3089fb7b8b32SAnatoly Burakov #else
3090fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
3091fb7b8b32SAnatoly Burakov #endif
3092fb7b8b32SAnatoly Burakov 
3093e505d84cSAnatoly Burakov 	argc -= diag;
3094e505d84cSAnatoly Burakov 	argv += diag;
3095e505d84cSAnatoly Burakov 	if (argc > 1)
3096e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
3097e505d84cSAnatoly Burakov 
3098e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3099285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
31001c036b16SEelco Chaudron 			strerror(errno));
31011c036b16SEelco Chaudron 	}
31021c036b16SEelco Chaudron 
310399cabef0SPablo de Lara 	if (tx_first && interactive)
310499cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
310599cabef0SPablo de Lara 				"interactive mode.\n");
31068820cba4SDavid Hunt 
31078820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
31088820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
31098820cba4SDavid Hunt 				" using tx_first. Disabling.\n");
31108820cba4SDavid Hunt 		lsc_interrupt = 0;
31118820cba4SDavid Hunt 	}
31128820cba4SDavid Hunt 
31135a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
31145a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
31155a8fb55cSReshma Pattan 
31165a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3117af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3118af75078fSIntel 		       "but nb_txq=%d will prevent to fully test it.\n",
3119af75078fSIntel 		       nb_rxq, nb_txq);
3120af75078fSIntel 
3121af75078fSIntel 	init_config();
3122fb73e096SJeff Guo 
3123fb73e096SJeff Guo 	if (hot_plug) {
31242049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
3125fb73e096SJeff Guo 		if (ret) {
31262049c511SJeff Guo 			RTE_LOG(ERR, EAL,
31272049c511SJeff Guo 				"fail to enable hotplug handling.");
3128fb73e096SJeff Guo 			return -1;
3129fb73e096SJeff Guo 		}
3130fb73e096SJeff Guo 
31312049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
31322049c511SJeff Guo 		if (ret) {
31332049c511SJeff Guo 			RTE_LOG(ERR, EAL,
31342049c511SJeff Guo 				"fail to start device event monitoring.");
31352049c511SJeff Guo 			return -1;
31362049c511SJeff Guo 		}
31372049c511SJeff Guo 
31382049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
31392049c511SJeff Guo 			eth_dev_event_callback, NULL);
31402049c511SJeff Guo 		if (ret) {
31412049c511SJeff Guo 			RTE_LOG(ERR, EAL,
31422049c511SJeff Guo 				"fail  to register device event callback\n");
31432049c511SJeff Guo 			return -1;
31442049c511SJeff Guo 		}
3145fb73e096SJeff Guo 	}
3146fb73e096SJeff Guo 
3147148f963fSBruce Richardson 	if (start_port(RTE_PORT_ALL) != 0)
3148148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3149af75078fSIntel 
3150ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
31517d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(port_id)
3152ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
3153af75078fSIntel 
31547e4441c8SRemy Horton 	/* Init metrics library */
31557e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
31567e4441c8SRemy Horton 
315762d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
315862d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
315962d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
316062d3216dSReshma Pattan 		if (ret)
316162d3216dSReshma Pattan 			printf("Warning: latencystats init()"
316262d3216dSReshma Pattan 				" returned error %d\n",	ret);
316362d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
316462d3216dSReshma Pattan 			latencystats_lcore_id);
316562d3216dSReshma Pattan 	}
316662d3216dSReshma Pattan #endif
316762d3216dSReshma Pattan 
31687e4441c8SRemy Horton 	/* Setup bitrate stats */
31697e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
3170e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
31717e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
31727e4441c8SRemy Horton 		if (bitrate_data == NULL)
3173e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
3174e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
31757e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
3176e25e6c70SRemy Horton 	}
31777e4441c8SRemy Horton #endif
31787e4441c8SRemy Horton 
31790d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
318081ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
318181ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
318281ef862bSAllain Legacy 
3183ca7feb22SCyril Chemparathy 	if (interactive == 1) {
3184ca7feb22SCyril Chemparathy 		if (auto_start) {
3185ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
3186ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
3187ca7feb22SCyril Chemparathy 		}
3188af75078fSIntel 		prompt();
31890de738cfSJiayu Hu 		pmd_test_exit();
3190ca7feb22SCyril Chemparathy 	} else
31910d56cb81SThomas Monjalon #endif
31920d56cb81SThomas Monjalon 	{
3193af75078fSIntel 		char c;
3194af75078fSIntel 		int rc;
3195af75078fSIntel 
3196d9a191a0SPhil Yang 		f_quit = 0;
3197d9a191a0SPhil Yang 
3198af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
319999cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
3200cfea1f30SPablo de Lara 		if (stats_period != 0) {
3201cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3202cfea1f30SPablo de Lara 			uint64_t timer_period;
3203cfea1f30SPablo de Lara 
3204cfea1f30SPablo de Lara 			/* Convert to number of cycles */
3205cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
3206cfea1f30SPablo de Lara 
3207d9a191a0SPhil Yang 			while (f_quit == 0) {
3208cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
3209cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
3210cfea1f30SPablo de Lara 
3211cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
3212cfea1f30SPablo de Lara 					print_stats();
3213cfea1f30SPablo de Lara 					/* Reset the timer */
3214cfea1f30SPablo de Lara 					diff_time = 0;
3215cfea1f30SPablo de Lara 				}
3216cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
3217cfea1f30SPablo de Lara 				prev_time = cur_time;
3218cfea1f30SPablo de Lara 				sleep(1);
3219cfea1f30SPablo de Lara 			}
3220cfea1f30SPablo de Lara 		}
3221cfea1f30SPablo de Lara 
3222af75078fSIntel 		printf("Press enter to exit\n");
3223af75078fSIntel 		rc = read(0, &c, 1);
3224d3a274ceSZhihong Wang 		pmd_test_exit();
3225af75078fSIntel 		if (rc < 0)
3226af75078fSIntel 			return 1;
3227af75078fSIntel 	}
3228af75078fSIntel 
3229af75078fSIntel 	return 0;
3230af75078fSIntel }
3231