xref: /dpdk/app/test-pmd/testpmd.c (revision 89ecd110524dda2ae7d31446eb699a257c76f6cd)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h>
31af75078fSIntel #include <rte_memory.h>
32af75078fSIntel #include <rte_memcpy.h>
33af75078fSIntel #include <rte_launch.h>
34af75078fSIntel #include <rte_eal.h>
35284c908cSGaetan Rivet #include <rte_alarm.h>
36af75078fSIntel #include <rte_per_lcore.h>
37af75078fSIntel #include <rte_lcore.h>
38af75078fSIntel #include <rte_atomic.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
577e4441c8SRemy Horton #include <rte_metrics.h>
587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
597e4441c8SRemy Horton #include <rte_bitrate.h>
607e4441c8SRemy Horton #endif
6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6262d3216dSReshma Pattan #include <rte_latencystats.h>
6362d3216dSReshma Pattan #endif
64af75078fSIntel 
65af75078fSIntel #include "testpmd.h"
66af75078fSIntel 
67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
70c7f5dba7SAnatoly Burakov #else
71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
72c7f5dba7SAnatoly Burakov #endif
73c7f5dba7SAnatoly Burakov 
74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
77c7f5dba7SAnatoly Burakov #else
78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
79c7f5dba7SAnatoly Burakov #endif
80c7f5dba7SAnatoly Burakov 
81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
140af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
/*
 * Forwarding engines.
 * NULL-terminated table of every forwarding engine built into testpmd;
 * entries guarded by #ifdef are only present when the matching library
 * or PMD support is compiled in.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,		/* default engine (see cur_fwd_eng below) */
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,	/* sentinel marking the end of the table */
};
190af75078fSIntel 
191af75078fSIntel struct fwd_config cur_fwd_config;
192af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193bf56fce1SZhihong Wang uint32_t retry_enabled;
194bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
196af75078fSIntel 
197af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
199c8798818SIntel                                       * specified on command-line. */
200cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
201d9a191a0SPhil Yang 
202d9a191a0SPhil Yang /*
203d9a191a0SPhil Yang  * In container, it cannot terminate the process which running with 'stats-period'
204d9a191a0SPhil Yang  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
205d9a191a0SPhil Yang  */
206d9a191a0SPhil Yang uint8_t f_quit;
207d9a191a0SPhil Yang 
208af75078fSIntel /*
209af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
210af75078fSIntel  */
211af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
214af75078fSIntel };
215af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
216af75078fSIntel 
21779bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
21879bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
21979bec05bSKonstantin Ananyev 
220af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
221e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
222af75078fSIntel 
223900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
224900550deSIntel uint8_t dcb_config = 0;
225900550deSIntel 
226900550deSIntel /* Whether the dcb is in testing status */
227900550deSIntel uint8_t dcb_test = 0;
228900550deSIntel 
229af75078fSIntel /*
230af75078fSIntel  * Configurable number of RX/TX queues.
231af75078fSIntel  */
232af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
233af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
234af75078fSIntel 
235af75078fSIntel /*
236af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2378599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
238af75078fSIntel  */
2398599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2408599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
241af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
242af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
243af75078fSIntel 
244f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
245af75078fSIntel /*
246af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
247af75078fSIntel  */
248af75078fSIntel 
249f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
250f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
251f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
252af75078fSIntel 
253f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
254f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
255f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
256af75078fSIntel 
257af75078fSIntel /*
258af75078fSIntel  * Configurable value of RX free threshold.
259af75078fSIntel  */
260f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
261af75078fSIntel 
262af75078fSIntel /*
263ce8d5614SIntel  * Configurable value of RX drop enable.
264ce8d5614SIntel  */
265f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
266ce8d5614SIntel 
267ce8d5614SIntel /*
268af75078fSIntel  * Configurable value of TX free threshold.
269af75078fSIntel  */
270f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
271af75078fSIntel 
272af75078fSIntel /*
273af75078fSIntel  * Configurable value of TX RS bit threshold.
274af75078fSIntel  */
275f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
276af75078fSIntel 
277af75078fSIntel /*
2783c156061SJens Freimann  * Configurable value of buffered packets before sending.
2793c156061SJens Freimann  */
2803c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2813c156061SJens Freimann 
2823c156061SJens Freimann /*
2833c156061SJens Freimann  * Configurable value of packet buffer timeout.
2843c156061SJens Freimann  */
2853c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2863c156061SJens Freimann 
2873c156061SJens Freimann /*
2883c156061SJens Freimann  * Configurable value for size of VNF internal memory area
2893c156061SJens Freimann  * used for simulating noisy neighbour behaviour
2903c156061SJens Freimann  */
2913c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
2923c156061SJens Freimann 
2933c156061SJens Freimann /*
2943c156061SJens Freimann  * Configurable value of number of random writes done in
2953c156061SJens Freimann  * VNF simulation memory area.
2963c156061SJens Freimann  */
2973c156061SJens Freimann uint64_t noisy_lkup_num_writes;
2983c156061SJens Freimann 
2993c156061SJens Freimann /*
3003c156061SJens Freimann  * Configurable value of number of random reads done in
3013c156061SJens Freimann  * VNF simulation memory area.
3023c156061SJens Freimann  */
3033c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3043c156061SJens Freimann 
3053c156061SJens Freimann /*
3063c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3073c156061SJens Freimann  * VNF simulation memory area.
3083c156061SJens Freimann  */
3093c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3103c156061SJens Freimann 
3113c156061SJens Freimann /*
312af75078fSIntel  * Receive Side Scaling (RSS) configuration.
313af75078fSIntel  */
3148a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
315af75078fSIntel 
316af75078fSIntel /*
317af75078fSIntel  * Port topology configuration
318af75078fSIntel  */
319af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
320af75078fSIntel 
3217741e4cfSIntel /*
3227741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3237741e4cfSIntel  */
3247741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3257741e4cfSIntel 
326af75078fSIntel /*
3277ee3e944SVasily Philipov  * Flow API isolated mode.
3287ee3e944SVasily Philipov  */
3297ee3e944SVasily Philipov uint8_t flow_isolate_all;
3307ee3e944SVasily Philipov 
3317ee3e944SVasily Philipov /*
332bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
333bc202406SDavid Marchand  */
334bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
335bc202406SDavid Marchand 
336bc202406SDavid Marchand /*
3378ea656f8SGaetan Rivet  * Enable link status change notification
3388ea656f8SGaetan Rivet  */
3398ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3408ea656f8SGaetan Rivet 
3418ea656f8SGaetan Rivet /*
342284c908cSGaetan Rivet  * Enable device removal notification.
343284c908cSGaetan Rivet  */
344284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
345284c908cSGaetan Rivet 
346fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
347fb73e096SJeff Guo 
348284c908cSGaetan Rivet /*
3493af72783SGaetan Rivet  * Display or mask ether events
3503af72783SGaetan Rivet  * Default to all events except VF_MBOX
3513af72783SGaetan Rivet  */
3523af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3533af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3543af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3553af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
356badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3573af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3583af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
359e505d84cSAnatoly Burakov /*
360e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
361e505d84cSAnatoly Burakov  */
362e505d84cSAnatoly Burakov int do_mlockall = 0;
3633af72783SGaetan Rivet 
3643af72783SGaetan Rivet /*
3657b7e5ba7SIntel  * NIC bypass mode configuration options.
3667b7e5ba7SIntel  */
3677b7e5ba7SIntel 
36850c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3697b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
370e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
3717b7e5ba7SIntel #endif
3727b7e5ba7SIntel 
373e261265eSRadu Nicolau 
37462d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
37562d3216dSReshma Pattan 
37662d3216dSReshma Pattan /*
37762d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
37862d3216dSReshma Pattan  */
37962d3216dSReshma Pattan uint8_t latencystats_enabled;
38062d3216dSReshma Pattan 
38162d3216dSReshma Pattan /*
38262d3216dSReshma Pattan  * Lcore ID to serive latency statistics.
38362d3216dSReshma Pattan  */
38462d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
38562d3216dSReshma Pattan 
38662d3216dSReshma Pattan #endif
38762d3216dSReshma Pattan 
3887b7e5ba7SIntel /*
389af75078fSIntel  * Ethernet device configuration.
390af75078fSIntel  */
/* Default RX configuration applied to every port. */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
};

/* Default TX configuration: request the mbuf fast-free offload. */
struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};
398fd8c20aaSShahaf Shuler 
/*
 * Flow Director configuration: disabled by default, with (mostly)
 * full-coverage match masks on the classic flow fields.
 */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,	/* flow director off by default */
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		/* NOTE(review): all TCI bits except bit 4 — looks
		 * intentional but confirm against the target PMDs. */
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,	/* default drop queue index */
};
421af75078fSIntel 
4222950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
423af75078fSIntel 
424ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
425ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
426ed30d9b6SIntel 
427ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
428ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
429ed30d9b6SIntel 
430ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
431ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
432ed30d9b6SIntel 
433a4fd5eeeSElza Mathew /*
434a4fd5eeeSElza Mathew  * Display zero values by default for xstats
435a4fd5eeeSElza Mathew  */
436a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
437a4fd5eeeSElza Mathew 
438c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
439c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4407acf894dSStephen Hurd 
441e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4427e4441c8SRemy Horton /* Bitrate statistics */
4437e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
444e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
445e25e6c70SRemy Horton uint8_t bitrate_enabled;
446e25e6c70SRemy Horton #endif
4477e4441c8SRemy Horton 
448b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
449b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
450b40f8d78SJiayu Hu 
/* Default header values used by the VXLAN encapsulation flow action. */
struct vxlan_encap_conf vxlan_encap_conf = {
	.select_ipv4 = 1,	/* use an IPv4 outer header by default */
	.select_vlan = 0,	/* no outer VLAN tag by default */
	.vni = "\x00\x00\x00",
	.udp_src = 0,
	.udp_dst = RTE_BE16(4789),	/* IANA-assigned VXLAN UDP port */
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};
4671960be7dSNelio Laranjeiro 
/* Default header values used by the NVGRE encapsulation flow action. */
struct nvgre_encap_conf nvgre_encap_conf = {
	.select_ipv4 = 1,	/* use an IPv4 outer header by default */
	.select_vlan = 0,	/* no outer VLAN tag by default */
	.tni = "\x00\x00\x00",
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};
482dcd962fcSNelio Laranjeiro 
483ed30d9b6SIntel /* Forward function declarations */
48428caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
48528caa76aSZhiyong Yang 						   struct rte_port *port);
486edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
487f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
48876ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
489d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
490*89ecd110SJeff Guo static void eth_dev_event_callback(const char *device_name,
491fb73e096SJeff Guo 				enum rte_dev_event_type type,
492fb73e096SJeff Guo 				void *param);
493ce8d5614SIntel 
494ce8d5614SIntel /*
495ce8d5614SIntel  * Check if all the ports are started.
496ce8d5614SIntel  * If yes, return positive value. If not, return zero.
497ce8d5614SIntel  */
498ce8d5614SIntel static int all_ports_started(void);
499ed30d9b6SIntel 
50052f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
50152f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
50252f38a20SJiayu Hu 
503af75078fSIntel /*
50498a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
505c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
506c9cafcc8SShahaf Shuler  */
507c9cafcc8SShahaf Shuler int
508c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
509c9cafcc8SShahaf Shuler {
510c9cafcc8SShahaf Shuler 	unsigned int i;
511c9cafcc8SShahaf Shuler 
512c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
513c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
514c9cafcc8SShahaf Shuler 			return 0;
515c9cafcc8SShahaf Shuler 	}
516c9cafcc8SShahaf Shuler 	return 1;
517c9cafcc8SShahaf Shuler }
518c9cafcc8SShahaf Shuler 
519c9cafcc8SShahaf Shuler /*
520af75078fSIntel  * Setup default configuration.
521af75078fSIntel  */
522af75078fSIntel static void
523af75078fSIntel set_default_fwd_lcores_config(void)
524af75078fSIntel {
525af75078fSIntel 	unsigned int i;
526af75078fSIntel 	unsigned int nb_lc;
5277acf894dSStephen Hurd 	unsigned int sock_num;
528af75078fSIntel 
529af75078fSIntel 	nb_lc = 0;
530af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
531dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
532dbfb8ec7SPhil Yang 			continue;
533c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
534c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
535c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
536c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
537c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
538c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
539c9cafcc8SShahaf Shuler 			}
540c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5417acf894dSStephen Hurd 		}
542f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
543f54fe5eeSStephen Hurd 			continue;
544f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
545af75078fSIntel 	}
546af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
547af75078fSIntel 	nb_cfg_lcores = nb_lcores;
548af75078fSIntel 	nb_fwd_lcores = 1;
549af75078fSIntel }
550af75078fSIntel 
551af75078fSIntel static void
552af75078fSIntel set_def_peer_eth_addrs(void)
553af75078fSIntel {
554af75078fSIntel 	portid_t i;
555af75078fSIntel 
556af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
557af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
558af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
559af75078fSIntel 	}
560af75078fSIntel }
561af75078fSIntel 
562af75078fSIntel static void
563af75078fSIntel set_default_fwd_ports_config(void)
564af75078fSIntel {
565af75078fSIntel 	portid_t pt_id;
56665a7360cSMatan Azrad 	int i = 0;
567af75078fSIntel 
56865a7360cSMatan Azrad 	RTE_ETH_FOREACH_DEV(pt_id)
56965a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
570af75078fSIntel 
571af75078fSIntel 	nb_cfg_ports = nb_ports;
572af75078fSIntel 	nb_fwd_ports = nb_ports;
573af75078fSIntel }
574af75078fSIntel 
/*
 * Reset the forwarding configuration to its defaults: forwarding
 * lcore list, peer Ethernet addresses and forwarding port list.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
582af75078fSIntel 
583c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
584c7f5dba7SAnatoly Burakov static int
585c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
586c7f5dba7SAnatoly Burakov {
587c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
588c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
589c7f5dba7SAnatoly Burakov 
590c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
591c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
592c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
593c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
594c7f5dba7SAnatoly Burakov 	 */
595c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
596c7f5dba7SAnatoly Burakov 
597c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
598c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
599c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
600c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
601c7f5dba7SAnatoly Burakov 		return -1;
602c7f5dba7SAnatoly Burakov 	}
603c7f5dba7SAnatoly Burakov 
604c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
605c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
606c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
607c7f5dba7SAnatoly Burakov 
608c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
609c7f5dba7SAnatoly Burakov 
610c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
611c7f5dba7SAnatoly Burakov 
612c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
613c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
614c7f5dba7SAnatoly Burakov 		return -1;
615c7f5dba7SAnatoly Burakov 	}
616c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
617c7f5dba7SAnatoly Burakov 
618c7f5dba7SAnatoly Burakov 	return 0;
619c7f5dba7SAnatoly Burakov }
620c7f5dba7SAnatoly Burakov 
/* index of the lowest set bit in v (result undefined when v == 0) */
static inline uint32_t
bsf64(uint64_t v)
{
	uint32_t pos = (uint32_t)__builtin_ctzll(v);

	return pos;
}
626c7f5dba7SAnatoly Burakov 
/*
 * Ceiling of log2(v); returns 0 for v == 0 or v == 1.
 *
 * Computed directly from the highest bit of (v - 1) instead of
 * rounding v up to a power of two first: the previous
 * rte_align64pow2() + bsf64() approach wrapped to 0 for any
 * v > 2^63 and then hit __builtin_ctzll(0), which is undefined
 * behavior. This form is well-defined for every v.
 */
static inline uint32_t
log2_u64(uint64_t v)
{
	if (v <= 1)
		return 0;
	/* ceil(log2(v)) == bit width of (v - 1) */
	return 64 - (uint32_t)__builtin_clzll(v - 1);
}
635c7f5dba7SAnatoly Burakov 
636c7f5dba7SAnatoly Burakov static int
637c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
638c7f5dba7SAnatoly Burakov {
639c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
640c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
641c7f5dba7SAnatoly Burakov 	 */
642c7f5dba7SAnatoly Burakov 	int log2 = log2_u64(page_sz);
643c7f5dba7SAnatoly Burakov 
644c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
645c7f5dba7SAnatoly Burakov }
646c7f5dba7SAnatoly Burakov 
647c7f5dba7SAnatoly Burakov static void *
648c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
649c7f5dba7SAnatoly Burakov {
650c7f5dba7SAnatoly Burakov 	void *addr;
651c7f5dba7SAnatoly Burakov 	int flags;
652c7f5dba7SAnatoly Burakov 
653c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
654c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
655c7f5dba7SAnatoly Burakov 	if (huge)
656c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
657c7f5dba7SAnatoly Burakov 
658c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
659c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
660c7f5dba7SAnatoly Burakov 		return NULL;
661c7f5dba7SAnatoly Burakov 
662c7f5dba7SAnatoly Burakov 	return addr;
663c7f5dba7SAnatoly Burakov }
664c7f5dba7SAnatoly Burakov 
/* Description of an externally allocated memory area, as filled in by
 * create_extmem() and consumed by rte_malloc_heap_memory_add().
 */
struct extmem_param {
	void *addr;			/* base virtual address of the area */
	size_t len;			/* total length of the area, in bytes */
	size_t pgsz;			/* page size used for the mapping */
	rte_iova_t *iova_table;		/* malloc'd per-page IOVA addresses */
	unsigned int iova_table_len;	/* number of entries in iova_table */
};
672c7f5dba7SAnatoly Burakov 
673c7f5dba7SAnatoly Burakov static int
674c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
675c7f5dba7SAnatoly Burakov 		bool huge)
676c7f5dba7SAnatoly Burakov {
677c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
678c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
679c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
680c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
681c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
682c7f5dba7SAnatoly Burakov 	void *addr;
683c7f5dba7SAnatoly Burakov 	int ret;
684c7f5dba7SAnatoly Burakov 
685c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
686c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
687c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
688c7f5dba7SAnatoly Burakov 			continue;
689c7f5dba7SAnatoly Burakov 
690c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
691c7f5dba7SAnatoly Burakov 
692c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
693c7f5dba7SAnatoly Burakov 		if (!huge)
694c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
695c7f5dba7SAnatoly Burakov 
696c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
697c7f5dba7SAnatoly Burakov 		if (ret < 0) {
698c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
699c7f5dba7SAnatoly Burakov 			return -1;
700c7f5dba7SAnatoly Burakov 		}
701c7f5dba7SAnatoly Burakov 
702c7f5dba7SAnatoly Burakov 		/* allocate our memory */
703c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
704c7f5dba7SAnatoly Burakov 
705c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
706c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
707c7f5dba7SAnatoly Burakov 		 * try another one.
708c7f5dba7SAnatoly Burakov 		 */
709c7f5dba7SAnatoly Burakov 		if (addr == NULL)
710c7f5dba7SAnatoly Burakov 			continue;
711c7f5dba7SAnatoly Burakov 
712c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
713c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
714c7f5dba7SAnatoly Burakov 
715c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
716c7f5dba7SAnatoly Burakov 
717c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
718c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
719c7f5dba7SAnatoly Burakov 			goto fail;
720c7f5dba7SAnatoly Burakov 		}
721c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
722c7f5dba7SAnatoly Burakov 		if (!huge)
723c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
724c7f5dba7SAnatoly Burakov 
725c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
726c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
727c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
728c7f5dba7SAnatoly Burakov 			size_t offset;
729c7f5dba7SAnatoly Burakov 			void *cur;
730c7f5dba7SAnatoly Burakov 
731c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
732c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
733c7f5dba7SAnatoly Burakov 
734c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
735c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
736c7f5dba7SAnatoly Burakov 
737c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
738c7f5dba7SAnatoly Burakov 
739c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
740c7f5dba7SAnatoly Burakov 		}
741c7f5dba7SAnatoly Burakov 
742c7f5dba7SAnatoly Burakov 		break;
743c7f5dba7SAnatoly Burakov 	}
744c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
745c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
746c7f5dba7SAnatoly Burakov 		return -1;
747c7f5dba7SAnatoly Burakov 
748c7f5dba7SAnatoly Burakov 	param->addr = addr;
749c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
750c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
751c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
752c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
753c7f5dba7SAnatoly Burakov 
754c7f5dba7SAnatoly Burakov 	return 0;
755c7f5dba7SAnatoly Burakov fail:
756c7f5dba7SAnatoly Burakov 	if (iovas)
757c7f5dba7SAnatoly Burakov 		free(iovas);
758c7f5dba7SAnatoly Burakov 	if (addr)
759c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
760c7f5dba7SAnatoly Burakov 
761c7f5dba7SAnatoly Burakov 	return -1;
762c7f5dba7SAnatoly Burakov }
763c7f5dba7SAnatoly Burakov 
764c7f5dba7SAnatoly Burakov static int
765c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
766c7f5dba7SAnatoly Burakov {
767c7f5dba7SAnatoly Burakov 	struct extmem_param param;
768c7f5dba7SAnatoly Burakov 	int socket_id, ret;
769c7f5dba7SAnatoly Burakov 
770c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
771c7f5dba7SAnatoly Burakov 
772c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
773c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
774c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
775c7f5dba7SAnatoly Burakov 		/* create our heap */
776c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
777c7f5dba7SAnatoly Burakov 		if (ret < 0) {
778c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
779c7f5dba7SAnatoly Burakov 			return -1;
780c7f5dba7SAnatoly Burakov 		}
781c7f5dba7SAnatoly Burakov 	}
782c7f5dba7SAnatoly Burakov 
783c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
784c7f5dba7SAnatoly Burakov 	if (ret < 0) {
785c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
786c7f5dba7SAnatoly Burakov 		return -1;
787c7f5dba7SAnatoly Burakov 	}
788c7f5dba7SAnatoly Burakov 
789c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
790c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
791c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
792c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
793c7f5dba7SAnatoly Burakov 
794c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
795c7f5dba7SAnatoly Burakov 
796c7f5dba7SAnatoly Burakov 	/* not needed any more */
797c7f5dba7SAnatoly Burakov 	free(param.iova_table);
798c7f5dba7SAnatoly Burakov 
799c7f5dba7SAnatoly Burakov 	if (ret < 0) {
800c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
801c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
802c7f5dba7SAnatoly Burakov 		return -1;
803c7f5dba7SAnatoly Burakov 	}
804c7f5dba7SAnatoly Burakov 
805c7f5dba7SAnatoly Burakov 	/* success */
806c7f5dba7SAnatoly Burakov 
807c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
808c7f5dba7SAnatoly Burakov 			param.len >> 20);
809c7f5dba7SAnatoly Burakov 
810c7f5dba7SAnatoly Burakov 	return 0;
811c7f5dba7SAnatoly Burakov }
812c7f5dba7SAnatoly Burakov 
/*
 * Create the mbuf pool for one socket, using the allocation strategy
 * selected by the global mp_alloc_type (native / anonymous / external
 * memory). Exits the program on failure; done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* total per-object size: mbuf header plus data segment */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			/* create an empty pool, then back it with anonymous
			 * memory and initialize the mbufs manually
			 */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			/* allocate external memory and place it on its own
			 * malloc heap, then create the pool on that heap
			 */
			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

	/* every case above falls through here (the breaks land on the label),
	 * so the NULL check covers all allocation modes
	 */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
895af75078fSIntel 
89620a0286fSLiu Xiaofeng /*
89720a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
89820a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
89920a0286fSLiu Xiaofeng  */
90020a0286fSLiu Xiaofeng static int
90120a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
90220a0286fSLiu Xiaofeng {
90320a0286fSLiu Xiaofeng 	static int warning_once = 0;
90420a0286fSLiu Xiaofeng 
905c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
90620a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
90720a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
90820a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
90920a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
91020a0286fSLiu Xiaofeng 			       " --numa.\n");
91120a0286fSLiu Xiaofeng 		warning_once = 1;
91220a0286fSLiu Xiaofeng 		return -1;
91320a0286fSLiu Xiaofeng 	}
91420a0286fSLiu Xiaofeng 	return 0;
91520a0286fSLiu Xiaofeng }
91620a0286fSLiu Xiaofeng 
9173f7311baSWei Dai /*
9183f7311baSWei Dai  * Get the allowed maximum number of RX queues.
9193f7311baSWei Dai  * *pid return the port id which has minimal value of
9203f7311baSWei Dai  * max_rx_queues in all ports.
9213f7311baSWei Dai  */
9223f7311baSWei Dai queueid_t
9233f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
9243f7311baSWei Dai {
9253f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
9263f7311baSWei Dai 	portid_t pi;
9273f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
9283f7311baSWei Dai 
9293f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
9303f7311baSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
9313f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
9323f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
9333f7311baSWei Dai 			*pid = pi;
9343f7311baSWei Dai 		}
9353f7311baSWei Dai 	}
9363f7311baSWei Dai 	return allowed_max_rxq;
9373f7311baSWei Dai }
9383f7311baSWei Dai 
9393f7311baSWei Dai /*
9403f7311baSWei Dai  * Check input rxq is valid or not.
9413f7311baSWei Dai  * If input rxq is not greater than any of maximum number
9423f7311baSWei Dai  * of RX queues of all ports, it is valid.
9433f7311baSWei Dai  * if valid, return 0, else return -1
9443f7311baSWei Dai  */
9453f7311baSWei Dai int
9463f7311baSWei Dai check_nb_rxq(queueid_t rxq)
9473f7311baSWei Dai {
9483f7311baSWei Dai 	queueid_t allowed_max_rxq;
9493f7311baSWei Dai 	portid_t pid = 0;
9503f7311baSWei Dai 
9513f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
9523f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
9533f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
9543f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
9553f7311baSWei Dai 		       rxq,
9563f7311baSWei Dai 		       allowed_max_rxq,
9573f7311baSWei Dai 		       pid);
9583f7311baSWei Dai 		return -1;
9593f7311baSWei Dai 	}
9603f7311baSWei Dai 	return 0;
9613f7311baSWei Dai }
9623f7311baSWei Dai 
96336db4f6cSWei Dai /*
96436db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
96536db4f6cSWei Dai  * *pid return the port id which has minimal value of
96636db4f6cSWei Dai  * max_tx_queues in all ports.
96736db4f6cSWei Dai  */
96836db4f6cSWei Dai queueid_t
96936db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
97036db4f6cSWei Dai {
97136db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
97236db4f6cSWei Dai 	portid_t pi;
97336db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
97436db4f6cSWei Dai 
97536db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
97636db4f6cSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
97736db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
97836db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
97936db4f6cSWei Dai 			*pid = pi;
98036db4f6cSWei Dai 		}
98136db4f6cSWei Dai 	}
98236db4f6cSWei Dai 	return allowed_max_txq;
98336db4f6cSWei Dai }
98436db4f6cSWei Dai 
98536db4f6cSWei Dai /*
98636db4f6cSWei Dai  * Check input txq is valid or not.
98736db4f6cSWei Dai  * If input txq is not greater than any of maximum number
98836db4f6cSWei Dai  * of TX queues of all ports, it is valid.
98936db4f6cSWei Dai  * if valid, return 0, else return -1
99036db4f6cSWei Dai  */
99136db4f6cSWei Dai int
99236db4f6cSWei Dai check_nb_txq(queueid_t txq)
99336db4f6cSWei Dai {
99436db4f6cSWei Dai 	queueid_t allowed_max_txq;
99536db4f6cSWei Dai 	portid_t pid = 0;
99636db4f6cSWei Dai 
99736db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
99836db4f6cSWei Dai 	if (txq > allowed_max_txq) {
99936db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
100036db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
100136db4f6cSWei Dai 		       txq,
100236db4f6cSWei Dai 		       allowed_max_txq,
100336db4f6cSWei Dai 		       pid);
100436db4f6cSWei Dai 		return -1;
100536db4f6cSWei Dai 	}
100636db4f6cSWei Dai 	return 0;
100736db4f6cSWei Dai }
100836db4f6cSWei Dai 
/*
 * One-time configuration: allocate the per-lcore forwarding contexts,
 * apply default port offload configuration, create the mbuf pools,
 * set up forwarding streams and the per-lcore GSO/GRO contexts.
 * Exits the program on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* in NUMA mode, start with no per-port/per-ring NUMA placement */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* drop the fast-free Tx offload if the port can't do it */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		/* count ports per socket (explicit config wins over probe) */
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* one pool per detected socket */
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* fall back to the socket-0 pool if none exists locally */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	/* softnic ports need access to the forwarding lcore array */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}
1175ce8d5614SIntel 
11762950a769SDeclan Doherty 
11772950a769SDeclan Doherty void
1178a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
11792950a769SDeclan Doherty {
11802950a769SDeclan Doherty 	struct rte_port *port;
11812950a769SDeclan Doherty 
11822950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
11832950a769SDeclan Doherty 	port = &ports[new_port_id];
11842950a769SDeclan Doherty 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
11852950a769SDeclan Doherty 
11862950a769SDeclan Doherty 	/* set flag to initialize port/queue */
11872950a769SDeclan Doherty 	port->need_reconfig = 1;
11882950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1189a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
11902950a769SDeclan Doherty 
11912950a769SDeclan Doherty 	init_port_config();
11922950a769SDeclan Doherty }
11932950a769SDeclan Doherty 
11942950a769SDeclan Doherty 
/*
 * (Re)allocate the global forwarding stream array: validate the
 * configured queue counts against every port's limits, assign each
 * port a socket id, then size fwd_streams as nb_ports * max(rxq, txq).
 * Returns 0 on success, -1 on invalid configuration; exits on
 * allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* explicit --port-numa-config wins over probing */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* nothing to do if the stream count is unchanged */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1280af75078fSIntel 
1281af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a summary of the burst-size distribution in pbs: the total
 * number of bursts, plus the two most frequent burst sizes and the
 * percentage of bursts each accounts for.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];	/* top-2 burst counts; [2] unused */
	uint16_t pktnb_stats[3];	/* burst sizes matching burst_stats */
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			/* new leader: previous leader becomes runner-up */
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	/* nothing recorded: print nothing */
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	/* only one burst size seen: leader accounts for everything */
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	/* exactly two burst sizes: remainder belongs to the runner-up */
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	/* runner-up or remainder rounds to 0%: lump everything else */
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
1337af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1338af75078fSIntel 
/*
 * Display the forwarding statistics of one port: HW counters from
 * *stats (collected and delta-adjusted by the caller) combined with
 * the software counters kept in the matching rte_port entry.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Two layouts: a compact one when no per-queue stats mapping is
	 * configured on the port, a wider column layout otherwise.
	 */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum counters only mean something for the csum engine. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		/* Error lines are omitted entirely when both counters are 0. */
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"    Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst spread of the representative streams attached to the port. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-queue counters, printed only where a stats mapping exists. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
1423af75078fSIntel 
/*
 * Display the software counters of one forwarding stream; streams that
 * saw no traffic at all are silently skipped.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Nothing received, sent or dropped: nothing worth printing. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u Rx- bad outer L4 checksum: %-14u\n",
			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
			fs->rx_bad_outer_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
1454af75078fSIntel 
1455af75078fSIntel static void
14567741e4cfSIntel flush_fwd_rx_queues(void)
1457af75078fSIntel {
1458af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1459af75078fSIntel 	portid_t  rxp;
14607741e4cfSIntel 	portid_t port_id;
1461af75078fSIntel 	queueid_t rxq;
1462af75078fSIntel 	uint16_t  nb_rx;
1463af75078fSIntel 	uint16_t  i;
1464af75078fSIntel 	uint8_t   j;
1465f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1466594302c7SJames Poole 	uint64_t timer_period;
1467f487715fSReshma Pattan 
1468f487715fSReshma Pattan 	/* convert to number of cycles */
1469594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1470af75078fSIntel 
1471af75078fSIntel 	for (j = 0; j < 2; j++) {
14727741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1473af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
14747741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1475f487715fSReshma Pattan 				/**
1476f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1477f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1478f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1479f487715fSReshma Pattan 				* after 1sec timer expiry.
1480f487715fSReshma Pattan 				*/
1481f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1482af75078fSIntel 				do {
14837741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1484013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1485af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1486af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1487f487715fSReshma Pattan 
1488f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1489f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1490f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1491f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1492f487715fSReshma Pattan 					(timer_tsc < timer_period));
1493f487715fSReshma Pattan 				timer_tsc = 0;
1494af75078fSIntel 			}
1495af75078fSIntel 		}
1496af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1497af75078fSIntel 	}
1498af75078fSIntel }
1499af75078fSIntel 
/*
 * Forwarding loop of one logical core: repeatedly invoke pkt_fwd on
 * every stream assigned to lcore fc until fc->stopped becomes true.
 * When compiled in, the lcore elected for bitrate/latency statistics
 * additionally refreshes those measurements from inside the loop.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* Streams handled by this lcore form a contiguous slice. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Only the designated lcore computes bitrates, once per sec. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Likewise, a single designated lcore updates latency stats. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1542af75078fSIntel 
1543af75078fSIntel static int
1544af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1545af75078fSIntel {
1546af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1547af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1548af75078fSIntel 	return 0;
1549af75078fSIntel }
1550af75078fSIntel 
1551af75078fSIntel /*
1552af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1553af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1554af75078fSIntel  */
1555af75078fSIntel static int
1556af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1557af75078fSIntel {
1558af75078fSIntel 	struct fwd_lcore *fwd_lc;
1559af75078fSIntel 	struct fwd_lcore tmp_lcore;
1560af75078fSIntel 
1561af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1562af75078fSIntel 	tmp_lcore = *fwd_lc;
1563af75078fSIntel 	tmp_lcore.stopped = 1;
1564af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1565af75078fSIntel 	return 0;
1566af75078fSIntel }
1567af75078fSIntel 
1568af75078fSIntel /*
1569af75078fSIntel  * Launch packet forwarding:
1570af75078fSIntel  *     - Setup per-port forwarding context.
1571af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1572af75078fSIntel  */
1573af75078fSIntel static void
1574af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1575af75078fSIntel {
1576af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1577af75078fSIntel 	unsigned int i;
1578af75078fSIntel 	unsigned int lc_id;
1579af75078fSIntel 	int diag;
1580af75078fSIntel 
1581af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1582af75078fSIntel 	if (port_fwd_begin != NULL) {
1583af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1584af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1585af75078fSIntel 	}
1586af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1587af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1588af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1589af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1590af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1591af75078fSIntel 						     fwd_lcores[i], lc_id);
1592af75078fSIntel 			if (diag != 0)
1593af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1594af75078fSIntel 				       lc_id, diag);
1595af75078fSIntel 		}
1596af75078fSIntel 	}
1597af75078fSIntel }
1598af75078fSIntel 
1599af75078fSIntel /*
160003ce2c53SMatan Azrad  * Update the forward ports list.
160103ce2c53SMatan Azrad  */
160203ce2c53SMatan Azrad void
160303ce2c53SMatan Azrad update_fwd_ports(portid_t new_pid)
160403ce2c53SMatan Azrad {
160503ce2c53SMatan Azrad 	unsigned int i;
160603ce2c53SMatan Azrad 	unsigned int new_nb_fwd_ports = 0;
160703ce2c53SMatan Azrad 	int move = 0;
160803ce2c53SMatan Azrad 
160903ce2c53SMatan Azrad 	for (i = 0; i < nb_fwd_ports; ++i) {
161003ce2c53SMatan Azrad 		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
161103ce2c53SMatan Azrad 			move = 1;
161203ce2c53SMatan Azrad 		else if (move)
161303ce2c53SMatan Azrad 			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
161403ce2c53SMatan Azrad 		else
161503ce2c53SMatan Azrad 			new_nb_fwd_ports++;
161603ce2c53SMatan Azrad 	}
161703ce2c53SMatan Azrad 	if (new_pid < RTE_MAX_ETHPORTS)
161803ce2c53SMatan Azrad 		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
161903ce2c53SMatan Azrad 
162003ce2c53SMatan Azrad 	nb_fwd_ports = new_nb_fwd_ports;
162103ce2c53SMatan Azrad 	nb_cfg_ports = new_nb_fwd_ports;
162203ce2c53SMatan Azrad }
162303ce2c53SMatan Azrad 
162403ce2c53SMatan Azrad /*
1625af75078fSIntel  * Launch packet forwarding configuration.
1626af75078fSIntel  */
1627af75078fSIntel void
1628af75078fSIntel start_packet_forwarding(int with_tx_first)
1629af75078fSIntel {
1630af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1631af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1632af75078fSIntel 	struct rte_port *port;
1633af75078fSIntel 	unsigned int i;
1634af75078fSIntel 	portid_t   pt_id;
1635af75078fSIntel 	streamid_t sm_id;
1636af75078fSIntel 
16375a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
16385a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
16395a8fb55cSReshma Pattan 
16405a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
16415a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
16425a8fb55cSReshma Pattan 
16435a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
16445a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
16455a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
16465a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
16475a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
16485a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
16495a8fb55cSReshma Pattan 
1650ce8d5614SIntel 	if (all_ports_started() == 0) {
1651ce8d5614SIntel 		printf("Not all ports were started\n");
1652ce8d5614SIntel 		return;
1653ce8d5614SIntel 	}
1654af75078fSIntel 	if (test_done == 0) {
1655af75078fSIntel 		printf("Packet forwarding already started\n");
1656af75078fSIntel 		return;
1657af75078fSIntel 	}
1658edf87b4aSBernard Iremonger 
1659edf87b4aSBernard Iremonger 
16607741e4cfSIntel 	if(dcb_test) {
16617741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
16627741e4cfSIntel 			pt_id = fwd_ports_ids[i];
16637741e4cfSIntel 			port = &ports[pt_id];
16647741e4cfSIntel 			if (!port->dcb_flag) {
16657741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
16667741e4cfSIntel                                        "be configured in this mode.\n");
1667013af9b6SIntel 				return;
1668013af9b6SIntel 			}
16697741e4cfSIntel 		}
16707741e4cfSIntel 		if (nb_fwd_lcores == 1) {
16717741e4cfSIntel 			printf("In DCB mode,the nb forwarding cores "
16727741e4cfSIntel                                "should be larger than 1.\n");
16737741e4cfSIntel 			return;
16747741e4cfSIntel 		}
16757741e4cfSIntel 	}
1676af75078fSIntel 	test_done = 0;
16777741e4cfSIntel 
167847a767b2SMatan Azrad 	fwd_config_setup();
167947a767b2SMatan Azrad 
16807741e4cfSIntel 	if(!no_flush_rx)
16817741e4cfSIntel 		flush_fwd_rx_queues();
16827741e4cfSIntel 
1683933617d8SZhihong Wang 	pkt_fwd_config_display(&cur_fwd_config);
1684af75078fSIntel 	rxtx_config_display();
1685af75078fSIntel 
1686af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1687af75078fSIntel 		pt_id = fwd_ports_ids[i];
1688af75078fSIntel 		port = &ports[pt_id];
1689af75078fSIntel 		rte_eth_stats_get(pt_id, &port->stats);
1690af75078fSIntel 		port->tx_dropped = 0;
1691013af9b6SIntel 
1692013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
1693af75078fSIntel 	}
1694af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1695af75078fSIntel 		fwd_streams[sm_id]->rx_packets = 0;
1696af75078fSIntel 		fwd_streams[sm_id]->tx_packets = 0;
1697af75078fSIntel 		fwd_streams[sm_id]->fwd_dropped = 0;
1698af75078fSIntel 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1699af75078fSIntel 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
170058d475b7SJerin Jacob 		fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1701af75078fSIntel 
1702af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1703af75078fSIntel 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1704af75078fSIntel 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1705af75078fSIntel 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1706af75078fSIntel 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1707af75078fSIntel #endif
1708af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1709af75078fSIntel 		fwd_streams[sm_id]->core_cycles = 0;
1710af75078fSIntel #endif
1711af75078fSIntel 	}
1712af75078fSIntel 	if (with_tx_first) {
1713af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1714af75078fSIntel 		if (port_fwd_begin != NULL) {
1715af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1716af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
1717af75078fSIntel 		}
1718acbf77a6SZhihong Wang 		while (with_tx_first--) {
1719acbf77a6SZhihong Wang 			launch_packet_forwarding(
1720acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
1721af75078fSIntel 			rte_eal_mp_wait_lcore();
1722acbf77a6SZhihong Wang 		}
1723af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
1724af75078fSIntel 		if (port_fwd_end != NULL) {
1725af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1726af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
1727af75078fSIntel 		}
1728af75078fSIntel 	}
1729af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
1730af75078fSIntel }
1731af75078fSIntel 
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait
 * for all of them, run the engine's per-port teardown hook, fold the
 * per-stream software counters back into their ports, then print the
 * per-stream, per-port and accumulated statistics for the run.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Let the forwarding engine clean up each port, if it wants to. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* Fold each stream's counters into its RX/TX ports. With more
	 * streams than ports the per-stream stats are printed here and
	 * the port's representative stream pointers are cleared;
	 * otherwise each port keeps a pointer to its single stream.
	 */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

		ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
				fwd_streams[sm_id]->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Report only the delta of this run: subtract the snapshot
		 * taken in start_packet_forwarding(), then clear it.
		 */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1872af75078fSIntel 
1873cfae07fdSOuyang Changchun void
1874cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1875cfae07fdSOuyang Changchun {
1876492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1877cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
1878cfae07fdSOuyang Changchun }
1879cfae07fdSOuyang Changchun 
1880cfae07fdSOuyang Changchun void
1881cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1882cfae07fdSOuyang Changchun {
1883492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
1884cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
1885cfae07fdSOuyang Changchun }
1886cfae07fdSOuyang Changchun 
1887ce8d5614SIntel static int
1888ce8d5614SIntel all_ports_started(void)
1889ce8d5614SIntel {
1890ce8d5614SIntel 	portid_t pi;
1891ce8d5614SIntel 	struct rte_port *port;
1892ce8d5614SIntel 
18937d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1894ce8d5614SIntel 		port = &ports[pi];
1895ce8d5614SIntel 		/* Check if there is a port which is not started */
189641b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
189741b05095SBernard Iremonger 			(port->slave_flag == 0))
1898ce8d5614SIntel 			return 0;
1899ce8d5614SIntel 	}
1900ce8d5614SIntel 
1901ce8d5614SIntel 	/* No port is not started */
1902ce8d5614SIntel 	return 1;
1903ce8d5614SIntel }
1904ce8d5614SIntel 
1905148f963fSBruce Richardson int
19066018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
19076018eb8cSShahaf Shuler {
19086018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
19096018eb8cSShahaf Shuler 
19106018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
19116018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
19126018eb8cSShahaf Shuler 		return 0;
19136018eb8cSShahaf Shuler 	return 1;
19146018eb8cSShahaf Shuler }
19156018eb8cSShahaf Shuler 
19166018eb8cSShahaf Shuler int
1917edab33b1STetsuya Mukawa all_ports_stopped(void)
1918edab33b1STetsuya Mukawa {
1919edab33b1STetsuya Mukawa 	portid_t pi;
1920edab33b1STetsuya Mukawa 
19217d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
19226018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
1923edab33b1STetsuya Mukawa 			return 0;
1924edab33b1STetsuya Mukawa 	}
1925edab33b1STetsuya Mukawa 
1926edab33b1STetsuya Mukawa 	return 1;
1927edab33b1STetsuya Mukawa }
1928edab33b1STetsuya Mukawa 
1929edab33b1STetsuya Mukawa int
1930edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
1931edab33b1STetsuya Mukawa {
1932edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1933edab33b1STetsuya Mukawa 		return 0;
1934edab33b1STetsuya Mukawa 
1935edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1936edab33b1STetsuya Mukawa 		return 0;
1937edab33b1STetsuya Mukawa 
1938edab33b1STetsuya Mukawa 	return 1;
1939edab33b1STetsuya Mukawa }
1940edab33b1STetsuya Mukawa 
1941edab33b1STetsuya Mukawa static int
1942edab33b1STetsuya Mukawa port_is_closed(portid_t port_id)
1943edab33b1STetsuya Mukawa {
1944edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1945edab33b1STetsuya Mukawa 		return 0;
1946edab33b1STetsuya Mukawa 
1947edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1948edab33b1STetsuya Mukawa 		return 0;
1949edab33b1STetsuya Mukawa 
1950edab33b1STetsuya Mukawa 	return 1;
1951edab33b1STetsuya Mukawa }
1952edab33b1STetsuya Mukawa 
1953edab33b1STetsuya Mukawa int
1954ce8d5614SIntel start_port(portid_t pid)
1955ce8d5614SIntel {
195692d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
1957ce8d5614SIntel 	portid_t pi;
1958ce8d5614SIntel 	queueid_t qi;
1959ce8d5614SIntel 	struct rte_port *port;
19602950a769SDeclan Doherty 	struct ether_addr mac_addr;
196176ad4a2dSGaetan Rivet 	enum rte_eth_event_type event_type;
1962ce8d5614SIntel 
19634468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
19644468635fSMichael Qiu 		return 0;
19654468635fSMichael Qiu 
1966ce8d5614SIntel 	if(dcb_config)
1967ce8d5614SIntel 		dcb_test = 1;
19687d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1969edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1970ce8d5614SIntel 			continue;
1971ce8d5614SIntel 
197292d2703eSMichael Qiu 		need_check_link_status = 0;
1973ce8d5614SIntel 		port = &ports[pi];
1974ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1975ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1976ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
1977ce8d5614SIntel 			continue;
1978ce8d5614SIntel 		}
1979ce8d5614SIntel 
1980ce8d5614SIntel 		if (port->need_reconfig > 0) {
1981ce8d5614SIntel 			port->need_reconfig = 0;
1982ce8d5614SIntel 
19837ee3e944SVasily Philipov 			if (flow_isolate_all) {
19847ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
19857ee3e944SVasily Philipov 				if (ret) {
19867ee3e944SVasily Philipov 					printf("Failed to apply isolated"
19877ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
19887ee3e944SVasily Philipov 					return -1;
19897ee3e944SVasily Philipov 				}
19907ee3e944SVasily Philipov 			}
19917ee3e944SVasily Philipov 
19925706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
199320a0286fSLiu Xiaofeng 					port->socket_id);
1994ce8d5614SIntel 			/* configure port */
1995ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1996ce8d5614SIntel 						&(port->dev_conf));
1997ce8d5614SIntel 			if (diag != 0) {
1998ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1999ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2000ce8d5614SIntel 					printf("Port %d can not be set back "
2001ce8d5614SIntel 							"to stopped\n", pi);
2002ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
2003ce8d5614SIntel 				/* try to reconfigure port next time */
2004ce8d5614SIntel 				port->need_reconfig = 1;
2005148f963fSBruce Richardson 				return -1;
2006ce8d5614SIntel 			}
2007ce8d5614SIntel 		}
2008ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
2009ce8d5614SIntel 			port->need_reconfig_queues = 0;
2010ce8d5614SIntel 			/* setup tx queues */
2011ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
2012b6ea6408SIntel 				if ((numa_support) &&
2013b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2014b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2015d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2016d44f8a48SQi Zhang 						txring_numa[pi],
2017d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2018b6ea6408SIntel 				else
2019b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2020d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2021d44f8a48SQi Zhang 						port->socket_id,
2022d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2023b6ea6408SIntel 
2024ce8d5614SIntel 				if (diag == 0)
2025ce8d5614SIntel 					continue;
2026ce8d5614SIntel 
2027ce8d5614SIntel 				/* Fail to setup tx queue, return */
2028ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2029ce8d5614SIntel 							RTE_PORT_HANDLING,
2030ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2031ce8d5614SIntel 					printf("Port %d can not be set back "
2032ce8d5614SIntel 							"to stopped\n", pi);
2033d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
2034d44f8a48SQi Zhang 				       pi);
2035ce8d5614SIntel 				/* try to reconfigure queues next time */
2036ce8d5614SIntel 				port->need_reconfig_queues = 1;
2037148f963fSBruce Richardson 				return -1;
2038ce8d5614SIntel 			}
2039ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2040d44f8a48SQi Zhang 				/* setup rx queues */
2041b6ea6408SIntel 				if ((numa_support) &&
2042b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2043b6ea6408SIntel 					struct rte_mempool * mp =
2044b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
2045b6ea6408SIntel 					if (mp == NULL) {
2046b6ea6408SIntel 						printf("Failed to setup RX queue:"
2047b6ea6408SIntel 							"No mempool allocation"
2048b6ea6408SIntel 							" on the socket %d\n",
2049b6ea6408SIntel 							rxring_numa[pi]);
2050148f963fSBruce Richardson 						return -1;
2051b6ea6408SIntel 					}
2052b6ea6408SIntel 
2053b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2054d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2055d44f8a48SQi Zhang 					     rxring_numa[pi],
2056d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2057d44f8a48SQi Zhang 					     mp);
20581e1d6bddSBernard Iremonger 				} else {
20591e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
20601e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
20611e1d6bddSBernard Iremonger 					if (mp == NULL) {
20621e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
20631e1d6bddSBernard Iremonger 							"No mempool allocation"
20641e1d6bddSBernard Iremonger 							" on the socket %d\n",
20651e1d6bddSBernard Iremonger 							port->socket_id);
20661e1d6bddSBernard Iremonger 						return -1;
2067b6ea6408SIntel 					}
2068b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2069d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2070d44f8a48SQi Zhang 					     port->socket_id,
2071d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2072d44f8a48SQi Zhang 					     mp);
20731e1d6bddSBernard Iremonger 				}
2074ce8d5614SIntel 				if (diag == 0)
2075ce8d5614SIntel 					continue;
2076ce8d5614SIntel 
2077ce8d5614SIntel 				/* Fail to setup rx queue, return */
2078ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2079ce8d5614SIntel 							RTE_PORT_HANDLING,
2080ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2081ce8d5614SIntel 					printf("Port %d can not be set back "
2082ce8d5614SIntel 							"to stopped\n", pi);
2083d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2084d44f8a48SQi Zhang 				       pi);
2085ce8d5614SIntel 				/* try to reconfigure queues next time */
2086ce8d5614SIntel 				port->need_reconfig_queues = 1;
2087148f963fSBruce Richardson 				return -1;
2088ce8d5614SIntel 			}
2089ce8d5614SIntel 		}
209076ad4a2dSGaetan Rivet 
2091ce8d5614SIntel 		/* start port */
2092ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2093ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2094ce8d5614SIntel 
2095ce8d5614SIntel 			/* Fail to setup rx queue, return */
2096ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2097ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2098ce8d5614SIntel 				printf("Port %d can not be set back to "
2099ce8d5614SIntel 							"stopped\n", pi);
2100ce8d5614SIntel 			continue;
2101ce8d5614SIntel 		}
2102ce8d5614SIntel 
2103ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2104ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2105ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2106ce8d5614SIntel 
21072950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
2108d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
21092950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
21102950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
21112950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2112d8c89163SZijie Pan 
2113ce8d5614SIntel 		/* at least one port started, need checking link status */
2114ce8d5614SIntel 		need_check_link_status = 1;
2115ce8d5614SIntel 	}
2116ce8d5614SIntel 
21174fb82244SMatan Azrad 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
21184fb82244SMatan Azrad 	     event_type < RTE_ETH_EVENT_MAX;
21194fb82244SMatan Azrad 	     event_type++) {
21204fb82244SMatan Azrad 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
21214fb82244SMatan Azrad 						event_type,
21224fb82244SMatan Azrad 						eth_event_callback,
21234fb82244SMatan Azrad 						NULL);
21244fb82244SMatan Azrad 		if (diag) {
21254fb82244SMatan Azrad 			printf("Failed to setup even callback for event %d\n",
21264fb82244SMatan Azrad 				event_type);
21274fb82244SMatan Azrad 			return -1;
21284fb82244SMatan Azrad 		}
21294fb82244SMatan Azrad 	}
21304fb82244SMatan Azrad 
213192d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2132edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
213392d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2134ce8d5614SIntel 		printf("Please stop the ports first\n");
2135ce8d5614SIntel 
2136ce8d5614SIntel 	printf("Done\n");
2137148f963fSBruce Richardson 	return 0;
2138ce8d5614SIntel }
2139ce8d5614SIntel 
2140ce8d5614SIntel void
2141ce8d5614SIntel stop_port(portid_t pid)
2142ce8d5614SIntel {
2143ce8d5614SIntel 	portid_t pi;
2144ce8d5614SIntel 	struct rte_port *port;
2145ce8d5614SIntel 	int need_check_link_status = 0;
2146ce8d5614SIntel 
2147ce8d5614SIntel 	if (dcb_test) {
2148ce8d5614SIntel 		dcb_test = 0;
2149ce8d5614SIntel 		dcb_config = 0;
2150ce8d5614SIntel 	}
21514468635fSMichael Qiu 
21524468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
21534468635fSMichael Qiu 		return;
21544468635fSMichael Qiu 
2155ce8d5614SIntel 	printf("Stopping ports...\n");
2156ce8d5614SIntel 
21577d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
21584468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2159ce8d5614SIntel 			continue;
2160ce8d5614SIntel 
2161a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2162a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2163a8ef3e3aSBernard Iremonger 			continue;
2164a8ef3e3aSBernard Iremonger 		}
2165a8ef3e3aSBernard Iremonger 
21660e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
21670e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
21680e545d30SBernard Iremonger 			continue;
21690e545d30SBernard Iremonger 		}
21700e545d30SBernard Iremonger 
2171ce8d5614SIntel 		port = &ports[pi];
2172ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2173ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
2174ce8d5614SIntel 			continue;
2175ce8d5614SIntel 
2176ce8d5614SIntel 		rte_eth_dev_stop(pi);
2177ce8d5614SIntel 
2178ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2179ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2180ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
2181ce8d5614SIntel 		need_check_link_status = 1;
2182ce8d5614SIntel 	}
2183bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
2184edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
2185ce8d5614SIntel 
2186ce8d5614SIntel 	printf("Done\n");
2187ce8d5614SIntel }
2188ce8d5614SIntel 
2189ce8d5614SIntel void
2190ce8d5614SIntel close_port(portid_t pid)
2191ce8d5614SIntel {
2192ce8d5614SIntel 	portid_t pi;
2193ce8d5614SIntel 	struct rte_port *port;
2194ce8d5614SIntel 
21954468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
21964468635fSMichael Qiu 		return;
21974468635fSMichael Qiu 
2198ce8d5614SIntel 	printf("Closing ports...\n");
2199ce8d5614SIntel 
22007d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
22014468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2202ce8d5614SIntel 			continue;
2203ce8d5614SIntel 
2204a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2205a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2206a8ef3e3aSBernard Iremonger 			continue;
2207a8ef3e3aSBernard Iremonger 		}
2208a8ef3e3aSBernard Iremonger 
22090e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
22100e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
22110e545d30SBernard Iremonger 			continue;
22120e545d30SBernard Iremonger 		}
22130e545d30SBernard Iremonger 
2214ce8d5614SIntel 		port = &ports[pi];
2215ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2216d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2217d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
2218d4e8ad64SMichael Qiu 			continue;
2219d4e8ad64SMichael Qiu 		}
2220d4e8ad64SMichael Qiu 
2221d4e8ad64SMichael Qiu 		if (rte_atomic16_cmpset(&(port->port_status),
2222ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2223ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2224ce8d5614SIntel 			continue;
2225ce8d5614SIntel 		}
2226ce8d5614SIntel 
2227938a184aSAdrien Mazarguil 		if (port->flow_list)
2228938a184aSAdrien Mazarguil 			port_flow_flush(pi);
2229ce8d5614SIntel 		rte_eth_dev_close(pi);
2230ce8d5614SIntel 
2231ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2232ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2233b38bb262SPablo de Lara 			printf("Port %d cannot be set to closed\n", pi);
2234ce8d5614SIntel 	}
2235ce8d5614SIntel 
2236ce8d5614SIntel 	printf("Done\n");
2237ce8d5614SIntel }
2238ce8d5614SIntel 
2239edab33b1STetsuya Mukawa void
224097f1e196SWei Dai reset_port(portid_t pid)
224197f1e196SWei Dai {
224297f1e196SWei Dai 	int diag;
224397f1e196SWei Dai 	portid_t pi;
224497f1e196SWei Dai 	struct rte_port *port;
224597f1e196SWei Dai 
224697f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
224797f1e196SWei Dai 		return;
224897f1e196SWei Dai 
224997f1e196SWei Dai 	printf("Resetting ports...\n");
225097f1e196SWei Dai 
225197f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
225297f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
225397f1e196SWei Dai 			continue;
225497f1e196SWei Dai 
225597f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
225697f1e196SWei Dai 			printf("Please remove port %d from forwarding "
225797f1e196SWei Dai 			       "configuration.\n", pi);
225897f1e196SWei Dai 			continue;
225997f1e196SWei Dai 		}
226097f1e196SWei Dai 
226197f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
226297f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
226397f1e196SWei Dai 			       pi);
226497f1e196SWei Dai 			continue;
226597f1e196SWei Dai 		}
226697f1e196SWei Dai 
226797f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
226897f1e196SWei Dai 		if (diag == 0) {
226997f1e196SWei Dai 			port = &ports[pi];
227097f1e196SWei Dai 			port->need_reconfig = 1;
227197f1e196SWei Dai 			port->need_reconfig_queues = 1;
227297f1e196SWei Dai 		} else {
227397f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
227497f1e196SWei Dai 		}
227597f1e196SWei Dai 	}
227697f1e196SWei Dai 
227797f1e196SWei Dai 	printf("Done\n");
227897f1e196SWei Dai }
227997f1e196SWei Dai 
228097f1e196SWei Dai void
2281edab33b1STetsuya Mukawa attach_port(char *identifier)
2282ce8d5614SIntel {
2283ebf5e9b7SBernard Iremonger 	portid_t pi = 0;
2284931126baSBernard Iremonger 	unsigned int socket_id;
2285ce8d5614SIntel 
2286edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2287edab33b1STetsuya Mukawa 
2288edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2289edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2290edab33b1STetsuya Mukawa 		return;
2291ce8d5614SIntel 	}
2292ce8d5614SIntel 
2293edab33b1STetsuya Mukawa 	if (rte_eth_dev_attach(identifier, &pi))
2294edab33b1STetsuya Mukawa 		return;
2295edab33b1STetsuya Mukawa 
2296931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2297931126baSBernard Iremonger 	/* if socket_id is invalid, set to 0 */
2298931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
2299931126baSBernard Iremonger 		socket_id = 0;
2300931126baSBernard Iremonger 	reconfig(pi, socket_id);
2301edab33b1STetsuya Mukawa 	rte_eth_promiscuous_enable(pi);
2302edab33b1STetsuya Mukawa 
23034918a357SXiaoyun Li 	ports_ids[nb_ports] = pi;
2304d9a42a69SThomas Monjalon 	nb_ports = rte_eth_dev_count_avail();
2305edab33b1STetsuya Mukawa 
2306edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2307edab33b1STetsuya Mukawa 
230803ce2c53SMatan Azrad 	update_fwd_ports(pi);
230903ce2c53SMatan Azrad 
2310edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2311edab33b1STetsuya Mukawa 	printf("Done\n");
2312edab33b1STetsuya Mukawa }
2313edab33b1STetsuya Mukawa 
2314edab33b1STetsuya Mukawa void
231528caa76aSZhiyong Yang detach_port(portid_t port_id)
23165f4ec54fSChen Jing D(Mark) {
2317edab33b1STetsuya Mukawa 	char name[RTE_ETH_NAME_MAX_LEN];
23184918a357SXiaoyun Li 	uint16_t i;
23195f4ec54fSChen Jing D(Mark) 
2320edab33b1STetsuya Mukawa 	printf("Detaching a port...\n");
23215f4ec54fSChen Jing D(Mark) 
2322edab33b1STetsuya Mukawa 	if (!port_is_closed(port_id)) {
2323edab33b1STetsuya Mukawa 		printf("Please close port first\n");
2324edab33b1STetsuya Mukawa 		return;
2325edab33b1STetsuya Mukawa 	}
2326edab33b1STetsuya Mukawa 
2327938a184aSAdrien Mazarguil 	if (ports[port_id].flow_list)
2328938a184aSAdrien Mazarguil 		port_flow_flush(port_id);
2329938a184aSAdrien Mazarguil 
23303070419eSGaetan Rivet 	if (rte_eth_dev_detach(port_id, name)) {
2331adea04c4SZhiyong Yang 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2332edab33b1STetsuya Mukawa 		return;
23333070419eSGaetan Rivet 	}
2334edab33b1STetsuya Mukawa 
23354918a357SXiaoyun Li 	for (i = 0; i < nb_ports; i++) {
23364918a357SXiaoyun Li 		if (ports_ids[i] == port_id) {
23374918a357SXiaoyun Li 			ports_ids[i] = ports_ids[nb_ports-1];
23384918a357SXiaoyun Li 			ports_ids[nb_ports-1] = 0;
23394918a357SXiaoyun Li 			break;
23404918a357SXiaoyun Li 		}
23414918a357SXiaoyun Li 	}
2342d9a42a69SThomas Monjalon 	nb_ports = rte_eth_dev_count_avail();
2343edab33b1STetsuya Mukawa 
234403ce2c53SMatan Azrad 	update_fwd_ports(RTE_MAX_ETHPORTS);
234503ce2c53SMatan Azrad 
2346adea04c4SZhiyong Yang 	printf("Port %u is detached. Now total ports is %d\n",
2347adea04c4SZhiyong Yang 			port_id, nb_ports);
2348edab33b1STetsuya Mukawa 	printf("Done\n");
2349edab33b1STetsuya Mukawa 	return;
23505f4ec54fSChen Jing D(Mark) }
23515f4ec54fSChen Jing D(Mark) 
/*
 * Testpmd exit path: stop packet forwarding if still running, shut down
 * (stop + close) every port, and tear down hot-plug event monitoring
 * when it was enabled.  Ordering matters: forwarding must be quiesced
 * before ports are stopped, and event handling is dismantled last.
 */
void
pmd_test_exit(void)
{
	struct rte_device *device;
	portid_t pt_id;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		/* No point polling link status on ports being torn down. */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);

			/*
			 * This is a workaround to fix a virtio-user issue that
			 * requires to call clean-up routine to remove existing
			 * socket.
			 * This workaround valid only for testpmd, needs a fix
			 * valid for all applications.
			 * TODO: Implement proper resource cleanup
			 */
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port(pt_id);
		}
	}

	if (hot_plug) {
		/* Unwind hot-plug support in reverse order of setup; each
		 * failure aborts the remaining teardown steps. */
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			eth_dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}

	printf("\nBye...\n");
}
2410af75078fSIntel 
/* Prototype of a handler for an interactive testpmd menu command. */
typedef void (*cmd_func_t)(void);
/* Associates a menu command name with the function implementing it. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in the pmd_test_menu[] command table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2418af75078fSIntel 
2419ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2420af75078fSIntel static void
2421edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2422af75078fSIntel {
2423ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2424ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2425f8244c63SZhiyong Yang 	portid_t portid;
2426f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2427ce8d5614SIntel 	struct rte_eth_link link;
2428ce8d5614SIntel 
2429ce8d5614SIntel 	printf("Checking link statuses...\n");
2430ce8d5614SIntel 	fflush(stdout);
2431ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2432ce8d5614SIntel 		all_ports_up = 1;
24337d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2434ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2435ce8d5614SIntel 				continue;
2436ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2437ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2438ce8d5614SIntel 			/* print link status if flag set */
2439ce8d5614SIntel 			if (print_flag == 1) {
2440ce8d5614SIntel 				if (link.link_status)
2441f8244c63SZhiyong Yang 					printf(
2442f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2443f8244c63SZhiyong Yang 					portid, link.link_speed,
2444ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2445ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2446ce8d5614SIntel 				else
2447f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2448ce8d5614SIntel 				continue;
2449ce8d5614SIntel 			}
2450ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
245109419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2452ce8d5614SIntel 				all_ports_up = 0;
2453ce8d5614SIntel 				break;
2454ce8d5614SIntel 			}
2455ce8d5614SIntel 		}
2456ce8d5614SIntel 		/* after finally printing all link status, get out */
2457ce8d5614SIntel 		if (print_flag == 1)
2458ce8d5614SIntel 			break;
2459ce8d5614SIntel 
2460ce8d5614SIntel 		if (all_ports_up == 0) {
2461ce8d5614SIntel 			fflush(stdout);
2462ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2463ce8d5614SIntel 		}
2464ce8d5614SIntel 
2465ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2466ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2467ce8d5614SIntel 			print_flag = 1;
2468ce8d5614SIntel 		}
24698ea656f8SGaetan Rivet 
24708ea656f8SGaetan Rivet 		if (lsc_interrupt)
24718ea656f8SGaetan Rivet 			break;
2472ce8d5614SIntel 	}
2473af75078fSIntel }
2474af75078fSIntel 
2475284c908cSGaetan Rivet static void
2476284c908cSGaetan Rivet rmv_event_callback(void *arg)
2477284c908cSGaetan Rivet {
24783b97888aSMatan Azrad 	int need_to_start = 0;
24790da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
248028caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
2481284c908cSGaetan Rivet 
2482284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2483284c908cSGaetan Rivet 
24843b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
24853b97888aSMatan Azrad 		need_to_start = 1;
24863b97888aSMatan Azrad 		stop_packet_forwarding();
24873b97888aSMatan Azrad 	}
24880da2a62bSMatan Azrad 	no_link_check = 1;
2489284c908cSGaetan Rivet 	stop_port(port_id);
24900da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
2491284c908cSGaetan Rivet 	close_port(port_id);
24923b97888aSMatan Azrad 	detach_port(port_id);
24933b97888aSMatan Azrad 	if (need_to_start)
24943b97888aSMatan Azrad 		start_packet_forwarding(0);
2495284c908cSGaetan Rivet }
2496284c908cSGaetan Rivet 
/* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback registered for every event type (see
 * start_port()).  Prints the event when enabled in event_print_mask and,
 * for a device-removal interrupt, schedules rmv_event_callback via an
 * EAL alarm so the teardown does not run in interrupt context.
 * Always returns 0.
 */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	/* Human-readable names indexed by rte_eth_event_type. */
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_IPSEC] = "IPsec",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_NEW] = "device probed",
		[RTE_ETH_EVENT_DESTROY] = "device released",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	/* Silently ignore events for ports testpmd does not manage. */
	if (port_id_is_invalid(port_id, DISABLED_WARN))
		return 0;

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		/* Defer removal handling out of the interrupt thread. */
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
254376ad4a2dSGaetan Rivet 
/* This function is used by the interrupt thread */
/*
 * EAL device (hot-plug) event callback.  On REMOVE it maps the EAL
 * device name back to an ethdev port id and reuses the common removal
 * path (rmv_event_callback); ADD is currently only logged.
 */
static void
eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
			     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		/* NOTE(review): no return here — execution falls through to
		 * the switch below, whose default case ignores the unknown
		 * type; presumably intentional, but worth confirming. */
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		/* Reuse the ethdev removal path with the resolved port id. */
		rmv_event_callback((void *)(intptr_t)port_id);
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}
2581fb73e096SJeff Guo 
2582013af9b6SIntel static int
258328caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2584af75078fSIntel {
2585013af9b6SIntel 	uint16_t i;
2586af75078fSIntel 	int diag;
2587013af9b6SIntel 	uint8_t mapping_found = 0;
2588af75078fSIntel 
2589013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2590013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2591013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2592013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2593013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2594013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2595013af9b6SIntel 			if (diag != 0)
2596013af9b6SIntel 				return diag;
2597013af9b6SIntel 			mapping_found = 1;
2598af75078fSIntel 		}
2599013af9b6SIntel 	}
2600013af9b6SIntel 	if (mapping_found)
2601013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2602013af9b6SIntel 	return 0;
2603013af9b6SIntel }
2604013af9b6SIntel 
2605013af9b6SIntel static int
260628caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2607013af9b6SIntel {
2608013af9b6SIntel 	uint16_t i;
2609013af9b6SIntel 	int diag;
2610013af9b6SIntel 	uint8_t mapping_found = 0;
2611013af9b6SIntel 
2612013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2613013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2614013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2615013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2616013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2617013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2618013af9b6SIntel 			if (diag != 0)
2619013af9b6SIntel 				return diag;
2620013af9b6SIntel 			mapping_found = 1;
2621013af9b6SIntel 		}
2622013af9b6SIntel 	}
2623013af9b6SIntel 	if (mapping_found)
2624013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2625013af9b6SIntel 	return 0;
2626013af9b6SIntel }
2627013af9b6SIntel 
2628013af9b6SIntel static void
262928caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2630013af9b6SIntel {
2631013af9b6SIntel 	int diag = 0;
2632013af9b6SIntel 
2633013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2634af75078fSIntel 	if (diag != 0) {
2635013af9b6SIntel 		if (diag == -ENOTSUP) {
2636013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2637013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2638013af9b6SIntel 		}
2639013af9b6SIntel 		else
2640013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2641013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2642013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2643af75078fSIntel 					pi, diag);
2644af75078fSIntel 	}
2645013af9b6SIntel 
2646013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2647af75078fSIntel 	if (diag != 0) {
2648013af9b6SIntel 		if (diag == -ENOTSUP) {
2649013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2650013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2651013af9b6SIntel 		}
2652013af9b6SIntel 		else
2653013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2654013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2655013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2656af75078fSIntel 					pi, diag);
2657af75078fSIntel 	}
2658af75078fSIntel }
2659af75078fSIntel 
2660f2c5125aSPablo de Lara static void
2661f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2662f2c5125aSPablo de Lara {
2663d44f8a48SQi Zhang 	uint16_t qid;
2664f2c5125aSPablo de Lara 
2665d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
2666d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2667d44f8a48SQi Zhang 
2668d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
2669f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2670d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2671f2c5125aSPablo de Lara 
2672f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2673d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2674f2c5125aSPablo de Lara 
2675f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2676d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2677f2c5125aSPablo de Lara 
2678f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2679d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2680f2c5125aSPablo de Lara 
2681f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2682d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2683f2c5125aSPablo de Lara 
2684d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
2685d44f8a48SQi Zhang 	}
2686d44f8a48SQi Zhang 
2687d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
2688d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
2689d44f8a48SQi Zhang 
2690d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
2691f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2692d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2693f2c5125aSPablo de Lara 
2694f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2695d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2696f2c5125aSPablo de Lara 
2697f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2698d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2699f2c5125aSPablo de Lara 
2700f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2701d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2702f2c5125aSPablo de Lara 
2703f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2704d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2705d44f8a48SQi Zhang 
2706d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
2707d44f8a48SQi Zhang 	}
2708f2c5125aSPablo de Lara }
2709f2c5125aSPablo de Lara 
2710013af9b6SIntel void
2711013af9b6SIntel init_port_config(void)
2712013af9b6SIntel {
2713013af9b6SIntel 	portid_t pid;
2714013af9b6SIntel 	struct rte_port *port;
2715013af9b6SIntel 
27167d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
2717013af9b6SIntel 		port = &ports[pid];
2718013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
2719422515b9SAdrien Mazarguil 		rte_eth_dev_info_get(pid, &port->dev_info);
27203ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2721013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
272290892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2723422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
2724af75078fSIntel 		} else {
2725013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2726013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2727af75078fSIntel 		}
27283ce690d3SBruce Richardson 
27295f592039SJingjing Wu 		if (port->dcb_flag == 0) {
27303ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
27313ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
27323ce690d3SBruce Richardson 			else
27333ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
27343ce690d3SBruce Richardson 		}
27353ce690d3SBruce Richardson 
2736f2c5125aSPablo de Lara 		rxtx_port_config(port);
2737013af9b6SIntel 
2738013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
2739013af9b6SIntel 
2740013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
274150c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2742e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
27437b7e5ba7SIntel #endif
27448ea656f8SGaetan Rivet 
27458ea656f8SGaetan Rivet 		if (lsc_interrupt &&
27468ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
27478ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
27488ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
2749284c908cSGaetan Rivet 		if (rmv_interrupt &&
2750284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
2751284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
2752284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
2753013af9b6SIntel 	}
2754013af9b6SIntel }
2755013af9b6SIntel 
275641b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
275741b05095SBernard Iremonger {
275841b05095SBernard Iremonger 	struct rte_port *port;
275941b05095SBernard Iremonger 
276041b05095SBernard Iremonger 	port = &ports[slave_pid];
276141b05095SBernard Iremonger 	port->slave_flag = 1;
276241b05095SBernard Iremonger }
276341b05095SBernard Iremonger 
276441b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
276541b05095SBernard Iremonger {
276641b05095SBernard Iremonger 	struct rte_port *port;
276741b05095SBernard Iremonger 
276841b05095SBernard Iremonger 	port = &ports[slave_pid];
276941b05095SBernard Iremonger 	port->slave_flag = 0;
277041b05095SBernard Iremonger }
277141b05095SBernard Iremonger 
27720e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
27730e545d30SBernard Iremonger {
27740e545d30SBernard Iremonger 	struct rte_port *port;
27750e545d30SBernard Iremonger 
27760e545d30SBernard Iremonger 	port = &ports[slave_pid];
2777b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2778b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2779b8b8b344SMatan Azrad 		return 1;
2780b8b8b344SMatan Azrad 	return 0;
27810e545d30SBernard Iremonger }
27820e545d30SBernard Iremonger 
/* VLAN IDs installed into the VMDq+DCB pool map (see get_eth_dcb_conf())
 * and registered as Rx VLAN filters in init_port_dcb_config(). */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2789013af9b6SIntel 
/*
 * Fill @eth_conf with a DCB configuration for port @pid.
 *
 * Two layouts are produced:
 *  - DCB_VT_ENABLED: VMDq+DCB.  Pools are sized from @num_tcs (4 TCs ->
 *    32 pools, otherwise 16), each vlan_tags[] entry is mapped to one
 *    pool, and user priorities are spread round-robin over the TCs.
 *  - otherwise: plain DCB combined with RSS on Rx; the port's current
 *    RSS hash configuration is read back and copied into @eth_conf.
 *
 * @param pid      port whose RSS config is queried in the non-VT case
 * @param eth_conf configuration structure to fill (caller-owned)
 * @param dcb_mode DCB_VT_ENABLED selects the VMDq+DCB layout
 * @param num_tcs  number of traffic classes to distribute priorities over
 * @param pfc_en   non-zero additionally advertises PFC capability
 * @return 0 on success, or the negative value returned by
 *         rte_eth_dev_rss_hash_conf_get() on failure
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs allow 32 pools, 8 TCs only 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* One VLAN tag per pool, each tag steering to exactly one
		 * pool (single bit in the pools mask). */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Spread the 8 user priorities round-robin over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		/* Keep the port's existing RSS hash settings alongside DCB. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Round-robin priority-to-TC mapping, as in the VT case. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	/* Always advertise priority groups; PFC only when requested. */
	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2863013af9b6SIntel 
2864013af9b6SIntel int
28651a572499SJingjing Wu init_port_dcb_config(portid_t pid,
28661a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
28671a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
28681a572499SJingjing Wu 		     uint8_t pfc_en)
2869013af9b6SIntel {
2870013af9b6SIntel 	struct rte_eth_conf port_conf;
2871013af9b6SIntel 	struct rte_port *rte_port;
2872013af9b6SIntel 	int retval;
2873013af9b6SIntel 	uint16_t i;
2874013af9b6SIntel 
28752a977b89SWenzhuo Lu 	rte_port = &ports[pid];
2876013af9b6SIntel 
2877013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2878013af9b6SIntel 	/* Enter DCB configuration status */
2879013af9b6SIntel 	dcb_config = 1;
2880013af9b6SIntel 
2881d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2882d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
2883d5354e89SYanglong Wu 
2884013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
2885ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2886013af9b6SIntel 	if (retval < 0)
2887013af9b6SIntel 		return retval;
28880074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2889013af9b6SIntel 
28902f203d44SQi Zhang 	/* re-configure the device . */
28912f203d44SQi Zhang 	rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
28922a977b89SWenzhuo Lu 
28932a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
28942a977b89SWenzhuo Lu 
28952a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
28962a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
28972a977b89SWenzhuo Lu 	 */
28982a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
28992a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
29002a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
29012a977b89SWenzhuo Lu 			" for port %d.", pid);
29022a977b89SWenzhuo Lu 		return -1;
29032a977b89SWenzhuo Lu 	}
29042a977b89SWenzhuo Lu 
29052a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
29062a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
29072a977b89SWenzhuo Lu 	 */
29082a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
290986ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
291086ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
291186ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
291286ef65eeSBernard Iremonger 		} else {
29132a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
29142a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
291586ef65eeSBernard Iremonger 		}
29162a977b89SWenzhuo Lu 	} else {
29172a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
29182a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
29192a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
29202a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
29212a977b89SWenzhuo Lu 		} else {
29222a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
29232a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
29242a977b89SWenzhuo Lu 
29252a977b89SWenzhuo Lu 		}
29262a977b89SWenzhuo Lu 	}
29272a977b89SWenzhuo Lu 	rx_free_thresh = 64;
29282a977b89SWenzhuo Lu 
2929013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2930013af9b6SIntel 
2931f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
2932013af9b6SIntel 	/* VLAN filter */
29330074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
29341a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2935013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
2936013af9b6SIntel 
2937013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2938013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
2939013af9b6SIntel 
29407741e4cfSIntel 	rte_port->dcb_flag = 1;
29417741e4cfSIntel 
2942013af9b6SIntel 	return 0;
2943af75078fSIntel }
2944af75078fSIntel 
2945ffc468ffSTetsuya Mukawa static void
2946ffc468ffSTetsuya Mukawa init_port(void)
2947ffc468ffSTetsuya Mukawa {
2948ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
2949ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
2950ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2951ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
2952ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
2953ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
2954ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
2955ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
2956ffc468ffSTetsuya Mukawa 	}
2957ffc468ffSTetsuya Mukawa }
2958ffc468ffSTetsuya Mukawa 
/*
 * Common cleanup path for fatal signals: run the PMD test exit routine,
 * then the prompt exit routine (in that order).
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2965d3a274ceSZhihong Wang 
2966d3a274ceSZhihong Wang static void
2967cfea1f30SPablo de Lara print_stats(void)
2968cfea1f30SPablo de Lara {
2969cfea1f30SPablo de Lara 	uint8_t i;
2970cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2971cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2972cfea1f30SPablo de Lara 
2973cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
2974cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
2975cfea1f30SPablo de Lara 
2976cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
2977cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2978cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
2979cfea1f30SPablo de Lara }
2980cfea1f30SPablo de Lara 
2981cfea1f30SPablo de Lara static void
2982d3a274ceSZhihong Wang signal_handler(int signum)
2983d3a274ceSZhihong Wang {
2984d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
2985d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
2986d3a274ceSZhihong Wang 				signum);
2987102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
2988102b7329SReshma Pattan 		/* uninitialize packet capture framework */
2989102b7329SReshma Pattan 		rte_pdump_uninit();
2990102b7329SReshma Pattan #endif
299162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
299262d3216dSReshma Pattan 		rte_latencystats_uninit();
299362d3216dSReshma Pattan #endif
2994d3a274ceSZhihong Wang 		force_quit();
2995d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
2996d9a191a0SPhil Yang 		f_quit = 1;
2997d3a274ceSZhihong Wang 		/* exit with the expected status */
2998d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
2999d3a274ceSZhihong Wang 		kill(getpid(), signum);
3000d3a274ceSZhihong Wang 	}
3001d3a274ceSZhihong Wang }
3002d3a274ceSZhihong Wang 
/*
 * testpmd entry point.
 *
 * Sequence: install signal handlers, initialize EAL, register the testpmd
 * log type, start packet capture, enumerate probed ports, allocate port
 * state, parse the application arguments, configure and start all ports,
 * then either run the interactive prompt or start forwarding directly and
 * (optionally) loop printing statistics until a signal or Enter.
 *
 * Returns 0 on normal exit, 1 on read() failure in non-interactive mode,
 * -1 on hotplug setup failure.
 */
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	/* Handlers installed before EAL init so early aborts are caught. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* EAL consumes its own arguments; diag is how many it ate. */
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	/* Record the ids of all ports EAL probed. */
	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* Skip the EAL arguments and parse testpmd's own. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	/* Pin all current and future pages; failure is only a notice. */
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();

	/* Optional device hotplug support: enable handling, start the
	 * monitor thread and register the event callback. */
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			eth_dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail  to register device event callback\n");
			return -1;
		}
	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n",	ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	/* Optionally replay a command file before going interactive. */
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		/* With --stats-period, print stats every stats_period
		 * seconds until the signal handler sets f_quit.
		 * NOTE(review): f_quit is set from a signal handler; its
		 * declaration is elsewhere in this file — confirm it is
		 * volatile sig_atomic_t or equivalent. */
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
3198