xref: /dpdk/app/test-pmd/testpmd.c (revision 4f1ed78ebd26f2393fd3cf29a9a9fa95ce14eb44)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h>
31af75078fSIntel #include <rte_memory.h>
32af75078fSIntel #include <rte_memcpy.h>
33af75078fSIntel #include <rte_launch.h>
34af75078fSIntel #include <rte_eal.h>
35284c908cSGaetan Rivet #include <rte_alarm.h>
36af75078fSIntel #include <rte_per_lcore.h>
37af75078fSIntel #include <rte_lcore.h>
38af75078fSIntel #include <rte_atomic.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
577e4441c8SRemy Horton #include <rte_metrics.h>
587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
597e4441c8SRemy Horton #include <rte_bitrate.h>
607e4441c8SRemy Horton #endif
6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6262d3216dSReshma Pattan #include <rte_latencystats.h>
6362d3216dSReshma Pattan #endif
64af75078fSIntel 
65af75078fSIntel #include "testpmd.h"
66af75078fSIntel 
67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
70c7f5dba7SAnatoly Burakov #else
71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
72c7f5dba7SAnatoly Burakov #endif
73c7f5dba7SAnatoly Burakov 
74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
77c7f5dba7SAnatoly Burakov #else
78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
79c7f5dba7SAnatoly Burakov #endif
80c7f5dba7SAnatoly Burakov 
81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
140af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
/*
 * Forwarding engines.
 * NULL-terminated table of every forwarding engine compiled in;
 * cur_fwd_eng (below) selects the active one and defaults to io.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,	/* only when the softnic PMD is built in */
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,	/* only with IEEE1588 timestamping support */
#endif
	NULL,	/* sentinel marking the end of the table */
};
190af75078fSIntel 
191af75078fSIntel struct fwd_config cur_fwd_config;
192af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193bf56fce1SZhihong Wang uint32_t retry_enabled;
194bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
196af75078fSIntel 
197af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
199c8798818SIntel                                       * specified on command-line. */
200cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
201d9a191a0SPhil Yang 
/*
 * In a container, it is not possible to terminate a process that is
 * running with the 'stats-period' option. Set this flag to exit the
 * stats-period loop after SIGINT/SIGTERM is received.
 */
206d9a191a0SPhil Yang uint8_t f_quit;
207d9a191a0SPhil Yang 
208af75078fSIntel /*
209af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
210af75078fSIntel  */
211af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
214af75078fSIntel };
215af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
216af75078fSIntel 
21779bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
21879bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
21979bec05bSKonstantin Ananyev 
220af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
221e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
222af75078fSIntel 
223900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
224900550deSIntel uint8_t dcb_config = 0;
225900550deSIntel 
226900550deSIntel /* Whether the dcb is in testing status */
227900550deSIntel uint8_t dcb_test = 0;
228900550deSIntel 
229af75078fSIntel /*
230af75078fSIntel  * Configurable number of RX/TX queues.
231af75078fSIntel  */
232af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
233af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
234af75078fSIntel 
235af75078fSIntel /*
236af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2378599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
238af75078fSIntel  */
2398599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2408599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
241af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
242af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
243af75078fSIntel 
244f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
245af75078fSIntel /*
246af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
247af75078fSIntel  */
248af75078fSIntel 
249f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
250f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
251f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
252af75078fSIntel 
253f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
254f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
255f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
256af75078fSIntel 
257af75078fSIntel /*
258af75078fSIntel  * Configurable value of RX free threshold.
259af75078fSIntel  */
260f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
261af75078fSIntel 
262af75078fSIntel /*
263ce8d5614SIntel  * Configurable value of RX drop enable.
264ce8d5614SIntel  */
265f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
266ce8d5614SIntel 
267ce8d5614SIntel /*
268af75078fSIntel  * Configurable value of TX free threshold.
269af75078fSIntel  */
270f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
271af75078fSIntel 
272af75078fSIntel /*
273af75078fSIntel  * Configurable value of TX RS bit threshold.
274af75078fSIntel  */
275f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
276af75078fSIntel 
277af75078fSIntel /*
2783c156061SJens Freimann  * Configurable value of buffered packets before sending.
2793c156061SJens Freimann  */
2803c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2813c156061SJens Freimann 
2823c156061SJens Freimann /*
2833c156061SJens Freimann  * Configurable value of packet buffer timeout.
2843c156061SJens Freimann  */
2853c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2863c156061SJens Freimann 
2873c156061SJens Freimann /*
2883c156061SJens Freimann  * Configurable value for size of VNF internal memory area
2893c156061SJens Freimann  * used for simulating noisy neighbour behaviour
2903c156061SJens Freimann  */
2913c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
2923c156061SJens Freimann 
2933c156061SJens Freimann /*
2943c156061SJens Freimann  * Configurable value of number of random writes done in
2953c156061SJens Freimann  * VNF simulation memory area.
2963c156061SJens Freimann  */
2973c156061SJens Freimann uint64_t noisy_lkup_num_writes;
2983c156061SJens Freimann 
2993c156061SJens Freimann /*
3003c156061SJens Freimann  * Configurable value of number of random reads done in
3013c156061SJens Freimann  * VNF simulation memory area.
3023c156061SJens Freimann  */
3033c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3043c156061SJens Freimann 
3053c156061SJens Freimann /*
3063c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3073c156061SJens Freimann  * VNF simulation memory area.
3083c156061SJens Freimann  */
3093c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3103c156061SJens Freimann 
3113c156061SJens Freimann /*
312af75078fSIntel  * Receive Side Scaling (RSS) configuration.
313af75078fSIntel  */
3148a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
315af75078fSIntel 
316af75078fSIntel /*
317af75078fSIntel  * Port topology configuration
318af75078fSIntel  */
319af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
320af75078fSIntel 
3217741e4cfSIntel /*
3227741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3237741e4cfSIntel  */
3247741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3257741e4cfSIntel 
326af75078fSIntel /*
3277ee3e944SVasily Philipov  * Flow API isolated mode.
3287ee3e944SVasily Philipov  */
3297ee3e944SVasily Philipov uint8_t flow_isolate_all;
3307ee3e944SVasily Philipov 
3317ee3e944SVasily Philipov /*
332bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
333bc202406SDavid Marchand  */
334bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
335bc202406SDavid Marchand 
336bc202406SDavid Marchand /*
3378ea656f8SGaetan Rivet  * Enable link status change notification
3388ea656f8SGaetan Rivet  */
3398ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3408ea656f8SGaetan Rivet 
3418ea656f8SGaetan Rivet /*
342284c908cSGaetan Rivet  * Enable device removal notification.
343284c908cSGaetan Rivet  */
344284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
345284c908cSGaetan Rivet 
346fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
347fb73e096SJeff Guo 
348*4f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
349*4f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
350*4f1ed78eSThomas Monjalon 
35197b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
35297b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
35397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
35497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
35597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
35697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
35797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
35897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
35997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
36097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
36197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
36297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
36397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
36497b5d8b5SThomas Monjalon };
36597b5d8b5SThomas Monjalon 
366284c908cSGaetan Rivet /*
3673af72783SGaetan Rivet  * Display or mask ether events
3683af72783SGaetan Rivet  * Default to all events except VF_MBOX
3693af72783SGaetan Rivet  */
3703af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3713af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3723af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3733af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
374badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3753af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3763af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
377e505d84cSAnatoly Burakov /*
378e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
379e505d84cSAnatoly Burakov  */
380e505d84cSAnatoly Burakov int do_mlockall = 0;
3813af72783SGaetan Rivet 
3823af72783SGaetan Rivet /*
3837b7e5ba7SIntel  * NIC bypass mode configuration options.
3847b7e5ba7SIntel  */
3857b7e5ba7SIntel 
38650c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3877b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
388e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
3897b7e5ba7SIntel #endif
3907b7e5ba7SIntel 
391e261265eSRadu Nicolau 
39262d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
39362d3216dSReshma Pattan 
39462d3216dSReshma Pattan /*
39562d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
39662d3216dSReshma Pattan  */
39762d3216dSReshma Pattan uint8_t latencystats_enabled;
39862d3216dSReshma Pattan 
39962d3216dSReshma Pattan /*
40062d3216dSReshma Pattan  * Lcore ID to serive latency statistics.
40162d3216dSReshma Pattan  */
 * Lcore ID to service latency statistics.
40362d3216dSReshma Pattan 
40462d3216dSReshma Pattan #endif
40562d3216dSReshma Pattan 
4067b7e5ba7SIntel /*
407af75078fSIntel  * Ethernet device configuration.
408af75078fSIntel  */
409af75078fSIntel struct rte_eth_rxmode rx_mode = {
410af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
411af75078fSIntel };
412af75078fSIntel 
41307e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
41407e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
41507e5f7bdSShahaf Shuler };
416fd8c20aaSShahaf Shuler 
/*
 * Flow Director (fdir) default configuration: disabled by default, with
 * all-ones match masks on addresses, ports and tunnel ids.
 */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,	/* flow director off by default */
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		/* NOTE(review): 0xFFEF leaves bit 4 of the VLAN TCI
		 * unmasked (unlike the all-ones masks below) -- confirm
		 * this is intentional. */
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
439af75078fSIntel 
4402950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
441af75078fSIntel 
442ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
443ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
444ed30d9b6SIntel 
445ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
446ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
447ed30d9b6SIntel 
448ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
449ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
450ed30d9b6SIntel 
451a4fd5eeeSElza Mathew /*
452a4fd5eeeSElza Mathew  * Display zero values by default for xstats
453a4fd5eeeSElza Mathew  */
454a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
455a4fd5eeeSElza Mathew 
456c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
457c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4587acf894dSStephen Hurd 
459e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4607e4441c8SRemy Horton /* Bitrate statistics */
4617e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
462e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
463e25e6c70SRemy Horton uint8_t bitrate_enabled;
464e25e6c70SRemy Horton #endif
4657e4441c8SRemy Horton 
466b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
467b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
468b40f8d78SJiayu Hu 
4691960be7dSNelio Laranjeiro struct vxlan_encap_conf vxlan_encap_conf = {
4701960be7dSNelio Laranjeiro 	.select_ipv4 = 1,
4711960be7dSNelio Laranjeiro 	.select_vlan = 0,
4721960be7dSNelio Laranjeiro 	.vni = "\x00\x00\x00",
4731960be7dSNelio Laranjeiro 	.udp_src = 0,
4741960be7dSNelio Laranjeiro 	.udp_dst = RTE_BE16(4789),
4751960be7dSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
4761960be7dSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
4771960be7dSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
4781960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
4791960be7dSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
4801960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
4811960be7dSNelio Laranjeiro 	.vlan_tci = 0,
4821960be7dSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
4831960be7dSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
4841960be7dSNelio Laranjeiro };
4851960be7dSNelio Laranjeiro 
486dcd962fcSNelio Laranjeiro struct nvgre_encap_conf nvgre_encap_conf = {
487dcd962fcSNelio Laranjeiro 	.select_ipv4 = 1,
488dcd962fcSNelio Laranjeiro 	.select_vlan = 0,
489dcd962fcSNelio Laranjeiro 	.tni = "\x00\x00\x00",
490dcd962fcSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
491dcd962fcSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
492dcd962fcSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
493dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
494dcd962fcSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
495dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
496dcd962fcSNelio Laranjeiro 	.vlan_tci = 0,
497dcd962fcSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
498dcd962fcSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
499dcd962fcSNelio Laranjeiro };
500dcd962fcSNelio Laranjeiro 
501ed30d9b6SIntel /* Forward function declarations */
502c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
50328caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
50428caa76aSZhiyong Yang 						   struct rte_port *port);
505edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
506f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
50776ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
508d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
50989ecd110SJeff Guo static void eth_dev_event_callback(const char *device_name,
510fb73e096SJeff Guo 				enum rte_dev_event_type type,
511fb73e096SJeff Guo 				void *param);
512ce8d5614SIntel 
513ce8d5614SIntel /*
514ce8d5614SIntel  * Check if all the ports are started.
515ce8d5614SIntel  * If yes, return positive value. If not, return zero.
516ce8d5614SIntel  */
517ce8d5614SIntel static int all_ports_started(void);
518ed30d9b6SIntel 
51952f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
52052f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
52152f38a20SJiayu Hu 
522af75078fSIntel /*
52398a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
524c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
525c9cafcc8SShahaf Shuler  */
526c9cafcc8SShahaf Shuler int
527c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
528c9cafcc8SShahaf Shuler {
529c9cafcc8SShahaf Shuler 	unsigned int i;
530c9cafcc8SShahaf Shuler 
531c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
532c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
533c9cafcc8SShahaf Shuler 			return 0;
534c9cafcc8SShahaf Shuler 	}
535c9cafcc8SShahaf Shuler 	return 1;
536c9cafcc8SShahaf Shuler }
537c9cafcc8SShahaf Shuler 
538c9cafcc8SShahaf Shuler /*
539af75078fSIntel  * Setup default configuration.
540af75078fSIntel  */
541af75078fSIntel static void
542af75078fSIntel set_default_fwd_lcores_config(void)
543af75078fSIntel {
544af75078fSIntel 	unsigned int i;
545af75078fSIntel 	unsigned int nb_lc;
5467acf894dSStephen Hurd 	unsigned int sock_num;
547af75078fSIntel 
548af75078fSIntel 	nb_lc = 0;
549af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
550dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
551dbfb8ec7SPhil Yang 			continue;
552c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
553c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
554c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
555c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
556c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
557c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
558c9cafcc8SShahaf Shuler 			}
559c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5607acf894dSStephen Hurd 		}
561f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
562f54fe5eeSStephen Hurd 			continue;
563f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
564af75078fSIntel 	}
565af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
566af75078fSIntel 	nb_cfg_lcores = nb_lcores;
567af75078fSIntel 	nb_fwd_lcores = 1;
568af75078fSIntel }
569af75078fSIntel 
570af75078fSIntel static void
571af75078fSIntel set_def_peer_eth_addrs(void)
572af75078fSIntel {
573af75078fSIntel 	portid_t i;
574af75078fSIntel 
575af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
576af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
577af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
578af75078fSIntel 	}
579af75078fSIntel }
580af75078fSIntel 
581af75078fSIntel static void
582af75078fSIntel set_default_fwd_ports_config(void)
583af75078fSIntel {
584af75078fSIntel 	portid_t pt_id;
58565a7360cSMatan Azrad 	int i = 0;
586af75078fSIntel 
587effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
58865a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
589af75078fSIntel 
590effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
591effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
592effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
593effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
594effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
595effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
596effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
597effdb8bbSPhil Yang 			}
598effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
599effdb8bbSPhil Yang 		}
600effdb8bbSPhil Yang 	}
601effdb8bbSPhil Yang 
602af75078fSIntel 	nb_cfg_ports = nb_ports;
603af75078fSIntel 	nb_fwd_ports = nb_ports;
604af75078fSIntel }
605af75078fSIntel 
/*
 * Apply the full default forwarding configuration.
 * NOTE: lcore setup runs first so CPU sockets are discovered before
 * port setup, which may append further sockets for attached devices.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
613af75078fSIntel 
614c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
615c7f5dba7SAnatoly Burakov static int
616c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
617c7f5dba7SAnatoly Burakov {
618c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
619c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
620c7f5dba7SAnatoly Burakov 
621c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
622c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
623c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
624c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
625c7f5dba7SAnatoly Burakov 	 */
626c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
627c7f5dba7SAnatoly Burakov 
628c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
629c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
630c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
631c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
632c7f5dba7SAnatoly Burakov 		return -1;
633c7f5dba7SAnatoly Burakov 	}
634c7f5dba7SAnatoly Burakov 
635c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
636c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
637c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
638c7f5dba7SAnatoly Burakov 
639c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
640c7f5dba7SAnatoly Burakov 
641c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
642c7f5dba7SAnatoly Burakov 
643c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
644c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
645c7f5dba7SAnatoly Burakov 		return -1;
646c7f5dba7SAnatoly Burakov 	}
647c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
648c7f5dba7SAnatoly Burakov 
649c7f5dba7SAnatoly Burakov 	return 0;
650c7f5dba7SAnatoly Burakov }
651c7f5dba7SAnatoly Burakov 
/*
 * Return the index of the lowest set bit of v.
 * NOTE: __builtin_ctzll is undefined for v == 0; callers must not
 * pass zero.
 */
static inline uint32_t
bsf64(uint64_t v)
{
	uint32_t lsb_idx;

	lsb_idx = (uint32_t)__builtin_ctzll(v);
	return lsb_idx;
}
657c7f5dba7SAnatoly Burakov 
/*
 * Return the base-2 logarithm of @v rounded up to the nearest integer,
 * i.e. log2 of the smallest power of two that is >= @v.
 * Returns 0 when @v is zero.
 */
static inline uint32_t
log2_u64(uint64_t v)
{
	if (v <= 1)
		return 0;
	/* ceil(log2(v)) == number of bits needed to represent v - 1 */
	return (uint32_t)(64 - __builtin_clzll(v - 1));
}
666c7f5dba7SAnatoly Burakov 
667c7f5dba7SAnatoly Burakov static int
668c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
669c7f5dba7SAnatoly Burakov {
670c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
671c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
672c7f5dba7SAnatoly Burakov 	 */
673c7f5dba7SAnatoly Burakov 	int log2 = log2_u64(page_sz);
674c7f5dba7SAnatoly Burakov 
675c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
676c7f5dba7SAnatoly Burakov }
677c7f5dba7SAnatoly Burakov 
678c7f5dba7SAnatoly Burakov static void *
679c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
680c7f5dba7SAnatoly Burakov {
681c7f5dba7SAnatoly Burakov 	void *addr;
682c7f5dba7SAnatoly Burakov 	int flags;
683c7f5dba7SAnatoly Burakov 
684c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
685c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
686c7f5dba7SAnatoly Burakov 	if (huge)
687c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
688c7f5dba7SAnatoly Burakov 
689c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
690c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
691c7f5dba7SAnatoly Burakov 		return NULL;
692c7f5dba7SAnatoly Burakov 
693c7f5dba7SAnatoly Burakov 	return addr;
694c7f5dba7SAnatoly Burakov }
695c7f5dba7SAnatoly Burakov 
/* Description of an externally allocated memory area (filled by
 * create_extmem() and consumed by setup_extmem()).
 */
struct extmem_param {
	void *addr;          /* base virtual address of the mapping */
	size_t len;          /* total length of the mapping, in bytes */
	size_t pgsz;         /* page size the mapping was created with */
	rte_iova_t *iova_table;      /* IOVA address of each page */
	unsigned int iova_table_len; /* number of entries in iova_table */
};
703c7f5dba7SAnatoly Burakov 
/*
 * Allocate an external memory area large enough for @nb_mbufs mbufs of
 * @mbuf_sz bytes each, trying several hugepage sizes in turn, and fill
 * @param with the mapping's address, length, page size and per-page
 * IOVA table. When @huge is false the system page size is used instead
 * and the memory is mlock()'ed so its IOVAs stay valid.
 * Returns 0 on success, -1 on failure.
 */
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	/* try each candidate page size until one can be mapped */
	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	/* only reached after addr was successfully mapped above */
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
794c7f5dba7SAnatoly Burakov 
795c7f5dba7SAnatoly Burakov static int
796c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
797c7f5dba7SAnatoly Burakov {
798c7f5dba7SAnatoly Burakov 	struct extmem_param param;
799c7f5dba7SAnatoly Burakov 	int socket_id, ret;
800c7f5dba7SAnatoly Burakov 
801c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
802c7f5dba7SAnatoly Burakov 
803c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
804c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
805c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
806c7f5dba7SAnatoly Burakov 		/* create our heap */
807c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
808c7f5dba7SAnatoly Burakov 		if (ret < 0) {
809c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
810c7f5dba7SAnatoly Burakov 			return -1;
811c7f5dba7SAnatoly Burakov 		}
812c7f5dba7SAnatoly Burakov 	}
813c7f5dba7SAnatoly Burakov 
814c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
815c7f5dba7SAnatoly Burakov 	if (ret < 0) {
816c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
817c7f5dba7SAnatoly Burakov 		return -1;
818c7f5dba7SAnatoly Burakov 	}
819c7f5dba7SAnatoly Burakov 
820c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
821c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
822c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
823c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
824c7f5dba7SAnatoly Burakov 
825c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
826c7f5dba7SAnatoly Burakov 
827c7f5dba7SAnatoly Burakov 	/* not needed any more */
828c7f5dba7SAnatoly Burakov 	free(param.iova_table);
829c7f5dba7SAnatoly Burakov 
830c7f5dba7SAnatoly Burakov 	if (ret < 0) {
831c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
832c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
833c7f5dba7SAnatoly Burakov 		return -1;
834c7f5dba7SAnatoly Burakov 	}
835c7f5dba7SAnatoly Burakov 
836c7f5dba7SAnatoly Burakov 	/* success */
837c7f5dba7SAnatoly Burakov 
838c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
839c7f5dba7SAnatoly Burakov 			param.len >> 20);
840c7f5dba7SAnatoly Burakov 
841c7f5dba7SAnatoly Burakov 	return 0;
842c7f5dba7SAnatoly Burakov }
843c7f5dba7SAnatoly Burakov 
/*
 * Create the mbuf pool for a given socket, done once at init time.
 * The allocation strategy is selected by the global mp_alloc_type:
 * native EAL memory, anonymous (non-EAL) memory, or external memory
 * registered in its own malloc heap. Any failure aborts the program.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* total object size: mbuf header plus data segment */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			/* create an empty pool, back it with anonymous memory,
			 * then initialize the pool private area and each mbuf
			 */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			/* set up external memory in its own malloc heap */
			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	/* reached both by falling through from the switch on success and
	 * via goto on failure; rte_mp == NULL means creation failed
	 */
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
926af75078fSIntel 
92720a0286fSLiu Xiaofeng /*
92820a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
92920a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
93020a0286fSLiu Xiaofeng  */
93120a0286fSLiu Xiaofeng static int
93220a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
93320a0286fSLiu Xiaofeng {
93420a0286fSLiu Xiaofeng 	static int warning_once = 0;
93520a0286fSLiu Xiaofeng 
936c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
93720a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
93820a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
93920a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
94020a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
94120a0286fSLiu Xiaofeng 			       " --numa.\n");
94220a0286fSLiu Xiaofeng 		warning_once = 1;
94320a0286fSLiu Xiaofeng 		return -1;
94420a0286fSLiu Xiaofeng 	}
94520a0286fSLiu Xiaofeng 	return 0;
94620a0286fSLiu Xiaofeng }
94720a0286fSLiu Xiaofeng 
9483f7311baSWei Dai /*
9493f7311baSWei Dai  * Get the allowed maximum number of RX queues.
9503f7311baSWei Dai  * *pid return the port id which has minimal value of
9513f7311baSWei Dai  * max_rx_queues in all ports.
9523f7311baSWei Dai  */
9533f7311baSWei Dai queueid_t
9543f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
9553f7311baSWei Dai {
9563f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
9573f7311baSWei Dai 	portid_t pi;
9583f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
9593f7311baSWei Dai 
9603f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
9613f7311baSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
9623f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
9633f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
9643f7311baSWei Dai 			*pid = pi;
9653f7311baSWei Dai 		}
9663f7311baSWei Dai 	}
9673f7311baSWei Dai 	return allowed_max_rxq;
9683f7311baSWei Dai }
9693f7311baSWei Dai 
9703f7311baSWei Dai /*
9713f7311baSWei Dai  * Check input rxq is valid or not.
9723f7311baSWei Dai  * If input rxq is not greater than any of maximum number
9733f7311baSWei Dai  * of RX queues of all ports, it is valid.
9743f7311baSWei Dai  * if valid, return 0, else return -1
9753f7311baSWei Dai  */
9763f7311baSWei Dai int
9773f7311baSWei Dai check_nb_rxq(queueid_t rxq)
9783f7311baSWei Dai {
9793f7311baSWei Dai 	queueid_t allowed_max_rxq;
9803f7311baSWei Dai 	portid_t pid = 0;
9813f7311baSWei Dai 
9823f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
9833f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
9843f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
9853f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
9863f7311baSWei Dai 		       rxq,
9873f7311baSWei Dai 		       allowed_max_rxq,
9883f7311baSWei Dai 		       pid);
9893f7311baSWei Dai 		return -1;
9903f7311baSWei Dai 	}
9913f7311baSWei Dai 	return 0;
9923f7311baSWei Dai }
9933f7311baSWei Dai 
99436db4f6cSWei Dai /*
99536db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
99636db4f6cSWei Dai  * *pid return the port id which has minimal value of
99736db4f6cSWei Dai  * max_tx_queues in all ports.
99836db4f6cSWei Dai  */
99936db4f6cSWei Dai queueid_t
100036db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
100136db4f6cSWei Dai {
100236db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
100336db4f6cSWei Dai 	portid_t pi;
100436db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
100536db4f6cSWei Dai 
100636db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
100736db4f6cSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
100836db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
100936db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
101036db4f6cSWei Dai 			*pid = pi;
101136db4f6cSWei Dai 		}
101236db4f6cSWei Dai 	}
101336db4f6cSWei Dai 	return allowed_max_txq;
101436db4f6cSWei Dai }
101536db4f6cSWei Dai 
101636db4f6cSWei Dai /*
101736db4f6cSWei Dai  * Check input txq is valid or not.
101836db4f6cSWei Dai  * If input txq is not greater than any of maximum number
101936db4f6cSWei Dai  * of TX queues of all ports, it is valid.
102036db4f6cSWei Dai  * if valid, return 0, else return -1
102136db4f6cSWei Dai  */
102236db4f6cSWei Dai int
102336db4f6cSWei Dai check_nb_txq(queueid_t txq)
102436db4f6cSWei Dai {
102536db4f6cSWei Dai 	queueid_t allowed_max_txq;
102636db4f6cSWei Dai 	portid_t pid = 0;
102736db4f6cSWei Dai 
102836db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
102936db4f6cSWei Dai 	if (txq > allowed_max_txq) {
103036db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
103136db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
103236db4f6cSWei Dai 		       txq,
103336db4f6cSWei Dai 		       allowed_max_txq,
103436db4f6cSWei Dai 		       pid);
103536db4f6cSWei Dai 		return -1;
103636db4f6cSWei Dai 	}
103736db4f6cSWei Dai 	return 0;
103836db4f6cSWei Dai }
103936db4f6cSWei Dai 
/*
 * One-time global configuration: allocate per-lcore forwarding
 * contexts, apply default per-port Tx/Rx config and offloads, create
 * the mbuf pools (one per socket under NUMA), set up per-lcore GSO and
 * GRO contexts, and build the forwarding streams. Any allocation
 * failure aborts the program.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* drop Tx offloads the device does not support */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (!(port->dev_info.tx_offload_capa &
			DEV_TX_OFFLOAD_MATCH_METADATA))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MATCH_METADATA;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* fall back to the socket-0 pool if the lcore's socket
		 * has none
		 */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			/* softnic ports need access to the fwd lcore array */
			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}
1208ce8d5614SIntel 
12092950a769SDeclan Doherty 
12102950a769SDeclan Doherty void
1211a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
12122950a769SDeclan Doherty {
12132950a769SDeclan Doherty 	struct rte_port *port;
12142950a769SDeclan Doherty 
12152950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
12162950a769SDeclan Doherty 	port = &ports[new_port_id];
12172950a769SDeclan Doherty 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
12182950a769SDeclan Doherty 
12192950a769SDeclan Doherty 	/* set flag to initialize port/queue */
12202950a769SDeclan Doherty 	port->need_reconfig = 1;
12212950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1222a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
12232950a769SDeclan Doherty 
12242950a769SDeclan Doherty 	init_port_config();
12252950a769SDeclan Doherty }
12262950a769SDeclan Doherty 
12272950a769SDeclan Doherty 
/*
 * Validate the configured queue counts against every port's limits,
 * set each port's socket id (from NUMA config or the device itself),
 * and (re)allocate the global fwd_streams array sized to
 * nb_ports * max(nb_rxq, nb_txq).
 * Returns 0 on success, -1 on invalid queue counts.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* one stream per (port, queue) pair, using the larger queue count */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1316af75078fSIntel 
1317af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1318af75078fSIntel static void
1319af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1320af75078fSIntel {
1321af75078fSIntel 	unsigned int total_burst;
1322af75078fSIntel 	unsigned int nb_burst;
1323af75078fSIntel 	unsigned int burst_stats[3];
1324af75078fSIntel 	uint16_t pktnb_stats[3];
1325af75078fSIntel 	uint16_t nb_pkt;
1326af75078fSIntel 	int burst_percent[3];
1327af75078fSIntel 
1328af75078fSIntel 	/*
1329af75078fSIntel 	 * First compute the total number of packet bursts and the
1330af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1331af75078fSIntel 	 */
1332af75078fSIntel 	total_burst = 0;
1333af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1334af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1335af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1336af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1337af75078fSIntel 		if (nb_burst == 0)
1338af75078fSIntel 			continue;
1339af75078fSIntel 		total_burst += nb_burst;
1340af75078fSIntel 		if (nb_burst > burst_stats[0]) {
1341af75078fSIntel 			burst_stats[1] = burst_stats[0];
1342af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
1343af75078fSIntel 			burst_stats[0] = nb_burst;
1344af75078fSIntel 			pktnb_stats[0] = nb_pkt;
1345fe613657SDaniel Shelepov 		} else if (nb_burst > burst_stats[1]) {
1346fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1347fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
1348af75078fSIntel 		}
1349af75078fSIntel 	}
1350af75078fSIntel 	if (total_burst == 0)
1351af75078fSIntel 		return;
1352af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1353af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1354af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
1355af75078fSIntel 	if (burst_stats[0] == total_burst) {
1356af75078fSIntel 		printf("]\n");
1357af75078fSIntel 		return;
1358af75078fSIntel 	}
1359af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1360af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
1361af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
1362af75078fSIntel 		return;
1363af75078fSIntel 	}
1364af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1365af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1366af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1367af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1368af75078fSIntel 		return;
1369af75078fSIntel 	}
1370af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1371af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1372af75078fSIntel }
1373af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1374af75078fSIntel 
/*
 * Display the forwarding statistics of one port for the run that just
 * finished: RX/TX packet and drop counters, checksum-error counters when
 * the csum engine is active, optional per-burst histograms, and the
 * per-stats-register queue counters when queue-stats mapping is enabled.
 *
 * @stats is expected to hold the delta of the port's counters since
 * forwarding started (the caller subtracts the snapshot taken at start).
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Two layouts: a compact one when no queue-stats mapping is in
	 * use, and a wider column-aligned one when it is. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum error counters only exist for the csum engine. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		/* Only show error/no-mbuf lines when something went wrong. */
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"    Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst histograms come from the streams attached to this port;
	 * they are only set when streams map 1:1 to ports. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-stats-register queue counters, when mapping is enabled. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
1459af75078fSIntel 
1460af75078fSIntel static void
1461af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1462af75078fSIntel {
1463af75078fSIntel 	struct fwd_stream *fs;
1464af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1465af75078fSIntel 
1466af75078fSIntel 	fs = fwd_streams[stream_id];
1467af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1468af75078fSIntel 	    (fs->fwd_dropped == 0))
1469af75078fSIntel 		return;
1470af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1471af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1472af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1473af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1474af75078fSIntel 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1475af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1476af75078fSIntel 
1477af75078fSIntel 	/* if checksum mode */
1478af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1479013af9b6SIntel 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
148058d475b7SJerin Jacob 			"%-14u Rx- bad outer L4 checksum: %-14u\n",
148158d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
148258d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
1483af75078fSIntel 	}
1484af75078fSIntel 
1485af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1486af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1487af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1488af75078fSIntel #endif
1489af75078fSIntel }
1490af75078fSIntel 
1491af75078fSIntel static void
14927741e4cfSIntel flush_fwd_rx_queues(void)
1493af75078fSIntel {
1494af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1495af75078fSIntel 	portid_t  rxp;
14967741e4cfSIntel 	portid_t port_id;
1497af75078fSIntel 	queueid_t rxq;
1498af75078fSIntel 	uint16_t  nb_rx;
1499af75078fSIntel 	uint16_t  i;
1500af75078fSIntel 	uint8_t   j;
1501f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1502594302c7SJames Poole 	uint64_t timer_period;
1503f487715fSReshma Pattan 
1504f487715fSReshma Pattan 	/* convert to number of cycles */
1505594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1506af75078fSIntel 
1507af75078fSIntel 	for (j = 0; j < 2; j++) {
15087741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1509af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
15107741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1511f487715fSReshma Pattan 				/**
1512f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1513f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1514f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1515f487715fSReshma Pattan 				* after 1sec timer expiry.
1516f487715fSReshma Pattan 				*/
1517f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1518af75078fSIntel 				do {
15197741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1520013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1521af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1522af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1523f487715fSReshma Pattan 
1524f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1525f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1526f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1527f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1528f487715fSReshma Pattan 					(timer_tsc < timer_period));
1529f487715fSReshma Pattan 				timer_tsc = 0;
1530af75078fSIntel 			}
1531af75078fSIntel 		}
1532af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1533af75078fSIntel 	}
1534af75078fSIntel }
1535af75078fSIntel 
/*
 * Main per-lcore forwarding loop: repeatedly apply @pkt_fwd to every
 * stream assigned to this lcore (@fc->stream_idx .. +@fc->stream_nb)
 * until fc->stopped is set by stop_packet_forwarding().
 *
 * When compiled in, the bitrate and latency statistics are updated
 * from this loop too, but only on their designated lcores.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	/* Snapshot the port count; nb_ports could change under hotplug. */
	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* This lcore owns the contiguous stream slice starting at stream_idx. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Bitrate stats are computed once per second, and only on
		 * the lcore chosen for that task. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Latency stats likewise run on one designated lcore. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1578af75078fSIntel 
1579af75078fSIntel static int
1580af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1581af75078fSIntel {
1582af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1583af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1584af75078fSIntel 	return 0;
1585af75078fSIntel }
1586af75078fSIntel 
1587af75078fSIntel /*
1588af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1589af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1590af75078fSIntel  */
1591af75078fSIntel static int
1592af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1593af75078fSIntel {
1594af75078fSIntel 	struct fwd_lcore *fwd_lc;
1595af75078fSIntel 	struct fwd_lcore tmp_lcore;
1596af75078fSIntel 
1597af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1598af75078fSIntel 	tmp_lcore = *fwd_lc;
1599af75078fSIntel 	tmp_lcore.stopped = 1;
1600af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1601af75078fSIntel 	return 0;
1602af75078fSIntel }
1603af75078fSIntel 
1604af75078fSIntel /*
1605af75078fSIntel  * Launch packet forwarding:
1606af75078fSIntel  *     - Setup per-port forwarding context.
1607af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1608af75078fSIntel  */
1609af75078fSIntel static void
1610af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1611af75078fSIntel {
1612af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1613af75078fSIntel 	unsigned int i;
1614af75078fSIntel 	unsigned int lc_id;
1615af75078fSIntel 	int diag;
1616af75078fSIntel 
1617af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1618af75078fSIntel 	if (port_fwd_begin != NULL) {
1619af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1620af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1621af75078fSIntel 	}
1622af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1623af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1624af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1625af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1626af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1627af75078fSIntel 						     fwd_lcores[i], lc_id);
1628af75078fSIntel 			if (diag != 0)
1629af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1630af75078fSIntel 				       lc_id, diag);
1631af75078fSIntel 		}
1632af75078fSIntel 	}
1633af75078fSIntel }
1634af75078fSIntel 
1635af75078fSIntel /*
1636af75078fSIntel  * Launch packet forwarding configuration.
1637af75078fSIntel  */
/*
 * Start packet forwarding with the currently selected engine and
 * configuration.
 *
 * Validates that the engine's queue requirements are met (aborts the
 * application otherwise), that all ports are started, and that no run
 * is already in progress.  Snapshots per-port stats and clears all
 * per-stream counters so the next stop reports only this run.  When
 * @with_tx_first > 0, that many single TX-only bursts are sent first
 * to prime loopback topologies, before the real engine is launched.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* Hard configuration errors: abort rather than run a useless mode. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	/* Every other engine needs both RX and TX queues. */
	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already active. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* DCB mode requires every forwarding port configured for DCB
	 * and more than one forwarding core. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain stale packets left in RX queues unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot HW stats per port so stop can report deltas only. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset all per-stream software counters for the new run. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
		fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optionally send N priming TX-only bursts before the real run. */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1742af75078fSIntel 
/*
 * Stop the current forwarding run and display its statistics.
 *
 * Signals every forwarding lcore to stop, waits for them to finish,
 * runs the engine's per-port teardown hook, folds the per-stream
 * software counters into the per-port accumulators, then prints the
 * per-port deltas (HW counters minus the snapshot taken at start)
 * followed by totals accumulated over all forwarding ports.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	/* test_done set means no run is active. */
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Give the engine a chance to tear down each port. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* Fold per-stream counters into the per-port accumulators.  With
	 * multiple streams per port, show per-stream stats now and detach
	 * the port's stream pointers; with a 1:1 mapping, attach them so
	 * fwd_port_stats_display() can show the burst histograms. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

		ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
				fwd_streams[sm_id]->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	/* Per port: subtract the start-of-run snapshot from the current
	 * HW counters so only this run is reported, clear the snapshot,
	 * accumulate totals and display the per-port stats. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1883af75078fSIntel 
1884cfae07fdSOuyang Changchun void
1885cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1886cfae07fdSOuyang Changchun {
1887492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1888cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
1889cfae07fdSOuyang Changchun }
1890cfae07fdSOuyang Changchun 
1891cfae07fdSOuyang Changchun void
1892cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1893cfae07fdSOuyang Changchun {
1894492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
1895cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
1896cfae07fdSOuyang Changchun }
1897cfae07fdSOuyang Changchun 
1898ce8d5614SIntel static int
1899ce8d5614SIntel all_ports_started(void)
1900ce8d5614SIntel {
1901ce8d5614SIntel 	portid_t pi;
1902ce8d5614SIntel 	struct rte_port *port;
1903ce8d5614SIntel 
19047d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1905ce8d5614SIntel 		port = &ports[pi];
1906ce8d5614SIntel 		/* Check if there is a port which is not started */
190741b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
190841b05095SBernard Iremonger 			(port->slave_flag == 0))
1909ce8d5614SIntel 			return 0;
1910ce8d5614SIntel 	}
1911ce8d5614SIntel 
1912ce8d5614SIntel 	/* No port is not started */
1913ce8d5614SIntel 	return 1;
1914ce8d5614SIntel }
1915ce8d5614SIntel 
1916148f963fSBruce Richardson int
19176018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
19186018eb8cSShahaf Shuler {
19196018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
19206018eb8cSShahaf Shuler 
19216018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
19226018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
19236018eb8cSShahaf Shuler 		return 0;
19246018eb8cSShahaf Shuler 	return 1;
19256018eb8cSShahaf Shuler }
19266018eb8cSShahaf Shuler 
19276018eb8cSShahaf Shuler int
1928edab33b1STetsuya Mukawa all_ports_stopped(void)
1929edab33b1STetsuya Mukawa {
1930edab33b1STetsuya Mukawa 	portid_t pi;
1931edab33b1STetsuya Mukawa 
19327d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
19336018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
1934edab33b1STetsuya Mukawa 			return 0;
1935edab33b1STetsuya Mukawa 	}
1936edab33b1STetsuya Mukawa 
1937edab33b1STetsuya Mukawa 	return 1;
1938edab33b1STetsuya Mukawa }
1939edab33b1STetsuya Mukawa 
1940edab33b1STetsuya Mukawa int
1941edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
1942edab33b1STetsuya Mukawa {
1943edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1944edab33b1STetsuya Mukawa 		return 0;
1945edab33b1STetsuya Mukawa 
1946edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1947edab33b1STetsuya Mukawa 		return 0;
1948edab33b1STetsuya Mukawa 
1949edab33b1STetsuya Mukawa 	return 1;
1950edab33b1STetsuya Mukawa }
1951edab33b1STetsuya Mukawa 
1952edab33b1STetsuya Mukawa int
1953ce8d5614SIntel start_port(portid_t pid)
1954ce8d5614SIntel {
195592d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
1956ce8d5614SIntel 	portid_t pi;
1957ce8d5614SIntel 	queueid_t qi;
1958ce8d5614SIntel 	struct rte_port *port;
19592950a769SDeclan Doherty 	struct ether_addr mac_addr;
1960ce8d5614SIntel 
19614468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
19624468635fSMichael Qiu 		return 0;
19634468635fSMichael Qiu 
1964ce8d5614SIntel 	if(dcb_config)
1965ce8d5614SIntel 		dcb_test = 1;
19667d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1967edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1968ce8d5614SIntel 			continue;
1969ce8d5614SIntel 
197092d2703eSMichael Qiu 		need_check_link_status = 0;
1971ce8d5614SIntel 		port = &ports[pi];
1972ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1973ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1974ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
1975ce8d5614SIntel 			continue;
1976ce8d5614SIntel 		}
1977ce8d5614SIntel 
1978ce8d5614SIntel 		if (port->need_reconfig > 0) {
1979ce8d5614SIntel 			port->need_reconfig = 0;
1980ce8d5614SIntel 
19817ee3e944SVasily Philipov 			if (flow_isolate_all) {
19827ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
19837ee3e944SVasily Philipov 				if (ret) {
19847ee3e944SVasily Philipov 					printf("Failed to apply isolated"
19857ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
19867ee3e944SVasily Philipov 					return -1;
19877ee3e944SVasily Philipov 				}
19887ee3e944SVasily Philipov 			}
1989b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
19905706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
199120a0286fSLiu Xiaofeng 					port->socket_id);
1992ce8d5614SIntel 			/* configure port */
1993ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1994ce8d5614SIntel 						&(port->dev_conf));
1995ce8d5614SIntel 			if (diag != 0) {
1996ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1997ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1998ce8d5614SIntel 					printf("Port %d can not be set back "
1999ce8d5614SIntel 							"to stopped\n", pi);
2000ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
2001ce8d5614SIntel 				/* try to reconfigure port next time */
2002ce8d5614SIntel 				port->need_reconfig = 1;
2003148f963fSBruce Richardson 				return -1;
2004ce8d5614SIntel 			}
2005ce8d5614SIntel 		}
2006ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
2007ce8d5614SIntel 			port->need_reconfig_queues = 0;
2008ce8d5614SIntel 			/* setup tx queues */
2009ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
2010b6ea6408SIntel 				if ((numa_support) &&
2011b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2012b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2013d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2014d44f8a48SQi Zhang 						txring_numa[pi],
2015d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2016b6ea6408SIntel 				else
2017b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2018d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2019d44f8a48SQi Zhang 						port->socket_id,
2020d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2021b6ea6408SIntel 
2022ce8d5614SIntel 				if (diag == 0)
2023ce8d5614SIntel 					continue;
2024ce8d5614SIntel 
2025ce8d5614SIntel 				/* Fail to setup tx queue, return */
2026ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2027ce8d5614SIntel 							RTE_PORT_HANDLING,
2028ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2029ce8d5614SIntel 					printf("Port %d can not be set back "
2030ce8d5614SIntel 							"to stopped\n", pi);
2031d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
2032d44f8a48SQi Zhang 				       pi);
2033ce8d5614SIntel 				/* try to reconfigure queues next time */
2034ce8d5614SIntel 				port->need_reconfig_queues = 1;
2035148f963fSBruce Richardson 				return -1;
2036ce8d5614SIntel 			}
2037ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2038d44f8a48SQi Zhang 				/* setup rx queues */
2039b6ea6408SIntel 				if ((numa_support) &&
2040b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2041b6ea6408SIntel 					struct rte_mempool * mp =
2042b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
2043b6ea6408SIntel 					if (mp == NULL) {
2044b6ea6408SIntel 						printf("Failed to setup RX queue:"
2045b6ea6408SIntel 							"No mempool allocation"
2046b6ea6408SIntel 							" on the socket %d\n",
2047b6ea6408SIntel 							rxring_numa[pi]);
2048148f963fSBruce Richardson 						return -1;
2049b6ea6408SIntel 					}
2050b6ea6408SIntel 
2051b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2052d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2053d44f8a48SQi Zhang 					     rxring_numa[pi],
2054d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2055d44f8a48SQi Zhang 					     mp);
20561e1d6bddSBernard Iremonger 				} else {
20571e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
20581e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
20591e1d6bddSBernard Iremonger 					if (mp == NULL) {
20601e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
20611e1d6bddSBernard Iremonger 							"No mempool allocation"
20621e1d6bddSBernard Iremonger 							" on the socket %d\n",
20631e1d6bddSBernard Iremonger 							port->socket_id);
20641e1d6bddSBernard Iremonger 						return -1;
2065b6ea6408SIntel 					}
2066b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2067d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2068d44f8a48SQi Zhang 					     port->socket_id,
2069d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2070d44f8a48SQi Zhang 					     mp);
20711e1d6bddSBernard Iremonger 				}
2072ce8d5614SIntel 				if (diag == 0)
2073ce8d5614SIntel 					continue;
2074ce8d5614SIntel 
2075ce8d5614SIntel 				/* Fail to setup rx queue, return */
2076ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2077ce8d5614SIntel 							RTE_PORT_HANDLING,
2078ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2079ce8d5614SIntel 					printf("Port %d can not be set back "
2080ce8d5614SIntel 							"to stopped\n", pi);
2081d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2082d44f8a48SQi Zhang 				       pi);
2083ce8d5614SIntel 				/* try to reconfigure queues next time */
2084ce8d5614SIntel 				port->need_reconfig_queues = 1;
2085148f963fSBruce Richardson 				return -1;
2086ce8d5614SIntel 			}
2087ce8d5614SIntel 		}
2088b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
2089ce8d5614SIntel 		/* start port */
2090ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2091ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2092ce8d5614SIntel 
2093ce8d5614SIntel 			/* Fail to setup rx queue, return */
2094ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2095ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2096ce8d5614SIntel 				printf("Port %d can not be set back to "
2097ce8d5614SIntel 							"stopped\n", pi);
2098ce8d5614SIntel 			continue;
2099ce8d5614SIntel 		}
2100ce8d5614SIntel 
2101ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2102ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2103ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2104ce8d5614SIntel 
21052950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
2106d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
21072950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
21082950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
21092950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2110d8c89163SZijie Pan 
2111ce8d5614SIntel 		/* at least one port started, need checking link status */
2112ce8d5614SIntel 		need_check_link_status = 1;
2113ce8d5614SIntel 	}
2114ce8d5614SIntel 
211592d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2116edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
211792d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2118ce8d5614SIntel 		printf("Please stop the ports first\n");
2119ce8d5614SIntel 
2120ce8d5614SIntel 	printf("Done\n");
2121148f963fSBruce Richardson 	return 0;
2122ce8d5614SIntel }
2123ce8d5614SIntel 
/*
 * Stop port @pid, or every port when @pid is RTE_PORT_ALL.
 *
 * Ports that are still part of the forwarding configuration (while
 * forwarding runs) or that act as bonding slaves are skipped with a
 * message.  State transitions STARTED -> HANDLING -> STOPPED use
 * atomic compare-and-set so concurrent changes are detected.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* leaving DCB test mode also drops the DCB configuration */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* only ports currently in the STARTED state are stopped */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
2172ce8d5614SIntel 
2173ce6959bfSWisam Jaddo static void
21744f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2175ce6959bfSWisam Jaddo {
21764f1de450SThomas Monjalon 	portid_t i;
21774f1de450SThomas Monjalon 	portid_t new_total = 0;
2178ce6959bfSWisam Jaddo 
21794f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
21804f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
21814f1de450SThomas Monjalon 			array[new_total] = array[i];
21824f1de450SThomas Monjalon 			new_total++;
2183ce6959bfSWisam Jaddo 		}
21844f1de450SThomas Monjalon 	*total = new_total;
21854f1de450SThomas Monjalon }
21864f1de450SThomas Monjalon 
/*
 * Drop detached/invalid ports from the global port lists and keep the
 * configured-port count in sync with the forwarding list.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
2194ce6959bfSWisam Jaddo 
/*
 * Close port @pid, or every port when @pid is RTE_PORT_ALL.
 *
 * Ports still used for forwarding or acting as bonding slaves are
 * skipped.  Flow rules are flushed before rte_eth_dev_close(), and the
 * global port lists are compacted afterwards.  State transitions
 * STOPPED -> HANDLING -> CLOSED use atomic compare-and-set.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset against CLOSED only tests the state, it does not change it */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		remove_invalid_ports();

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
2246ce8d5614SIntel 
2247edab33b1STetsuya Mukawa void
224897f1e196SWei Dai reset_port(portid_t pid)
224997f1e196SWei Dai {
225097f1e196SWei Dai 	int diag;
225197f1e196SWei Dai 	portid_t pi;
225297f1e196SWei Dai 	struct rte_port *port;
225397f1e196SWei Dai 
225497f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
225597f1e196SWei Dai 		return;
225697f1e196SWei Dai 
225797f1e196SWei Dai 	printf("Resetting ports...\n");
225897f1e196SWei Dai 
225997f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
226097f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
226197f1e196SWei Dai 			continue;
226297f1e196SWei Dai 
226397f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
226497f1e196SWei Dai 			printf("Please remove port %d from forwarding "
226597f1e196SWei Dai 			       "configuration.\n", pi);
226697f1e196SWei Dai 			continue;
226797f1e196SWei Dai 		}
226897f1e196SWei Dai 
226997f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
227097f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
227197f1e196SWei Dai 			       pi);
227297f1e196SWei Dai 			continue;
227397f1e196SWei Dai 		}
227497f1e196SWei Dai 
227597f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
227697f1e196SWei Dai 		if (diag == 0) {
227797f1e196SWei Dai 			port = &ports[pi];
227897f1e196SWei Dai 			port->need_reconfig = 1;
227997f1e196SWei Dai 			port->need_reconfig_queues = 1;
228097f1e196SWei Dai 		} else {
228197f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
228297f1e196SWei Dai 		}
228397f1e196SWei Dai 	}
228497f1e196SWei Dai 
228597f1e196SWei Dai 	printf("Done\n");
228697f1e196SWei Dai }
228797f1e196SWei Dai 
228897f1e196SWei Dai void
2289edab33b1STetsuya Mukawa attach_port(char *identifier)
2290ce8d5614SIntel {
2291*4f1ed78eSThomas Monjalon 	portid_t pi;
2292c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2293ce8d5614SIntel 
2294edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2295edab33b1STetsuya Mukawa 
2296edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2297edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2298edab33b1STetsuya Mukawa 		return;
2299ce8d5614SIntel 	}
2300ce8d5614SIntel 
2301c9cce428SThomas Monjalon 	if (rte_dev_probe(identifier) != 0) {
2302c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2303edab33b1STetsuya Mukawa 		return;
2304c9cce428SThomas Monjalon 	}
2305c9cce428SThomas Monjalon 
2306*4f1ed78eSThomas Monjalon 	/* first attach mode: event */
2307*4f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
2308*4f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
2309*4f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2310*4f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
2311*4f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
2312*4f1ed78eSThomas Monjalon 				setup_attached_port(pi);
2313*4f1ed78eSThomas Monjalon 		return;
2314*4f1ed78eSThomas Monjalon 	}
2315*4f1ed78eSThomas Monjalon 
2316*4f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
231786fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2318*4f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
231986fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
232086fa5de1SThomas Monjalon 			continue; /* port was already attached before */
2321c9cce428SThomas Monjalon 		setup_attached_port(pi);
2322c9cce428SThomas Monjalon 	}
232386fa5de1SThomas Monjalon }
2324c9cce428SThomas Monjalon 
/*
 * Finish the attachment of a newly probed port: pick a NUMA socket,
 * (re)configure the port, enable promiscuous mode and register it in
 * the global port lists in the STOPPED state.
 */
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
2346edab33b1STetsuya Mukawa 
2347edab33b1STetsuya Mukawa void
2348f8e5baa2SThomas Monjalon detach_port_device(portid_t port_id)
23495f4ec54fSChen Jing D(Mark) {
2350f8e5baa2SThomas Monjalon 	struct rte_device *dev;
2351f8e5baa2SThomas Monjalon 	portid_t sibling;
2352f8e5baa2SThomas Monjalon 
2353c9cce428SThomas Monjalon 	printf("Removing a device...\n");
23545f4ec54fSChen Jing D(Mark) 
2355f8e5baa2SThomas Monjalon 	dev = rte_eth_devices[port_id].device;
2356f8e5baa2SThomas Monjalon 	if (dev == NULL) {
2357f8e5baa2SThomas Monjalon 		printf("Device already removed\n");
2358f8e5baa2SThomas Monjalon 		return;
2359f8e5baa2SThomas Monjalon 	}
2360f8e5baa2SThomas Monjalon 
236123ea57a2SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
23623f4a8370SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
23633f4a8370SThomas Monjalon 			printf("Port not stopped\n");
2364edab33b1STetsuya Mukawa 			return;
2365edab33b1STetsuya Mukawa 		}
23663f4a8370SThomas Monjalon 		printf("Port was not closed\n");
2367938a184aSAdrien Mazarguil 		if (ports[port_id].flow_list)
2368938a184aSAdrien Mazarguil 			port_flow_flush(port_id);
23693f4a8370SThomas Monjalon 	}
2370938a184aSAdrien Mazarguil 
2371f8e5baa2SThomas Monjalon 	if (rte_dev_remove(dev) != 0) {
2372f8e5baa2SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2373edab33b1STetsuya Mukawa 		return;
23743070419eSGaetan Rivet 	}
2375edab33b1STetsuya Mukawa 
2376f8e5baa2SThomas Monjalon 	for (sibling = 0; sibling < RTE_MAX_ETHPORTS; sibling++) {
2377f8e5baa2SThomas Monjalon 		if (rte_eth_devices[sibling].device != dev)
2378f8e5baa2SThomas Monjalon 			continue;
2379f8e5baa2SThomas Monjalon 		/* reset mapping between old ports and removed device */
2380f8e5baa2SThomas Monjalon 		rte_eth_devices[sibling].device = NULL;
2381f8e5baa2SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2382f8e5baa2SThomas Monjalon 			/* sibling ports are forced to be closed */
2383f8e5baa2SThomas Monjalon 			ports[sibling].port_status = RTE_PORT_CLOSED;
2384f8e5baa2SThomas Monjalon 			printf("Port %u is closed\n", sibling);
2385f8e5baa2SThomas Monjalon 		}
2386f8e5baa2SThomas Monjalon 	}
2387f8e5baa2SThomas Monjalon 
23884f1de450SThomas Monjalon 	remove_invalid_ports();
238903ce2c53SMatan Azrad 
2390f8e5baa2SThomas Monjalon 	printf("Device of port %u is detached\n", port_id);
2391f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
2392edab33b1STetsuya Mukawa 	printf("Done\n");
2393edab33b1STetsuya Mukawa 	return;
23945f4ec54fSChen Jing D(Mark) }
23955f4ec54fSChen Jing D(Mark) 
2396af75078fSIntel void
2397af75078fSIntel pmd_test_exit(void)
2398af75078fSIntel {
2399124909d7SZhiyong Yang 	struct rte_device *device;
2400af75078fSIntel 	portid_t pt_id;
2401fb73e096SJeff Guo 	int ret;
2402af75078fSIntel 
24038210ec25SPablo de Lara 	if (test_done == 0)
24048210ec25SPablo de Lara 		stop_packet_forwarding();
24058210ec25SPablo de Lara 
2406d3a274ceSZhihong Wang 	if (ports != NULL) {
2407d3a274ceSZhihong Wang 		no_link_check = 1;
24087d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
2409d3a274ceSZhihong Wang 			printf("\nShutting down port %d...\n", pt_id);
2410af75078fSIntel 			fflush(stdout);
2411d3a274ceSZhihong Wang 			stop_port(pt_id);
2412d3a274ceSZhihong Wang 			close_port(pt_id);
2413124909d7SZhiyong Yang 
2414124909d7SZhiyong Yang 			/*
2415124909d7SZhiyong Yang 			 * This is a workaround to fix a virtio-user issue that
2416124909d7SZhiyong Yang 			 * requires to call clean-up routine to remove existing
2417124909d7SZhiyong Yang 			 * socket.
2418124909d7SZhiyong Yang 			 * This workaround valid only for testpmd, needs a fix
2419124909d7SZhiyong Yang 			 * valid for all applications.
2420124909d7SZhiyong Yang 			 * TODO: Implement proper resource cleanup
2421124909d7SZhiyong Yang 			 */
2422124909d7SZhiyong Yang 			device = rte_eth_devices[pt_id].device;
2423124909d7SZhiyong Yang 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2424f8e5baa2SThomas Monjalon 				detach_port_device(pt_id);
2425af75078fSIntel 		}
2426d3a274ceSZhihong Wang 	}
2427fb73e096SJeff Guo 
2428fb73e096SJeff Guo 	if (hot_plug) {
2429fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
24302049c511SJeff Guo 		if (ret) {
2431fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2432fb73e096SJeff Guo 				"fail to stop device event monitor.");
24332049c511SJeff Guo 			return;
24342049c511SJeff Guo 		}
2435fb73e096SJeff Guo 
24362049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
24372049c511SJeff Guo 			eth_dev_event_callback, NULL);
24382049c511SJeff Guo 		if (ret < 0) {
2439fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
24402049c511SJeff Guo 				"fail to unregister device event callback.\n");
24412049c511SJeff Guo 			return;
24422049c511SJeff Guo 		}
24432049c511SJeff Guo 
24442049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
24452049c511SJeff Guo 		if (ret) {
24462049c511SJeff Guo 			RTE_LOG(ERR, EAL,
24472049c511SJeff Guo 				"fail to disable hotplug handling.\n");
24482049c511SJeff Guo 			return;
24492049c511SJeff Guo 		}
2450fb73e096SJeff Guo 	}
2451fb73e096SJeff Guo 
2452d3a274ceSZhihong Wang 	printf("\nBye...\n");
2453af75078fSIntel }
2454af75078fSIntel 
/* Prototype of an interactive menu command handler. */
typedef void (*cmd_func_t)(void);
/* Association of a menu command name with its handler function. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in the pmd_test_menu table (defined elsewhere). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2462af75078fSIntel 
2463ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2464af75078fSIntel static void
2465edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2466af75078fSIntel {
2467ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2468ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2469f8244c63SZhiyong Yang 	portid_t portid;
2470f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2471ce8d5614SIntel 	struct rte_eth_link link;
2472ce8d5614SIntel 
2473ce8d5614SIntel 	printf("Checking link statuses...\n");
2474ce8d5614SIntel 	fflush(stdout);
2475ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2476ce8d5614SIntel 		all_ports_up = 1;
24777d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2478ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2479ce8d5614SIntel 				continue;
2480ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2481ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2482ce8d5614SIntel 			/* print link status if flag set */
2483ce8d5614SIntel 			if (print_flag == 1) {
2484ce8d5614SIntel 				if (link.link_status)
2485f8244c63SZhiyong Yang 					printf(
2486f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2487f8244c63SZhiyong Yang 					portid, link.link_speed,
2488ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2489ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2490ce8d5614SIntel 				else
2491f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2492ce8d5614SIntel 				continue;
2493ce8d5614SIntel 			}
2494ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
249509419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2496ce8d5614SIntel 				all_ports_up = 0;
2497ce8d5614SIntel 				break;
2498ce8d5614SIntel 			}
2499ce8d5614SIntel 		}
2500ce8d5614SIntel 		/* after finally printing all link status, get out */
2501ce8d5614SIntel 		if (print_flag == 1)
2502ce8d5614SIntel 			break;
2503ce8d5614SIntel 
2504ce8d5614SIntel 		if (all_ports_up == 0) {
2505ce8d5614SIntel 			fflush(stdout);
2506ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2507ce8d5614SIntel 		}
2508ce8d5614SIntel 
2509ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2510ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2511ce8d5614SIntel 			print_flag = 1;
2512ce8d5614SIntel 		}
25138ea656f8SGaetan Rivet 
25148ea656f8SGaetan Rivet 		if (lsc_interrupt)
25158ea656f8SGaetan Rivet 			break;
2516ce8d5614SIntel 	}
2517af75078fSIntel }
2518af75078fSIntel 
/*
 * Deferred handler for RTE_ETH_EVENT_INTR_RMV (scheduled via
 * rte_eal_alarm_set): stop forwarding if the removed port was in use,
 * stop and close the port with link checks temporarily disabled,
 * detach its device, then resume forwarding if it had to be paused.
 */
static void
rmv_event_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* a link check would block while the device is going away */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port_device(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}
2540284c908cSGaetan Rivet 
/* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback, registered for every event type.
 *
 * Prints events selected by event_print_mask, flags new ports for
 * deferred setup (RTE_ETH_EVENT_NEW) and schedules the removal
 * handler (RTE_ETH_EVENT_INTR_RMV).  Always returns 0.
 */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		/* defer full setup to attach_port()/setup_attached_port() */
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		/* removal is handled out of interrupt context, 100ms later */
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
257676ad4a2dSGaetan Rivet 
/*
 * Register eth_event_callback() for every ethdev event type on all
 * ports.  Returns 0 on success, -1 if any registration fails.
 */
static int
register_eth_event_callback(void)
{
	int ret;
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN;
			event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
				event,
				eth_event_callback,
				NULL);
		if (ret != 0) {
			TESTPMD_LOG(ERR, "Failed to register callback for "
					"%s event\n", eth_event_desc[event]);
			return -1;
		}
	}

	return 0;
}
259897b5d8b5SThomas Monjalon 
/* This function is used by the interrupt thread */
/*
 * Device (hot-plug) event callback.
 *
 * On RTE_DEV_EVENT_REMOVE the port backed by @device_name is resolved
 * and handed to rmv_event_callback() for teardown.  RTE_DEV_EVENT_ADD
 * is only logged for now (attach after driver binding is a TODO).
 * An out-of-range @type is reported and then falls through to the
 * switch, where it hits the default case harmlessly.
 */
static void
eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
			     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		rmv_event_callback((void *)(intptr_t)port_id);
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}
2636fb73e096SJeff Guo 
2637013af9b6SIntel static int
263828caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2639af75078fSIntel {
2640013af9b6SIntel 	uint16_t i;
2641af75078fSIntel 	int diag;
2642013af9b6SIntel 	uint8_t mapping_found = 0;
2643af75078fSIntel 
2644013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2645013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2646013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2647013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2648013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2649013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2650013af9b6SIntel 			if (diag != 0)
2651013af9b6SIntel 				return diag;
2652013af9b6SIntel 			mapping_found = 1;
2653af75078fSIntel 		}
2654013af9b6SIntel 	}
2655013af9b6SIntel 	if (mapping_found)
2656013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2657013af9b6SIntel 	return 0;
2658013af9b6SIntel }
2659013af9b6SIntel 
2660013af9b6SIntel static int
266128caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2662013af9b6SIntel {
2663013af9b6SIntel 	uint16_t i;
2664013af9b6SIntel 	int diag;
2665013af9b6SIntel 	uint8_t mapping_found = 0;
2666013af9b6SIntel 
2667013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2668013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2669013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2670013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2671013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2672013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2673013af9b6SIntel 			if (diag != 0)
2674013af9b6SIntel 				return diag;
2675013af9b6SIntel 			mapping_found = 1;
2676013af9b6SIntel 		}
2677013af9b6SIntel 	}
2678013af9b6SIntel 	if (mapping_found)
2679013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2680013af9b6SIntel 	return 0;
2681013af9b6SIntel }
2682013af9b6SIntel 
2683013af9b6SIntel static void
268428caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2685013af9b6SIntel {
2686013af9b6SIntel 	int diag = 0;
2687013af9b6SIntel 
2688013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2689af75078fSIntel 	if (diag != 0) {
2690013af9b6SIntel 		if (diag == -ENOTSUP) {
2691013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2692013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2693013af9b6SIntel 		}
2694013af9b6SIntel 		else
2695013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2696013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2697013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2698af75078fSIntel 					pi, diag);
2699af75078fSIntel 	}
2700013af9b6SIntel 
2701013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2702af75078fSIntel 	if (diag != 0) {
2703013af9b6SIntel 		if (diag == -ENOTSUP) {
2704013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2705013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2706013af9b6SIntel 		}
2707013af9b6SIntel 		else
2708013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2709013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2710013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2711af75078fSIntel 					pi, diag);
2712af75078fSIntel 	}
2713af75078fSIntel }
2714af75078fSIntel 
2715f2c5125aSPablo de Lara static void
2716f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2717f2c5125aSPablo de Lara {
2718d44f8a48SQi Zhang 	uint16_t qid;
2719f2c5125aSPablo de Lara 
2720d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
2721d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2722d44f8a48SQi Zhang 
2723d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
2724f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2725d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2726f2c5125aSPablo de Lara 
2727f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2728d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2729f2c5125aSPablo de Lara 
2730f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2731d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2732f2c5125aSPablo de Lara 
2733f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2734d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2735f2c5125aSPablo de Lara 
2736f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2737d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2738f2c5125aSPablo de Lara 
2739d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
2740d44f8a48SQi Zhang 	}
2741d44f8a48SQi Zhang 
2742d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
2743d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
2744d44f8a48SQi Zhang 
2745d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
2746f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2747d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2748f2c5125aSPablo de Lara 
2749f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2750d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2751f2c5125aSPablo de Lara 
2752f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2753d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2754f2c5125aSPablo de Lara 
2755f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2756d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2757f2c5125aSPablo de Lara 
2758f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2759d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2760d44f8a48SQi Zhang 
2761d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
2762d44f8a48SQi Zhang 	}
2763f2c5125aSPablo de Lara }
2764f2c5125aSPablo de Lara 
2765013af9b6SIntel void
2766013af9b6SIntel init_port_config(void)
2767013af9b6SIntel {
2768013af9b6SIntel 	portid_t pid;
2769013af9b6SIntel 	struct rte_port *port;
2770013af9b6SIntel 
27717d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
2772013af9b6SIntel 		port = &ports[pid];
2773013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
2774422515b9SAdrien Mazarguil 		rte_eth_dev_info_get(pid, &port->dev_info);
27753ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2776013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
277790892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2778422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
2779af75078fSIntel 		} else {
2780013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2781013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2782af75078fSIntel 		}
27833ce690d3SBruce Richardson 
27845f592039SJingjing Wu 		if (port->dcb_flag == 0) {
27853ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
27863ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
27873ce690d3SBruce Richardson 			else
27883ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
27893ce690d3SBruce Richardson 		}
27903ce690d3SBruce Richardson 
2791f2c5125aSPablo de Lara 		rxtx_port_config(port);
2792013af9b6SIntel 
2793013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
2794013af9b6SIntel 
2795013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
279650c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2797e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
27987b7e5ba7SIntel #endif
27998ea656f8SGaetan Rivet 
28008ea656f8SGaetan Rivet 		if (lsc_interrupt &&
28018ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
28028ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
28038ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
2804284c908cSGaetan Rivet 		if (rmv_interrupt &&
2805284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
2806284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
2807284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
2808013af9b6SIntel 	}
2809013af9b6SIntel }
2810013af9b6SIntel 
281141b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
281241b05095SBernard Iremonger {
281341b05095SBernard Iremonger 	struct rte_port *port;
281441b05095SBernard Iremonger 
281541b05095SBernard Iremonger 	port = &ports[slave_pid];
281641b05095SBernard Iremonger 	port->slave_flag = 1;
281741b05095SBernard Iremonger }
281841b05095SBernard Iremonger 
281941b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
282041b05095SBernard Iremonger {
282141b05095SBernard Iremonger 	struct rte_port *port;
282241b05095SBernard Iremonger 
282341b05095SBernard Iremonger 	port = &ports[slave_pid];
282441b05095SBernard Iremonger 	port->slave_flag = 0;
282541b05095SBernard Iremonger }
282641b05095SBernard Iremonger 
28270e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
28280e545d30SBernard Iremonger {
28290e545d30SBernard Iremonger 	struct rte_port *port;
28300e545d30SBernard Iremonger 
28310e545d30SBernard Iremonger 	port = &ports[slave_pid];
2832b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2833b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2834b8b8b344SMatan Azrad 		return 1;
2835b8b8b344SMatan Azrad 	return 0;
28360e545d30SBernard Iremonger }
28370e545d30SBernard Iremonger 
/* VLAN IDs used to populate the VMDQ+DCB pool map and the per-port VLAN
 * filter in the DCB configuration paths below (get_eth_dcb_conf(),
 * init_port_dcb_config()).
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2844013af9b6SIntel 
/*
 * Fill *eth_conf with a DCB configuration for the given mode.
 *
 * DCB_VT_ENABLED selects VMDQ+DCB: pools are sized from the traffic-class
 * count (4 TCs -> 32 pools, otherwise 16) and each pool is mapped to one
 * entry of the vlan_tags[] table above.  Otherwise plain DCB(+RSS) is
 * configured, reusing the port's current RSS hash configuration.
 *
 * Returns 0 on success, or the error from rte_eth_dev_rss_hash_conf_get().
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* One VLAN tag per pool; each tag steers to exactly one pool. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Spread the 8 user priorities round-robin over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		/* Preserve the port's existing RSS hash settings. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Spread the 8 user priorities round-robin over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2918013af9b6SIntel 
2919013af9b6SIntel int
29201a572499SJingjing Wu init_port_dcb_config(portid_t pid,
29211a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
29221a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
29231a572499SJingjing Wu 		     uint8_t pfc_en)
2924013af9b6SIntel {
2925013af9b6SIntel 	struct rte_eth_conf port_conf;
2926013af9b6SIntel 	struct rte_port *rte_port;
2927013af9b6SIntel 	int retval;
2928013af9b6SIntel 	uint16_t i;
2929013af9b6SIntel 
29302a977b89SWenzhuo Lu 	rte_port = &ports[pid];
2931013af9b6SIntel 
2932013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2933013af9b6SIntel 	/* Enter DCB configuration status */
2934013af9b6SIntel 	dcb_config = 1;
2935013af9b6SIntel 
2936d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2937d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
2938d5354e89SYanglong Wu 
2939013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
2940ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2941013af9b6SIntel 	if (retval < 0)
2942013af9b6SIntel 		return retval;
29430074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2944013af9b6SIntel 
29452f203d44SQi Zhang 	/* re-configure the device . */
29462f203d44SQi Zhang 	rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
29472a977b89SWenzhuo Lu 
29482a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
29492a977b89SWenzhuo Lu 
29502a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
29512a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
29522a977b89SWenzhuo Lu 	 */
29532a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
29542a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
29552a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
29562a977b89SWenzhuo Lu 			" for port %d.", pid);
29572a977b89SWenzhuo Lu 		return -1;
29582a977b89SWenzhuo Lu 	}
29592a977b89SWenzhuo Lu 
29602a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
29612a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
29622a977b89SWenzhuo Lu 	 */
29632a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
296486ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
296586ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
296686ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
296786ef65eeSBernard Iremonger 		} else {
29682a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
29692a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
297086ef65eeSBernard Iremonger 		}
29712a977b89SWenzhuo Lu 	} else {
29722a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
29732a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
29742a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
29752a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
29762a977b89SWenzhuo Lu 		} else {
29772a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
29782a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
29792a977b89SWenzhuo Lu 
29802a977b89SWenzhuo Lu 		}
29812a977b89SWenzhuo Lu 	}
29822a977b89SWenzhuo Lu 	rx_free_thresh = 64;
29832a977b89SWenzhuo Lu 
2984013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2985013af9b6SIntel 
2986f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
2987013af9b6SIntel 	/* VLAN filter */
29880074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
29891a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2990013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
2991013af9b6SIntel 
2992013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2993013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
2994013af9b6SIntel 
29957741e4cfSIntel 	rte_port->dcb_flag = 1;
29967741e4cfSIntel 
2997013af9b6SIntel 	return 0;
2998af75078fSIntel }
2999af75078fSIntel 
3000ffc468ffSTetsuya Mukawa static void
3001ffc468ffSTetsuya Mukawa init_port(void)
3002ffc468ffSTetsuya Mukawa {
3003ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3004ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3005ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3006ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3007ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3008ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3009ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3010ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3011ffc468ffSTetsuya Mukawa 	}
301229841336SPhil Yang 
301329841336SPhil Yang 	/* Initialize ports NUMA structures */
301429841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
301529841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
301629841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3017ffc468ffSTetsuya Mukawa }
3018ffc468ffSTetsuya Mukawa 
/* Tear down all ports and the interactive prompt; invoked from the
 * SIGINT/SIGTERM handler below.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
3025d3a274ceSZhihong Wang 
3026d3a274ceSZhihong Wang static void
3027cfea1f30SPablo de Lara print_stats(void)
3028cfea1f30SPablo de Lara {
3029cfea1f30SPablo de Lara 	uint8_t i;
3030cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3031cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3032cfea1f30SPablo de Lara 
3033cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3034cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3035cfea1f30SPablo de Lara 
3036cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3037cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3038cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3039cfea1f30SPablo de Lara }
3040cfea1f30SPablo de Lara 
/*
 * SIGINT/SIGTERM handler: uninitialize packet capture and latency stats,
 * close all ports, then re-raise the signal with the default disposition so
 * the process exits with the conventional signal status.
 *
 * NOTE(review): printf()/pmd_test_exit() are not async-signal-safe; this is
 * tolerated here because the process is terminating anyway — confirm if the
 * handler is ever extended.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
3062d3a274ceSZhihong Wang 
/*
 * testpmd entry point: initialize the EAL, logging and ethdev event
 * callbacks, discover ports, parse testpmd's own arguments, configure and
 * start all ports, then either run the interactive prompt or forward
 * packets until interrupted.
 */
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	/* Install handlers before EAL init so early Ctrl-C is caught. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_panic("Cannot register for ethdev events");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	/* Record every port the EAL probed. */
	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* Skip the EAL arguments consumed above; the rest are testpmd's. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	/* Lock pages to avoid page faults in the datapath; non-fatal. */
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();

	/* Optional hotplug support: monitor device events and dispatch
	 * them to eth_dev_event_callback().
	 */
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			eth_dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail  to register device event callback\n");
			return -1;
		}
	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		/* NOTE: this 'ret' intentionally shadows the outer one. */
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n",	ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	/* Interactive mode: run the prompt (optionally auto-starting
	 * forwarding first), then clean up on exit.
	 */
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		/* With --stats-period, print stats until the signal
		 * handler sets f_quit.
		 */
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
3262