xref: /dpdk/app/test-pmd/testpmd.c (revision d6af1a13d7a14d062d11b37f6e31caa0f3823fe0)
1af75078fSIntel /*-
2af75078fSIntel  *   BSD LICENSE
3af75078fSIntel  *
47e4441c8SRemy Horton  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5af75078fSIntel  *   All rights reserved.
6af75078fSIntel  *
7af75078fSIntel  *   Redistribution and use in source and binary forms, with or without
8af75078fSIntel  *   modification, are permitted provided that the following conditions
9af75078fSIntel  *   are met:
10af75078fSIntel  *
11af75078fSIntel  *     * Redistributions of source code must retain the above copyright
12af75078fSIntel  *       notice, this list of conditions and the following disclaimer.
13af75078fSIntel  *     * Redistributions in binary form must reproduce the above copyright
14af75078fSIntel  *       notice, this list of conditions and the following disclaimer in
15af75078fSIntel  *       the documentation and/or other materials provided with the
16af75078fSIntel  *       distribution.
17af75078fSIntel  *     * Neither the name of Intel Corporation nor the names of its
18af75078fSIntel  *       contributors may be used to endorse or promote products derived
19af75078fSIntel  *       from this software without specific prior written permission.
20af75078fSIntel  *
21af75078fSIntel  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22af75078fSIntel  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23af75078fSIntel  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24af75078fSIntel  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25af75078fSIntel  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26af75078fSIntel  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27af75078fSIntel  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28af75078fSIntel  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29af75078fSIntel  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30af75078fSIntel  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31af75078fSIntel  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32af75078fSIntel  */
33af75078fSIntel 
34af75078fSIntel #include <stdarg.h>
35af75078fSIntel #include <stdio.h>
36af75078fSIntel #include <stdlib.h>
37af75078fSIntel #include <signal.h>
38af75078fSIntel #include <string.h>
39af75078fSIntel #include <time.h>
40af75078fSIntel #include <fcntl.h>
41af75078fSIntel #include <sys/types.h>
42af75078fSIntel #include <errno.h>
43af75078fSIntel 
44af75078fSIntel #include <sys/queue.h>
45af75078fSIntel #include <sys/stat.h>
46af75078fSIntel 
47af75078fSIntel #include <stdint.h>
48af75078fSIntel #include <unistd.h>
49af75078fSIntel #include <inttypes.h>
50af75078fSIntel 
51af75078fSIntel #include <rte_common.h>
52d1eb542eSOlivier Matz #include <rte_errno.h>
53af75078fSIntel #include <rte_byteorder.h>
54af75078fSIntel #include <rte_log.h>
55af75078fSIntel #include <rte_debug.h>
56af75078fSIntel #include <rte_cycles.h>
57af75078fSIntel #include <rte_memory.h>
58af75078fSIntel #include <rte_memcpy.h>
59af75078fSIntel #include <rte_memzone.h>
60af75078fSIntel #include <rte_launch.h>
61af75078fSIntel #include <rte_eal.h>
62284c908cSGaetan Rivet #include <rte_alarm.h>
63af75078fSIntel #include <rte_per_lcore.h>
64af75078fSIntel #include <rte_lcore.h>
65af75078fSIntel #include <rte_atomic.h>
66af75078fSIntel #include <rte_branch_prediction.h>
67af75078fSIntel #include <rte_mempool.h>
68af75078fSIntel #include <rte_malloc.h>
69af75078fSIntel #include <rte_mbuf.h>
70af75078fSIntel #include <rte_interrupts.h>
71af75078fSIntel #include <rte_pci.h>
72af75078fSIntel #include <rte_ether.h>
73af75078fSIntel #include <rte_ethdev.h>
74edab33b1STetsuya Mukawa #include <rte_dev.h>
75af75078fSIntel #include <rte_string_fns.h>
76e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
77e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
78e261265eSRadu Nicolau #endif
79148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT
80148f963fSBruce Richardson #include <rte_eth_xenvirt.h>
81148f963fSBruce Richardson #endif
82102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
83102b7329SReshma Pattan #include <rte_pdump.h>
84102b7329SReshma Pattan #endif
85938a184aSAdrien Mazarguil #include <rte_flow.h>
867e4441c8SRemy Horton #include <rte_metrics.h>
877e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
887e4441c8SRemy Horton #include <rte_bitrate.h>
897e4441c8SRemy Horton #endif
9062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
9162d3216dSReshma Pattan #include <rte_latencystats.h>
9262d3216dSReshma Pattan #endif
93af75078fSIntel 
94af75078fSIntel #include "testpmd.h"
95af75078fSIntel 
96af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
97af75078fSIntel 
98af75078fSIntel /* Use the master core for the command line? */
99af75078fSIntel uint8_t interactive = 0;
100ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
10181ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
102af75078fSIntel 
103af75078fSIntel /*
104af75078fSIntel  * NUMA support configuration.
105af75078fSIntel  * When set, NUMA support dispatches the allocation of the RX and TX
106af75078fSIntel  * memory rings, and of the DMA memory buffers (mbufs), of the probed
107af75078fSIntel  * ports across CPU sockets 0 and 1.
108af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
109af75078fSIntel  */
110999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
111af75078fSIntel 
112af75078fSIntel /*
113b6ea6408SIntel  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
114b6ea6408SIntel  * not configured.
115b6ea6408SIntel  */
116b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
117b6ea6408SIntel 
118b6ea6408SIntel /*
119148f963fSBruce Richardson  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
120148f963fSBruce Richardson  */
121148f963fSBruce Richardson uint8_t mp_anon = 0;
122148f963fSBruce Richardson 
123148f963fSBruce Richardson /*
124af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
125af75078fSIntel  * forwarded.
126547d946cSNirmoy Das  * Must be instantiated with the Ethernet addresses of peer traffic generator
127af75078fSIntel  * ports.
128af75078fSIntel  */
129af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
130af75078fSIntel portid_t nb_peer_eth_addrs = 0;
131af75078fSIntel 
132af75078fSIntel /*
133af75078fSIntel  * Probed Target Environment.
134af75078fSIntel  */
135af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
136af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
137af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
138af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
139af75078fSIntel 
140af75078fSIntel /*
141af75078fSIntel  * Test Forwarding Configuration.
142af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
143af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
144af75078fSIntel  */
145af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
146af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
147af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
148af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
149af75078fSIntel 
150af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
151af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
152af75078fSIntel 
153af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
154af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
155af75078fSIntel 
156af75078fSIntel /*
157af75078fSIntel  * Forwarding engines.
158af75078fSIntel  */
159af75078fSIntel struct fwd_engine * fwd_engines[] = {
160af75078fSIntel 	&io_fwd_engine,
161af75078fSIntel 	&mac_fwd_engine,
162d47388f1SCyril Chemparathy 	&mac_swap_engine,
163e9e23a61SCyril Chemparathy 	&flow_gen_engine,
164af75078fSIntel 	&rx_only_engine,
165af75078fSIntel 	&tx_only_engine,
166af75078fSIntel 	&csum_fwd_engine,
167168dfa61SIvan Boule 	&icmp_echo_engine,
168af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
169af75078fSIntel 	&ieee1588_fwd_engine,
170af75078fSIntel #endif
171af75078fSIntel 	NULL,
172af75078fSIntel };
173af75078fSIntel 
174af75078fSIntel struct fwd_config cur_fwd_config;
175af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
176bf56fce1SZhihong Wang uint32_t retry_enabled;
177bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
178bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
179af75078fSIntel 
180af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
181c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
182c8798818SIntel                                       * specified on command-line. */
183af75078fSIntel 
184af75078fSIntel /*
185af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
186af75078fSIntel  */
187af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
190af75078fSIntel };
191af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
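/*
 * Example: with two segment lengths such as 64 and 128 bytes,
 * tx_pkt_nb_segs would be 2 and the resulting TXONLY packets would be
 * 192 bytes long. These defaults are typically overridden at run time
 * (e.g. with the "set txpkts" command).
 */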
192af75078fSIntel 
19379bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
19479bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
19579bec05bSKonstantin Ananyev 
196af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
198af75078fSIntel 
199900550deSIntel /* Whether the current configuration is in DCB mode; 0 means it is not. */
200900550deSIntel uint8_t dcb_config = 0;
201900550deSIntel 
202900550deSIntel /* Whether DCB is in testing status */
203900550deSIntel uint8_t dcb_test = 0;
204900550deSIntel 
205af75078fSIntel /*
206af75078fSIntel  * Configurable number of RX/TX queues.
207af75078fSIntel  */
208af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
210af75078fSIntel 
211af75078fSIntel /*
212af75078fSIntel  * Configurable number of RX/TX ring descriptors.
213af75078fSIntel  */
214af75078fSIntel #define RTE_TEST_RX_DESC_DEFAULT 128
215af75078fSIntel #define RTE_TEST_TX_DESC_DEFAULT 512
216af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
217af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
218af75078fSIntel 
219f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
220af75078fSIntel /*
221af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
222af75078fSIntel  */
223af75078fSIntel 
224f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
225f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
226f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
227af75078fSIntel 
228f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
229f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
230f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
231af75078fSIntel 
232af75078fSIntel /*
233af75078fSIntel  * Configurable value of RX free threshold.
234af75078fSIntel  */
235f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
236af75078fSIntel 
237af75078fSIntel /*
238ce8d5614SIntel  * Configurable value of RX drop enable.
239ce8d5614SIntel  */
240f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
241ce8d5614SIntel 
242ce8d5614SIntel /*
243af75078fSIntel  * Configurable value of TX free threshold.
244af75078fSIntel  */
245f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
246af75078fSIntel 
247af75078fSIntel /*
248af75078fSIntel  * Configurable value of TX RS bit threshold.
249af75078fSIntel  */
250f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
251af75078fSIntel 
252af75078fSIntel /*
253ce8d5614SIntel  * Configurable value of TX queue flags.
254ce8d5614SIntel  */
255f2c5125aSPablo de Lara int32_t txq_flags = RTE_PMD_PARAM_UNSET;
256ce8d5614SIntel 
257ce8d5614SIntel /*
258af75078fSIntel  * Receive Side Scaling (RSS) configuration.
259af75078fSIntel  */
2608a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
261af75078fSIntel 
262af75078fSIntel /*
263af75078fSIntel  * Port topology configuration
264af75078fSIntel  */
265af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
266af75078fSIntel 
2677741e4cfSIntel /*
2687741e4cfSIntel  * Avoid flushing all the RX streams before starting forwarding.
2697741e4cfSIntel  */
2707741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
2717741e4cfSIntel 
272af75078fSIntel /*
273bc202406SDavid Marchand  * Avoid checking the link status when starting/stopping a port.
274bc202406SDavid Marchand  */
275bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
276bc202406SDavid Marchand 
277bc202406SDavid Marchand /*
2788ea656f8SGaetan Rivet  * Enable link status change notification
2798ea656f8SGaetan Rivet  */
2808ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
2818ea656f8SGaetan Rivet 
2828ea656f8SGaetan Rivet /*
283284c908cSGaetan Rivet  * Enable device removal notification.
284284c908cSGaetan Rivet  */
285284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
286284c908cSGaetan Rivet 
287284c908cSGaetan Rivet /*
2883af72783SGaetan Rivet  * Display or mask ether events
2893af72783SGaetan Rivet  * Default to all events except VF_MBOX
2903af72783SGaetan Rivet  */
2913af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
2923af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
2933af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
2943af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
2953af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
2963af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
2973af72783SGaetan Rivet 
2983af72783SGaetan Rivet /*
2997b7e5ba7SIntel  * NIC bypass mode configuration options.
3007b7e5ba7SIntel  */
3017b7e5ba7SIntel 
30250c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3037b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
304e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
3057b7e5ba7SIntel #endif
3067b7e5ba7SIntel 
307e261265eSRadu Nicolau 
30862d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
30962d3216dSReshma Pattan 
31062d3216dSReshma Pattan /*
31162d3216dSReshma Pattan  * Set when latency stats are enabled on the command line.
31262d3216dSReshma Pattan  */
31362d3216dSReshma Pattan uint8_t latencystats_enabled;
31462d3216dSReshma Pattan 
31562d3216dSReshma Pattan /*
31662d3216dSReshma Pattan  * Lcore ID to serve latency statistics.
31762d3216dSReshma Pattan  */
31862d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
31962d3216dSReshma Pattan 
32062d3216dSReshma Pattan #endif
32162d3216dSReshma Pattan 
3227b7e5ba7SIntel /*
323af75078fSIntel  * Ethernet device configuration.
324af75078fSIntel  */
325af75078fSIntel struct rte_eth_rxmode rx_mode = {
326af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
327af75078fSIntel 	.split_hdr_size = 0,
328af75078fSIntel 	.header_split   = 0, /**< Header Split disabled. */
329af75078fSIntel 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
330af75078fSIntel 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
331a47aa8b9SIntel 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
332a47aa8b9SIntel 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
333af75078fSIntel 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
33479dd163fSJeff Guo 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
335af75078fSIntel };
336af75078fSIntel 
337af75078fSIntel struct rte_fdir_conf fdir_conf = {
338af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
339af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
340af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
341d9d5e6f2SJingjing Wu 	.mask = {
342d9d5e6f2SJingjing Wu 		.vlan_tci_mask = 0x0,
343d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
344d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
345d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
346d9d5e6f2SJingjing Wu 		},
347d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
348d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
349d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
350d9d5e6f2SJingjing Wu 		},
351d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
352d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
35347b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
35447b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
35547b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
356d9d5e6f2SJingjing Wu 	},
357af75078fSIntel 	.drop_queue = 127,
358af75078fSIntel };
359af75078fSIntel 
3602950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
361af75078fSIntel 
362ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
363ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
364ed30d9b6SIntel 
365ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
366ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
367ed30d9b6SIntel 
368ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
369ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
370ed30d9b6SIntel 
371c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
372c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
3737acf894dSStephen Hurd 
374e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
3757e4441c8SRemy Horton /* Bitrate statistics */
3767e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
377e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
378e25e6c70SRemy Horton uint8_t bitrate_enabled;
379e25e6c70SRemy Horton #endif
3807e4441c8SRemy Horton 
381ed30d9b6SIntel /* Forward function declarations */
382ed30d9b6SIntel static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
383edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
384*d6af1a13SBernard Iremonger static int eth_event_callback(uint8_t port_id,
38576ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
386*d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
387ce8d5614SIntel 
388ce8d5614SIntel /*
389ce8d5614SIntel  * Check if all the ports are started.
390ce8d5614SIntel  * If yes, return positive value. If not, return zero.
391ce8d5614SIntel  */
392ce8d5614SIntel static int all_ports_started(void);
393ed30d9b6SIntel 
394af75078fSIntel /*
39598a7ea33SJerin Jacob  * Helper function to check whether a socket has already been discovered.
396c9cafcc8SShahaf Shuler  * Return zero if it has, a positive value if it is a new socket.
397c9cafcc8SShahaf Shuler  */
398c9cafcc8SShahaf Shuler int
399c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
400c9cafcc8SShahaf Shuler {
401c9cafcc8SShahaf Shuler 	unsigned int i;
402c9cafcc8SShahaf Shuler 
403c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
404c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
405c9cafcc8SShahaf Shuler 			return 0;
406c9cafcc8SShahaf Shuler 	}
407c9cafcc8SShahaf Shuler 	return 1;
408c9cafcc8SShahaf Shuler }
409c9cafcc8SShahaf Shuler 
410c9cafcc8SShahaf Shuler /*
411af75078fSIntel  * Setup default configuration.
412af75078fSIntel  */
413af75078fSIntel static void
414af75078fSIntel set_default_fwd_lcores_config(void)
415af75078fSIntel {
416af75078fSIntel 	unsigned int i;
417af75078fSIntel 	unsigned int nb_lc;
4187acf894dSStephen Hurd 	unsigned int sock_num;
419af75078fSIntel 
420af75078fSIntel 	nb_lc = 0;
421af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
422c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
423c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
424c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
425c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
426c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
427c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
428c9cafcc8SShahaf Shuler 			}
429c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
4307acf894dSStephen Hurd 		}
431f54fe5eeSStephen Hurd 		if (!rte_lcore_is_enabled(i))
432f54fe5eeSStephen Hurd 			continue;
433f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
434f54fe5eeSStephen Hurd 			continue;
435f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
436af75078fSIntel 	}
437af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
438af75078fSIntel 	nb_cfg_lcores = nb_lcores;
439af75078fSIntel 	nb_fwd_lcores = 1;
440af75078fSIntel }
441af75078fSIntel 
442af75078fSIntel static void
443af75078fSIntel set_def_peer_eth_addrs(void)
444af75078fSIntel {
445af75078fSIntel 	portid_t i;
446af75078fSIntel 
447af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
448af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
449af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
450af75078fSIntel 	}
451af75078fSIntel }
452af75078fSIntel 
453af75078fSIntel static void
454af75078fSIntel set_default_fwd_ports_config(void)
455af75078fSIntel {
456af75078fSIntel 	portid_t pt_id;
457af75078fSIntel 
458af75078fSIntel 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
459af75078fSIntel 		fwd_ports_ids[pt_id] = pt_id;
460af75078fSIntel 
461af75078fSIntel 	nb_cfg_ports = nb_ports;
462af75078fSIntel 	nb_fwd_ports = nb_ports;
463af75078fSIntel }
464af75078fSIntel 
465af75078fSIntel void
466af75078fSIntel set_def_fwd_config(void)
467af75078fSIntel {
468af75078fSIntel 	set_default_fwd_lcores_config();
469af75078fSIntel 	set_def_peer_eth_addrs();
470af75078fSIntel 	set_default_fwd_ports_config();
471af75078fSIntel }
472af75078fSIntel 
473af75078fSIntel /*
474af75078fSIntel  * Mbuf pool creation, part of the configuration initialisation done once at init time.
475af75078fSIntel  */
476af75078fSIntel static void
477af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
478af75078fSIntel 		 unsigned int socket_id)
479af75078fSIntel {
480af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
481bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
482af75078fSIntel 	uint32_t mb_size;
483af75078fSIntel 
484dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
485af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
486148f963fSBruce Richardson 
487d1eb542eSOlivier Matz 	RTE_LOG(INFO, USER1,
488d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
489d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
490d1eb542eSOlivier Matz 
491148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT
492148f963fSBruce Richardson 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
493af75078fSIntel 		(unsigned) mb_mempool_cache,
494af75078fSIntel 		sizeof(struct rte_pktmbuf_pool_private),
495dfb03bbeSOlivier Matz 		rte_pktmbuf_pool_init, NULL,
496dfb03bbeSOlivier Matz 		rte_pktmbuf_init, NULL,
497af75078fSIntel 		socket_id, 0);
498bece7b6cSChristian Ehrhardt #endif
499148f963fSBruce Richardson 
500bece7b6cSChristian Ehrhardt 	/* if the former XEN allocation failed fall back to normal allocation */
501bece7b6cSChristian Ehrhardt 	if (rte_mp == NULL) {
502b19a0c75SOlivier Matz 		if (mp_anon != 0) {
503b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
504bece7b6cSChristian Ehrhardt 				mb_size, (unsigned) mb_mempool_cache,
505148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
506148f963fSBruce Richardson 				socket_id, 0);
50724427bb9SOlivier Matz 			if (rte_mp == NULL)
50824427bb9SOlivier Matz 				goto err;
509b19a0c75SOlivier Matz 
510b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
511b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
512b19a0c75SOlivier Matz 				rte_mp = NULL;
51324427bb9SOlivier Matz 				goto err;
514b19a0c75SOlivier Matz 			}
515b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
516b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
517b19a0c75SOlivier Matz 		} else {
518ea0c20eaSOlivier Matz 			/* wrapper to rte_mempool_create() */
519ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
520ea0c20eaSOlivier Matz 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
521bece7b6cSChristian Ehrhardt 		}
522b19a0c75SOlivier Matz 	}
523148f963fSBruce Richardson 
52424427bb9SOlivier Matz err:
525af75078fSIntel 	if (rte_mp == NULL) {
526d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
527d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
528d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
529148f963fSBruce Richardson 	} else if (verbose_level > 0) {
530591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
531af75078fSIntel 	}
532af75078fSIntel }
533af75078fSIntel 
53420a0286fSLiu Xiaofeng /*
53520a0286fSLiu Xiaofeng  * Check whether the given socket id is valid in NUMA mode.
53620a0286fSLiu Xiaofeng  * Return 0 if valid, -1 otherwise.
53720a0286fSLiu Xiaofeng  */
53820a0286fSLiu Xiaofeng static int
53920a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
54020a0286fSLiu Xiaofeng {
54120a0286fSLiu Xiaofeng 	static int warning_once = 0;
54220a0286fSLiu Xiaofeng 
543c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
54420a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
54520a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
54620a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
54720a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
54820a0286fSLiu Xiaofeng 			       " --numa.\n");
54920a0286fSLiu Xiaofeng 		warning_once = 1;
55020a0286fSLiu Xiaofeng 		return -1;
55120a0286fSLiu Xiaofeng 	}
55220a0286fSLiu Xiaofeng 	return 0;
55320a0286fSLiu Xiaofeng }
55420a0286fSLiu Xiaofeng 
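/*
 * Initialise the global configuration: per-lcore forwarding contexts,
 * per-port device information, mbuf pools (one per NUMA socket when NUMA
 * support is enabled) and the forwarding streams.
 */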
555af75078fSIntel static void
556af75078fSIntel init_config(void)
557af75078fSIntel {
558ce8d5614SIntel 	portid_t pid;
559af75078fSIntel 	struct rte_port *port;
560af75078fSIntel 	struct rte_mempool *mbp;
561af75078fSIntel 	unsigned int nb_mbuf_per_pool;
562af75078fSIntel 	lcoreid_t  lc_id;
5637acf894dSStephen Hurd 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
564af75078fSIntel 
5657acf894dSStephen Hurd 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
566487f9a59SYulong Pei 
567487f9a59SYulong Pei 	if (numa_support) {
568487f9a59SYulong Pei 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
569487f9a59SYulong Pei 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
570487f9a59SYulong Pei 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
571487f9a59SYulong Pei 	}
572487f9a59SYulong Pei 
573af75078fSIntel 	/* Configuration of logical cores. */
574af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
575af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
576fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
577af75078fSIntel 	if (fwd_lcores == NULL) {
578ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
579ce8d5614SIntel 							"failed\n", nb_lcores);
580af75078fSIntel 	}
581af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
582af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
583af75078fSIntel 					       sizeof(struct fwd_lcore),
584fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
585af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
586ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
587ce8d5614SIntel 								"failed\n");
588af75078fSIntel 		}
589af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
590af75078fSIntel 	}
591af75078fSIntel 
5927d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
593ce8d5614SIntel 		port = &ports[pid];
594ce8d5614SIntel 		rte_eth_dev_info_get(pid, &port->dev_info);
595ce8d5614SIntel 
596b6ea6408SIntel 		if (numa_support) {
597b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
598b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
599b6ea6408SIntel 			else {
600b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
60120a0286fSLiu Xiaofeng 
60220a0286fSLiu Xiaofeng 				/* if socket_id is invalid, set to 0 */
60320a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
60420a0286fSLiu Xiaofeng 					socket_id = 0;
605b6ea6408SIntel 				port_per_socket[socket_id]++;
606b6ea6408SIntel 			}
607b6ea6408SIntel 		}
608b6ea6408SIntel 
609ce8d5614SIntel 		/* set flag to initialize port/queue */
610ce8d5614SIntel 		port->need_reconfig = 1;
611ce8d5614SIntel 		port->need_reconfig_queues = 1;
612ce8d5614SIntel 	}
613ce8d5614SIntel 
6143ab64341SOlivier Matz 	/*
6153ab64341SOlivier Matz 	 * Create mbuf pools.
6163ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single mbuf pool in
6173ab64341SOlivier Matz 	 * socket 0 memory by default.
6183ab64341SOlivier Matz 	 * Otherwise, create an mbuf pool in the memory of each discovered socket.
6193ab64341SOlivier Matz 	 *
6203ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
6213ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
6223ab64341SOlivier Matz 	 */
6233ab64341SOlivier Matz 	if (param_total_num_mbufs)
6243ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
6253ab64341SOlivier Matz 	else {
6263ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
6273ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
6283ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
6293ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
6303ab64341SOlivier Matz 	}
6313ab64341SOlivier Matz 
632b6ea6408SIntel 	if (numa_support) {
633b6ea6408SIntel 		uint8_t i;
634ce8d5614SIntel 
635c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
636c9cafcc8SShahaf Shuler 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
637c9cafcc8SShahaf Shuler 					 socket_ids[i]);
6383ab64341SOlivier Matz 	} else {
6393ab64341SOlivier Matz 		if (socket_num == UMA_NO_CONFIG)
6403ab64341SOlivier Matz 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
6413ab64341SOlivier Matz 		else
6423ab64341SOlivier Matz 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
6433ab64341SOlivier Matz 						 socket_num);
6443ab64341SOlivier Matz 	}
645b6ea6408SIntel 
646b6ea6408SIntel 	init_port_config();
6475886ae07SAdrien Mazarguil 
6485886ae07SAdrien Mazarguil 	/*
6495886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
6505886ae07SAdrien Mazarguil 	 */
6515886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
6528fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
6538fd8bebcSAdrien Mazarguil 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
6548fd8bebcSAdrien Mazarguil 
6555886ae07SAdrien Mazarguil 		if (mbp == NULL)
6565886ae07SAdrien Mazarguil 			mbp = mbuf_pool_find(0);
6575886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
6585886ae07SAdrien Mazarguil 	}
6595886ae07SAdrien Mazarguil 
660ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
661ce8d5614SIntel 	if (init_fwd_streams() < 0)
662ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
6630c0db76fSBernard Iremonger 
6640c0db76fSBernard Iremonger 	fwd_config_setup();
665ce8d5614SIntel }
666ce8d5614SIntel 
6672950a769SDeclan Doherty 
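/*
 * Re-initialise the configuration of a single port (e.g. after a device
 * has been attached at run time): refresh its device info, mark it for
 * reconfiguration and record the socket it belongs to.
 */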
6682950a769SDeclan Doherty void
669a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
6702950a769SDeclan Doherty {
6712950a769SDeclan Doherty 	struct rte_port *port;
6722950a769SDeclan Doherty 
6732950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
6742950a769SDeclan Doherty 	port = &ports[new_port_id];
6752950a769SDeclan Doherty 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
6762950a769SDeclan Doherty 
6772950a769SDeclan Doherty 	/* set flag to initialize port/queue */
6782950a769SDeclan Doherty 	port->need_reconfig = 1;
6792950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
680a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
6812950a769SDeclan Doherty 
6822950a769SDeclan Doherty 	init_port_config();
6832950a769SDeclan Doherty }
6842950a769SDeclan Doherty 
6852950a769SDeclan Doherty 
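/*
 * Allocate one forwarding stream per port and per queue
 * (nb_ports * max(nb_rxq, nb_txq)), after checking that the requested
 * queue counts do not exceed the device limits.
 * Return 0 on success, -1 on error.
 */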
686ce8d5614SIntel int
687ce8d5614SIntel init_fwd_streams(void)
688ce8d5614SIntel {
689ce8d5614SIntel 	portid_t pid;
690ce8d5614SIntel 	struct rte_port *port;
691ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
6925a8fb55cSReshma Pattan 	queueid_t q;
693ce8d5614SIntel 
694ce8d5614SIntel 	/* set socket id according to numa or not */
6957d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
696ce8d5614SIntel 		port = &ports[pid];
697ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
698ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
699ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
700ce8d5614SIntel 				port->dev_info.max_rx_queues);
701ce8d5614SIntel 			return -1;
702ce8d5614SIntel 		}
703ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
704ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
705ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
706ce8d5614SIntel 				port->dev_info.max_tx_queues);
707ce8d5614SIntel 			return -1;
708ce8d5614SIntel 		}
70920a0286fSLiu Xiaofeng 		if (numa_support) {
71020a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
71120a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
71220a0286fSLiu Xiaofeng 			else {
713b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
71420a0286fSLiu Xiaofeng 
71520a0286fSLiu Xiaofeng 				/* if socket_id is invalid, set to 0 */
71620a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
71720a0286fSLiu Xiaofeng 					port->socket_id = 0;
71820a0286fSLiu Xiaofeng 			}
71920a0286fSLiu Xiaofeng 		}
720b6ea6408SIntel 		else {
721b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
722af75078fSIntel 				port->socket_id = 0;
723b6ea6408SIntel 			else
724b6ea6408SIntel 				port->socket_id = socket_num;
725b6ea6408SIntel 		}
726af75078fSIntel 	}
727af75078fSIntel 
7285a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
7295a8fb55cSReshma Pattan 	if (q == 0) {
7305a8fb55cSReshma Pattan 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
7315a8fb55cSReshma Pattan 		return -1;
7325a8fb55cSReshma Pattan 	}
7335a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
734ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
735ce8d5614SIntel 		return 0;
736ce8d5614SIntel 	/* clear the old */
737ce8d5614SIntel 	if (fwd_streams != NULL) {
738ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
739ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
740ce8d5614SIntel 				continue;
741ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
742ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
743af75078fSIntel 		}
744ce8d5614SIntel 		rte_free(fwd_streams);
745ce8d5614SIntel 		fwd_streams = NULL;
746ce8d5614SIntel 	}
747ce8d5614SIntel 
748ce8d5614SIntel 	/* init new */
749ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
750ce8d5614SIntel 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
751fdf20fa7SSergio Gonzalez Monroy 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
752ce8d5614SIntel 	if (fwd_streams == NULL)
753ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
754ce8d5614SIntel 						"failed\n", nb_fwd_streams);
755ce8d5614SIntel 
756af75078fSIntel 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
757af75078fSIntel 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
758fdf20fa7SSergio Gonzalez Monroy 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
759ce8d5614SIntel 		if (fwd_streams[sm_id] == NULL)
760ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
761ce8d5614SIntel 								" failed\n");
762af75078fSIntel 	}
763ce8d5614SIntel 
764ce8d5614SIntel 	return 0;
765af75078fSIntel }
766af75078fSIntel 
767af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
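/*
 * Display the burst size distribution recorded for an RX or TX stream:
 * the most frequent burst sizes and the percentage of the remaining bursts.
 */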
768af75078fSIntel static void
769af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
770af75078fSIntel {
771af75078fSIntel 	unsigned int total_burst;
772af75078fSIntel 	unsigned int nb_burst;
773af75078fSIntel 	unsigned int burst_stats[3];
774af75078fSIntel 	uint16_t pktnb_stats[3];
775af75078fSIntel 	uint16_t nb_pkt;
776af75078fSIntel 	int burst_percent[3];
777af75078fSIntel 
778af75078fSIntel 	/*
779af75078fSIntel 	 * First compute the total number of packet bursts and the
780af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
781af75078fSIntel 	 */
782af75078fSIntel 	total_burst = 0;
783af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
784af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
785af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
786af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
787af75078fSIntel 		if (nb_burst == 0)
788af75078fSIntel 			continue;
789af75078fSIntel 		total_burst += nb_burst;
790af75078fSIntel 		if (nb_burst > burst_stats[0]) {
791af75078fSIntel 			burst_stats[1] = burst_stats[0];
792af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
793af75078fSIntel 			burst_stats[0] = nb_burst;
794af75078fSIntel 			pktnb_stats[0] = nb_pkt;
795af75078fSIntel 		}
796af75078fSIntel 	}
797af75078fSIntel 	if (total_burst == 0)
798af75078fSIntel 		return;
799af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
800af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
801af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
802af75078fSIntel 	if (burst_stats[0] == total_burst) {
803af75078fSIntel 		printf("]\n");
804af75078fSIntel 		return;
805af75078fSIntel 	}
806af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
807af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
808af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
809af75078fSIntel 		return;
810af75078fSIntel 	}
811af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
812af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
813af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
814af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
815af75078fSIntel 		return;
816af75078fSIntel 	}
817af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
818af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
819af75078fSIntel }
820af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
821af75078fSIntel 
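/*
 * Display the forwarding statistics (RX/TX packet, drop and error counters)
 * accumulated for a given port during the forwarding run.
 */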
822af75078fSIntel static void
823af75078fSIntel fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
824af75078fSIntel {
825af75078fSIntel 	struct rte_port *port;
826013af9b6SIntel 	uint8_t i;
827af75078fSIntel 
828af75078fSIntel 	static const char *fwd_stats_border = "----------------------";
829af75078fSIntel 
830af75078fSIntel 	port = &ports[port_id];
831af75078fSIntel 	printf("\n  %s Forward statistics for port %-2d %s\n",
832af75078fSIntel 	       fwd_stats_border, port_id, fwd_stats_border);
833013af9b6SIntel 
834013af9b6SIntel 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
835af75078fSIntel 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
836af75078fSIntel 		       "%-"PRIu64"\n",
83770bdb186SIvan Boule 		       stats->ipackets, stats->imissed,
83870bdb186SIvan Boule 		       (uint64_t) (stats->ipackets + stats->imissed));
839af75078fSIntel 
840af75078fSIntel 		if (cur_fwd_eng == &csum_fwd_engine)
841af75078fSIntel 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
842af75078fSIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
84386057c99SIgor Ryzhov 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
844f72a0fa6SStephen Hemminger 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
84570bdb186SIvan Boule 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
84670bdb186SIvan Boule 		}
847af75078fSIntel 
848af75078fSIntel 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
849af75078fSIntel 		       "%-"PRIu64"\n",
850af75078fSIntel 		       stats->opackets, port->tx_dropped,
851af75078fSIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
852013af9b6SIntel 	}
853013af9b6SIntel 	else {
854013af9b6SIntel 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
855013af9b6SIntel 		       "%14"PRIu64"\n",
85670bdb186SIvan Boule 		       stats->ipackets, stats->imissed,
85770bdb186SIvan Boule 		       (uint64_t) (stats->ipackets + stats->imissed));
858013af9b6SIntel 
859013af9b6SIntel 		if (cur_fwd_eng == &csum_fwd_engine)
860013af9b6SIntel 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
861013af9b6SIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
86286057c99SIgor Ryzhov 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
863f72a0fa6SStephen Hemminger 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
86470bdb186SIvan Boule 			printf("  RX-nombufs:             %14"PRIu64"\n",
86570bdb186SIvan Boule 			       stats->rx_nombuf);
86670bdb186SIvan Boule 		}
867013af9b6SIntel 
868013af9b6SIntel 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
869013af9b6SIntel 		       "%14"PRIu64"\n",
870013af9b6SIntel 		       stats->opackets, port->tx_dropped,
871013af9b6SIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
872013af9b6SIntel 	}
873e659b6b4SIvan Boule 
874af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
875af75078fSIntel 	if (port->rx_stream)
876013af9b6SIntel 		pkt_burst_stats_display("RX",
877013af9b6SIntel 			&port->rx_stream->rx_burst_stats);
878af75078fSIntel 	if (port->tx_stream)
879013af9b6SIntel 		pkt_burst_stats_display("TX",
880013af9b6SIntel 			&port->tx_stream->tx_burst_stats);
881af75078fSIntel #endif
882af75078fSIntel 
883013af9b6SIntel 	if (port->rx_queue_stats_mapping_enabled) {
884013af9b6SIntel 		printf("\n");
885013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
886013af9b6SIntel 			printf("  Stats reg %2d RX-packets:%14"PRIu64
887013af9b6SIntel 			       "     RX-errors:%14"PRIu64
888013af9b6SIntel 			       "    RX-bytes:%14"PRIu64"\n",
889013af9b6SIntel 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
890013af9b6SIntel 		}
891013af9b6SIntel 		printf("\n");
892013af9b6SIntel 	}
893013af9b6SIntel 	if (port->tx_queue_stats_mapping_enabled) {
894013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
895013af9b6SIntel 			printf("  Stats reg %2d TX-packets:%14"PRIu64
896013af9b6SIntel 			       "                                 TX-bytes:%14"PRIu64"\n",
897013af9b6SIntel 			       i, stats->q_opackets[i], stats->q_obytes[i]);
898013af9b6SIntel 		}
899013af9b6SIntel 	}
900013af9b6SIntel 
901af75078fSIntel 	printf("  %s--------------------------------%s\n",
902af75078fSIntel 	       fwd_stats_border, fwd_stats_border);
903af75078fSIntel }
904af75078fSIntel 
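/*
 * Display the statistics of a single forwarding stream
 * (RX port/queue -> TX port/queue).
 */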
905af75078fSIntel static void
906af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
907af75078fSIntel {
908af75078fSIntel 	struct fwd_stream *fs;
909af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
910af75078fSIntel 
911af75078fSIntel 	fs = fwd_streams[stream_id];
912af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
913af75078fSIntel 	    (fs->fwd_dropped == 0))
914af75078fSIntel 		return;
915af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
916af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
917af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
918af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
919af75078fSIntel 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
920af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
921af75078fSIntel 
922af75078fSIntel 	/* if checksum mode */
923af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
924013af9b6SIntel 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
925013af9b6SIntel 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
926af75078fSIntel 	}
927af75078fSIntel 
928af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
929af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
930af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
931af75078fSIntel #endif
932af75078fSIntel }
933af75078fSIntel 
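/*
 * Drain any packets still pending in the RX queues of the forwarding ports
 * so that a new run starts from empty queues. Two passes are made, with a
 * 10 ms delay in between, and each queue is polled for at most 1 second.
 */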
934af75078fSIntel static void
9357741e4cfSIntel flush_fwd_rx_queues(void)
936af75078fSIntel {
937af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
938af75078fSIntel 	portid_t  rxp;
9397741e4cfSIntel 	portid_t port_id;
940af75078fSIntel 	queueid_t rxq;
941af75078fSIntel 	uint16_t  nb_rx;
942af75078fSIntel 	uint16_t  i;
943af75078fSIntel 	uint8_t   j;
944f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
945594302c7SJames Poole 	uint64_t timer_period;
946f487715fSReshma Pattan 
947f487715fSReshma Pattan 	/* convert to number of cycles */
948594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
949af75078fSIntel 
950af75078fSIntel 	for (j = 0; j < 2; j++) {
9517741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
952af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
9537741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
954f487715fSReshma Pattan 				/**
955f487715fSReshma Pattan 				* testpmd can get stuck in the do-while loop below
956f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns a nonzero
957f487715fSReshma Pattan 				* number of packets, so a timer is used to exit
958f487715fSReshma Pattan 				* the loop after a 1 second timeout.
959f487715fSReshma Pattan 				*/
960f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
961af75078fSIntel 				do {
9627741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
963013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
964af75078fSIntel 					for (i = 0; i < nb_rx; i++)
965af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
966f487715fSReshma Pattan 
967f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
968f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
969f487715fSReshma Pattan 					timer_tsc += diff_tsc;
970f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
971f487715fSReshma Pattan 					(timer_tsc < timer_period));
972f487715fSReshma Pattan 				timer_tsc = 0;
973af75078fSIntel 			}
974af75078fSIntel 		}
975af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
976af75078fSIntel 	}
977af75078fSIntel }
978af75078fSIntel 
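/*
 * Main forwarding loop of a logical core: apply the packet forwarding
 * callback to each of its streams until the lcore is told to stop,
 * optionally updating bit-rate and latency statistics along the way.
 */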
979af75078fSIntel static void
980af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
981af75078fSIntel {
982af75078fSIntel 	struct fwd_stream **fsm;
983af75078fSIntel 	streamid_t nb_fs;
984af75078fSIntel 	streamid_t sm_id;
9857e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
9867e4441c8SRemy Horton 	uint64_t tics_per_1sec;
9877e4441c8SRemy Horton 	uint64_t tics_datum;
9887e4441c8SRemy Horton 	uint64_t tics_current;
9897e4441c8SRemy Horton 	uint8_t idx_port, cnt_ports;
990af75078fSIntel 
9917e4441c8SRemy Horton 	cnt_ports = rte_eth_dev_count();
9927e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
9937e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
9947e4441c8SRemy Horton #endif
995af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
996af75078fSIntel 	nb_fs = fc->stream_nb;
997af75078fSIntel 	do {
998af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
999af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
10007e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
1001e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
1002e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
10037e4441c8SRemy Horton 			tics_current = rte_rdtsc();
10047e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
10057e4441c8SRemy Horton 				/* Periodic bitrate calculation */
1006e25e6c70SRemy Horton 				for (idx_port = 0;
1007e25e6c70SRemy Horton 						idx_port < cnt_ports;
1008e25e6c70SRemy Horton 						idx_port++)
1009e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
1010e25e6c70SRemy Horton 						idx_port);
10117e4441c8SRemy Horton 				tics_datum = tics_current;
10127e4441c8SRemy Horton 			}
1013e25e6c70SRemy Horton 		}
10147e4441c8SRemy Horton #endif
101562d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
101665eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
101765eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
101862d3216dSReshma Pattan 			rte_latencystats_update();
101962d3216dSReshma Pattan #endif
102062d3216dSReshma Pattan 
1021af75078fSIntel 	} while (! fc->stopped);
1022af75078fSIntel }
1023af75078fSIntel 
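/*
 * Entry point launched on each forwarding lcore; runs the forwarding loop
 * with the packet forwarding function of the current forwarding engine.
 */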
1024af75078fSIntel static int
1025af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1026af75078fSIntel {
1027af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1028af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1029af75078fSIntel 	return 0;
1030af75078fSIntel }
1031af75078fSIntel 
1032af75078fSIntel /*
1033af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1034af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1035af75078fSIntel  */
1036af75078fSIntel static int
1037af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1038af75078fSIntel {
1039af75078fSIntel 	struct fwd_lcore *fwd_lc;
1040af75078fSIntel 	struct fwd_lcore tmp_lcore;
1041af75078fSIntel 
1042af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1043af75078fSIntel 	tmp_lcore = *fwd_lc;
1044af75078fSIntel 	tmp_lcore.stopped = 1;
1045af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1046af75078fSIntel 	return 0;
1047af75078fSIntel }
1048af75078fSIntel 
1049af75078fSIntel /*
1050af75078fSIntel  * Launch packet forwarding:
1051af75078fSIntel  *     - Setup per-port forwarding context.
1052af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1053af75078fSIntel  */
1054af75078fSIntel static void
1055af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1056af75078fSIntel {
1057af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1058af75078fSIntel 	unsigned int i;
1059af75078fSIntel 	unsigned int lc_id;
1060af75078fSIntel 	int diag;
1061af75078fSIntel 
1062af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1063af75078fSIntel 	if (port_fwd_begin != NULL) {
1064af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1065af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1066af75078fSIntel 	}
1067af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1068af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1069af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1070af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1071af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1072af75078fSIntel 						     fwd_lcores[i], lc_id);
1073af75078fSIntel 			if (diag != 0)
1074af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1075af75078fSIntel 				       lc_id, diag);
1076af75078fSIntel 		}
1077af75078fSIntel 	}
1078af75078fSIntel }
1079af75078fSIntel 
1080af75078fSIntel /*
1081af75078fSIntel  * Launch packet forwarding configuration.
1082af75078fSIntel  */
1083af75078fSIntel void
1084af75078fSIntel start_packet_forwarding(int with_tx_first)
1085af75078fSIntel {
1086af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1087af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1088af75078fSIntel 	struct rte_port *port;
1089af75078fSIntel 	unsigned int i;
1090af75078fSIntel 	portid_t   pt_id;
1091af75078fSIntel 	streamid_t sm_id;
1092af75078fSIntel 
10935a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
10945a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
10955a8fb55cSReshma Pattan 
10965a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
10975a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
10985a8fb55cSReshma Pattan 
10995a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
11005a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
11015a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
11025a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
11035a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
11045a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
11055a8fb55cSReshma Pattan 
1106ce8d5614SIntel 	if (all_ports_started() == 0) {
1107ce8d5614SIntel 		printf("Not all ports were started\n");
1108ce8d5614SIntel 		return;
1109ce8d5614SIntel 	}
1110af75078fSIntel 	if (test_done == 0) {
1111af75078fSIntel 		printf("Packet forwarding already started\n");
1112af75078fSIntel 		return;
1113af75078fSIntel 	}
1114edf87b4aSBernard Iremonger 
1115edf87b4aSBernard Iremonger 	if (init_fwd_streams() < 0) {
1116edf87b4aSBernard Iremonger 		printf("Fail from init_fwd_streams()\n");
1117edf87b4aSBernard Iremonger 		return;
1118edf87b4aSBernard Iremonger 	}
1119edf87b4aSBernard Iremonger 
11207741e4cfSIntel 	if(dcb_test) {
11217741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
11227741e4cfSIntel 			pt_id = fwd_ports_ids[i];
11237741e4cfSIntel 			port = &ports[pt_id];
11247741e4cfSIntel 			if (!port->dcb_flag) {
11257741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
11267741e4cfSIntel                                        "be configured in this mode.\n");
1127013af9b6SIntel 				return;
1128013af9b6SIntel 			}
11297741e4cfSIntel 		}
11307741e4cfSIntel 		if (nb_fwd_lcores == 1) {
11317741e4cfSIntel 			printf("In DCB mode, the number of forwarding cores "
11327741e4cfSIntel                                "should be larger than 1.\n");
11337741e4cfSIntel 			return;
11347741e4cfSIntel 		}
11357741e4cfSIntel 	}
1136af75078fSIntel 	test_done = 0;
11377741e4cfSIntel 
11387741e4cfSIntel 	if(!no_flush_rx)
11397741e4cfSIntel 		flush_fwd_rx_queues();
11407741e4cfSIntel 
1141af75078fSIntel 	fwd_config_setup();
1142933617d8SZhihong Wang 	pkt_fwd_config_display(&cur_fwd_config);
1143af75078fSIntel 	rxtx_config_display();
1144af75078fSIntel 
1145af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1146af75078fSIntel 		pt_id = fwd_ports_ids[i];
1147af75078fSIntel 		port = &ports[pt_id];
1148af75078fSIntel 		rte_eth_stats_get(pt_id, &port->stats);
1149af75078fSIntel 		port->tx_dropped = 0;
1150013af9b6SIntel 
1151013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
1152af75078fSIntel 	}
1153af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1154af75078fSIntel 		fwd_streams[sm_id]->rx_packets = 0;
1155af75078fSIntel 		fwd_streams[sm_id]->tx_packets = 0;
1156af75078fSIntel 		fwd_streams[sm_id]->fwd_dropped = 0;
1157af75078fSIntel 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1158af75078fSIntel 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1159af75078fSIntel 
1160af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1161af75078fSIntel 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1162af75078fSIntel 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1163af75078fSIntel 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1164af75078fSIntel 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1165af75078fSIntel #endif
1166af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1167af75078fSIntel 		fwd_streams[sm_id]->core_cycles = 0;
1168af75078fSIntel #endif
1169af75078fSIntel 	}
1170af75078fSIntel 	if (with_tx_first) {
1171af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1172af75078fSIntel 		if (port_fwd_begin != NULL) {
1173af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1174af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
1175af75078fSIntel 		}
1176acbf77a6SZhihong Wang 		while (with_tx_first--) {
1177acbf77a6SZhihong Wang 			launch_packet_forwarding(
1178acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
1179af75078fSIntel 			rte_eal_mp_wait_lcore();
1180acbf77a6SZhihong Wang 		}
1181af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
1182af75078fSIntel 		if (port_fwd_end != NULL) {
1183af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1184af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
1185af75078fSIntel 		}
1186af75078fSIntel 	}
1187af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
1188af75078fSIntel }
1189af75078fSIntel 
1190af75078fSIntel void
1191af75078fSIntel stop_packet_forwarding(void)
1192af75078fSIntel {
1193af75078fSIntel 	struct rte_eth_stats stats;
1194af75078fSIntel 	struct rte_port *port;
1195af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1196af75078fSIntel 	int i;
1197af75078fSIntel 	portid_t   pt_id;
1198af75078fSIntel 	streamid_t sm_id;
1199af75078fSIntel 	lcoreid_t  lc_id;
1200af75078fSIntel 	uint64_t total_recv;
1201af75078fSIntel 	uint64_t total_xmit;
1202af75078fSIntel 	uint64_t total_rx_dropped;
1203af75078fSIntel 	uint64_t total_tx_dropped;
1204af75078fSIntel 	uint64_t total_rx_nombuf;
1205af75078fSIntel 	uint64_t tx_dropped;
1206af75078fSIntel 	uint64_t rx_bad_ip_csum;
1207af75078fSIntel 	uint64_t rx_bad_l4_csum;
1208af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1209af75078fSIntel 	uint64_t fwd_cycles;
1210af75078fSIntel #endif
1211af75078fSIntel 	static const char *acc_stats_border = "+++++++++++++++";
1212af75078fSIntel 
1213af75078fSIntel 	if (test_done) {
1214af75078fSIntel 		printf("Packet forwarding not started\n");
1215af75078fSIntel 		return;
1216af75078fSIntel 	}
1217af75078fSIntel 	printf("Telling cores to stop...");
1218af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1219af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1220af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1221af75078fSIntel 	rte_eal_mp_wait_lcore();
1222af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1223af75078fSIntel 	if (port_fwd_end != NULL) {
1224af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1225af75078fSIntel 			pt_id = fwd_ports_ids[i];
1226af75078fSIntel 			(*port_fwd_end)(pt_id);
1227af75078fSIntel 		}
1228af75078fSIntel 	}
1229af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1230af75078fSIntel 	fwd_cycles = 0;
1231af75078fSIntel #endif
1232af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1233af75078fSIntel 		if (cur_fwd_config.nb_fwd_streams >
1234af75078fSIntel 		    cur_fwd_config.nb_fwd_ports) {
1235af75078fSIntel 			fwd_stream_stats_display(sm_id);
1236af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1237af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1238af75078fSIntel 		} else {
1239af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1240af75078fSIntel 				fwd_streams[sm_id];
1241af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1242af75078fSIntel 				fwd_streams[sm_id];
1243af75078fSIntel 		}
1244af75078fSIntel 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1245af75078fSIntel 		tx_dropped = (uint64_t) (tx_dropped +
1246af75078fSIntel 					 fwd_streams[sm_id]->fwd_dropped);
1247af75078fSIntel 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1248af75078fSIntel 
1249013af9b6SIntel 		rx_bad_ip_csum =
1250013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1251af75078fSIntel 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1252af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1253013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1254013af9b6SIntel 							rx_bad_ip_csum;
1255af75078fSIntel 
1256013af9b6SIntel 		rx_bad_l4_csum =
1257013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1258af75078fSIntel 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1259af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1260013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1261013af9b6SIntel 							rx_bad_l4_csum;
1262af75078fSIntel 
1263af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1264af75078fSIntel 		fwd_cycles = (uint64_t) (fwd_cycles +
1265af75078fSIntel 					 fwd_streams[sm_id]->core_cycles);
1266af75078fSIntel #endif
1267af75078fSIntel 	}
1268af75078fSIntel 	total_recv = 0;
1269af75078fSIntel 	total_xmit = 0;
1270af75078fSIntel 	total_rx_dropped = 0;
1271af75078fSIntel 	total_tx_dropped = 0;
1272af75078fSIntel 	total_rx_nombuf  = 0;
12737741e4cfSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1274af75078fSIntel 		pt_id = fwd_ports_ids[i];
1275af75078fSIntel 
1276af75078fSIntel 		port = &ports[pt_id];
1277af75078fSIntel 		rte_eth_stats_get(pt_id, &stats);
1278af75078fSIntel 		stats.ipackets -= port->stats.ipackets;
1279af75078fSIntel 		port->stats.ipackets = 0;
1280af75078fSIntel 		stats.opackets -= port->stats.opackets;
1281af75078fSIntel 		port->stats.opackets = 0;
1282af75078fSIntel 		stats.ibytes   -= port->stats.ibytes;
1283af75078fSIntel 		port->stats.ibytes = 0;
1284af75078fSIntel 		stats.obytes   -= port->stats.obytes;
1285af75078fSIntel 		port->stats.obytes = 0;
128670bdb186SIvan Boule 		stats.imissed  -= port->stats.imissed;
128770bdb186SIvan Boule 		port->stats.imissed = 0;
1288af75078fSIntel 		stats.oerrors  -= port->stats.oerrors;
1289af75078fSIntel 		port->stats.oerrors = 0;
1290af75078fSIntel 		stats.rx_nombuf -= port->stats.rx_nombuf;
1291af75078fSIntel 		port->stats.rx_nombuf = 0;
1292af75078fSIntel 
1293af75078fSIntel 		total_recv += stats.ipackets;
1294af75078fSIntel 		total_xmit += stats.opackets;
129570bdb186SIvan Boule 		total_rx_dropped += stats.imissed;
1296af75078fSIntel 		total_tx_dropped += port->tx_dropped;
1297af75078fSIntel 		total_rx_nombuf  += stats.rx_nombuf;
1298af75078fSIntel 
1299af75078fSIntel 		fwd_port_stats_display(pt_id, &stats);
1300af75078fSIntel 	}
1301af75078fSIntel 	printf("\n  %s Accumulated forward statistics for all ports"
1302af75078fSIntel 	       "%s\n",
1303af75078fSIntel 	       acc_stats_border, acc_stats_border);
1304af75078fSIntel 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1305af75078fSIntel 	       "%-"PRIu64"\n"
1306af75078fSIntel 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1307af75078fSIntel 	       "%-"PRIu64"\n",
1308af75078fSIntel 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1309af75078fSIntel 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1310af75078fSIntel 	if (total_rx_nombuf > 0)
1311af75078fSIntel 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1312af75078fSIntel 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1313af75078fSIntel 	       "%s\n",
1314af75078fSIntel 	       acc_stats_border, acc_stats_border);
1315af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1316af75078fSIntel 	if (total_recv > 0)
1317af75078fSIntel 		printf("\n  CPU cycles/packet=%u (total cycles="
1318af75078fSIntel 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1319af75078fSIntel 		       (unsigned int)(fwd_cycles / total_recv),
1320af75078fSIntel 		       fwd_cycles, total_recv);
1321af75078fSIntel #endif
1322af75078fSIntel 	printf("\nDone.\n");
1323af75078fSIntel 	test_done = 1;
1324af75078fSIntel }
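
/*
 * Illustrative sketch (not part of testpmd): the per-run statistics printed
 * above are obtained by snapshotting the ethdev counters into port->stats
 * when forwarding starts and subtracting that snapshot when it stops.  A
 * minimal, single-port version of the same snapshot/delta pattern could look
 * like the code below; the run_baseline variable and the show_run_delta()
 * name are hypothetical and exist only for this example.
 */
#if 0	/* example only, never compiled */
static struct rte_eth_stats run_baseline;

static void
snapshot_run_stats(portid_t pid)
{
	/* counters at the start of the run */
	rte_eth_stats_get(pid, &run_baseline);
}

static void
show_run_delta(portid_t pid)
{
	struct rte_eth_stats now;

	/* counters at the end of the run; print only the delta */
	rte_eth_stats_get(pid, &now);
	printf("RX %"PRIu64" TX %"PRIu64" packets this run\n",
	       now.ipackets - run_baseline.ipackets,
	       now.opackets - run_baseline.opackets);
}
#endif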
1325af75078fSIntel 
1326cfae07fdSOuyang Changchun void
1327cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1328cfae07fdSOuyang Changchun {
1329cfae07fdSOuyang Changchun 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1330cfae07fdSOuyang Changchun 		printf("\nSet link up failed.\n");
1331cfae07fdSOuyang Changchun }
1332cfae07fdSOuyang Changchun 
1333cfae07fdSOuyang Changchun void
1334cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1335cfae07fdSOuyang Changchun {
1336cfae07fdSOuyang Changchun 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1337cfae07fdSOuyang Changchun 		printf("\nSet link down failed.\n");
1338cfae07fdSOuyang Changchun }
1339cfae07fdSOuyang Changchun 
1340ce8d5614SIntel static int
1341ce8d5614SIntel all_ports_started(void)
1342ce8d5614SIntel {
1343ce8d5614SIntel 	portid_t pi;
1344ce8d5614SIntel 	struct rte_port *port;
1345ce8d5614SIntel 
13467d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1347ce8d5614SIntel 		port = &ports[pi];
1348ce8d5614SIntel 		/* Check if there is a port which is not started */
134941b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
135041b05095SBernard Iremonger 			(port->slave_flag == 0))
1351ce8d5614SIntel 			return 0;
1352ce8d5614SIntel 	}
1353ce8d5614SIntel 
1354ce8d5614SIntel 	/* All ports are started */
1355ce8d5614SIntel 	return 1;
1356ce8d5614SIntel }
1357ce8d5614SIntel 
1358148f963fSBruce Richardson int
1359edab33b1STetsuya Mukawa all_ports_stopped(void)
1360edab33b1STetsuya Mukawa {
1361edab33b1STetsuya Mukawa 	portid_t pi;
1362edab33b1STetsuya Mukawa 	struct rte_port *port;
1363edab33b1STetsuya Mukawa 
13647d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1365edab33b1STetsuya Mukawa 		port = &ports[pi];
136641b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STOPPED) &&
136741b05095SBernard Iremonger 			(port->slave_flag == 0))
1368edab33b1STetsuya Mukawa 			return 0;
1369edab33b1STetsuya Mukawa 	}
1370edab33b1STetsuya Mukawa 
1371edab33b1STetsuya Mukawa 	return 1;
1372edab33b1STetsuya Mukawa }
1373edab33b1STetsuya Mukawa 
1374edab33b1STetsuya Mukawa int
1375edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
1376edab33b1STetsuya Mukawa {
1377edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1378edab33b1STetsuya Mukawa 		return 0;
1379edab33b1STetsuya Mukawa 
1380edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1381edab33b1STetsuya Mukawa 		return 0;
1382edab33b1STetsuya Mukawa 
1383edab33b1STetsuya Mukawa 	return 1;
1384edab33b1STetsuya Mukawa }
1385edab33b1STetsuya Mukawa 
1386edab33b1STetsuya Mukawa static int
1387edab33b1STetsuya Mukawa port_is_closed(portid_t port_id)
1388edab33b1STetsuya Mukawa {
1389edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1390edab33b1STetsuya Mukawa 		return 0;
1391edab33b1STetsuya Mukawa 
1392edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1393edab33b1STetsuya Mukawa 		return 0;
1394edab33b1STetsuya Mukawa 
1395edab33b1STetsuya Mukawa 	return 1;
1396edab33b1STetsuya Mukawa }
1397edab33b1STetsuya Mukawa 
1398edab33b1STetsuya Mukawa int
1399ce8d5614SIntel start_port(portid_t pid)
1400ce8d5614SIntel {
140192d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
1402ce8d5614SIntel 	portid_t pi;
1403ce8d5614SIntel 	queueid_t qi;
1404ce8d5614SIntel 	struct rte_port *port;
14052950a769SDeclan Doherty 	struct ether_addr mac_addr;
140676ad4a2dSGaetan Rivet 	enum rte_eth_event_type event_type;
1407ce8d5614SIntel 
14084468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
14094468635fSMichael Qiu 		return 0;
14104468635fSMichael Qiu 
1411ce8d5614SIntel 	if (dcb_config)
1412ce8d5614SIntel 		dcb_test = 1;
14137d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1414edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1415ce8d5614SIntel 			continue;
1416ce8d5614SIntel 
141792d2703eSMichael Qiu 		need_check_link_status = 0;
1418ce8d5614SIntel 		port = &ports[pi];
1419ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1420ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1421ce8d5614SIntel 			printf("Port %d is not stopped\n", pi);
1422ce8d5614SIntel 			continue;
1423ce8d5614SIntel 		}
1424ce8d5614SIntel 
1425ce8d5614SIntel 		if (port->need_reconfig > 0) {
1426ce8d5614SIntel 			port->need_reconfig = 0;
1427ce8d5614SIntel 
14285706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
142920a0286fSLiu Xiaofeng 					port->socket_id);
1430ce8d5614SIntel 			/* configure port */
1431ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1432ce8d5614SIntel 						&(port->dev_conf));
1433ce8d5614SIntel 			if (diag != 0) {
1434ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1435ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1436ce8d5614SIntel 					printf("Port %d can not be set back "
1437ce8d5614SIntel 							"to stopped\n", pi);
1438ce8d5614SIntel 				printf("Failed to configure port %d\n", pi);
1439ce8d5614SIntel 				/* try to reconfigure port next time */
1440ce8d5614SIntel 				port->need_reconfig = 1;
1441148f963fSBruce Richardson 				return -1;
1442ce8d5614SIntel 			}
1443ce8d5614SIntel 		}
1444ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
1445ce8d5614SIntel 			port->need_reconfig_queues = 0;
1446ce8d5614SIntel 			/* setup tx queues */
1447ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
1448b6ea6408SIntel 				if ((numa_support) &&
1449b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
1450b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1451b6ea6408SIntel 						nb_txd,txring_numa[pi],
1452b6ea6408SIntel 						&(port->tx_conf));
1453b6ea6408SIntel 				else
1454b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1455b6ea6408SIntel 						nb_txd,port->socket_id,
1456b6ea6408SIntel 						&(port->tx_conf));
1457b6ea6408SIntel 
1458ce8d5614SIntel 				if (diag == 0)
1459ce8d5614SIntel 					continue;
1460ce8d5614SIntel 
1461ce8d5614SIntel 				/* Fail to setup tx queue, return */
1462ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1463ce8d5614SIntel 							RTE_PORT_HANDLING,
1464ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1465ce8d5614SIntel 					printf("Port %d can not be set back "
1466ce8d5614SIntel 							"to stopped\n", pi);
1467ce8d5614SIntel 				printf("Failed to configure port %d tx queues\n", pi);
1468ce8d5614SIntel 				/* try to reconfigure queues next time */
1469ce8d5614SIntel 				port->need_reconfig_queues = 1;
1470148f963fSBruce Richardson 				return -1;
1471ce8d5614SIntel 			}
1472ce8d5614SIntel 			/* setup rx queues */
1473ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
1474b6ea6408SIntel 				if ((numa_support) &&
1475b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1476b6ea6408SIntel 					struct rte_mempool * mp =
1477b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
1478b6ea6408SIntel 					if (mp == NULL) {
1479b6ea6408SIntel 						printf("Failed to setup RX queue: "
1480b6ea6408SIntel 							"no mempool allocation "
1481b6ea6408SIntel 							"on socket %d\n",
1482b6ea6408SIntel 							rxring_numa[pi]);
1483148f963fSBruce Richardson 						return -1;
1484b6ea6408SIntel 					}
1485b6ea6408SIntel 
1486b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1487b6ea6408SIntel 					     nb_rxd,rxring_numa[pi],
1488b6ea6408SIntel 					     &(port->rx_conf),mp);
14891e1d6bddSBernard Iremonger 				} else {
14901e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
14911e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
14921e1d6bddSBernard Iremonger 					if (mp == NULL) {
14931e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue: "
14941e1d6bddSBernard Iremonger 							"no mempool allocation "
14951e1d6bddSBernard Iremonger 							"on socket %d\n",
14961e1d6bddSBernard Iremonger 							port->socket_id);
14971e1d6bddSBernard Iremonger 						return -1;
1498b6ea6408SIntel 					}
1499b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1500b6ea6408SIntel 					     nb_rxd,port->socket_id,
15011e1d6bddSBernard Iremonger 					     &(port->rx_conf), mp);
15021e1d6bddSBernard Iremonger 				}
1503ce8d5614SIntel 				if (diag == 0)
1504ce8d5614SIntel 					continue;
1505ce8d5614SIntel 
1506ce8d5614SIntel 				/* Fail to setup rx queue, return */
1507ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1508ce8d5614SIntel 							RTE_PORT_HANDLING,
1509ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1510ce8d5614SIntel 					printf("Port %d can not be set back "
1511ce8d5614SIntel 							"to stopped\n", pi);
1512ce8d5614SIntel 				printf("Failed to configure port %d rx queues\n", pi);
1513ce8d5614SIntel 				/* try to reconfigure queues next time */
1514ce8d5614SIntel 				port->need_reconfig_queues = 1;
1515148f963fSBruce Richardson 				return -1;
1516ce8d5614SIntel 			}
1517ce8d5614SIntel 		}
151876ad4a2dSGaetan Rivet 
151976ad4a2dSGaetan Rivet 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
152076ad4a2dSGaetan Rivet 		     event_type < RTE_ETH_EVENT_MAX;
152176ad4a2dSGaetan Rivet 		     event_type++) {
152276ad4a2dSGaetan Rivet 			diag = rte_eth_dev_callback_register(pi,
152376ad4a2dSGaetan Rivet 							event_type,
152476ad4a2dSGaetan Rivet 							eth_event_callback,
152576ad4a2dSGaetan Rivet 							NULL);
152676ad4a2dSGaetan Rivet 			if (diag) {
152776ad4a2dSGaetan Rivet 				printf("Failed to setup event callback for event %d\n",
152876ad4a2dSGaetan Rivet 					event_type);
152976ad4a2dSGaetan Rivet 				return -1;
153076ad4a2dSGaetan Rivet 			}
153176ad4a2dSGaetan Rivet 		}
153276ad4a2dSGaetan Rivet 
1533ce8d5614SIntel 		/* start port */
1534ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
1535ce8d5614SIntel 			printf("Failed to start port %d\n", pi);
1536ce8d5614SIntel 
1537ce8d5614SIntel 			/* Failed to start port, set status back to stopped */
1538ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
1539ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1540ce8d5614SIntel 				printf("Port %d can not be set back to "
1541ce8d5614SIntel 							"stopped\n", pi);
1542ce8d5614SIntel 			continue;
1543ce8d5614SIntel 		}
1544ce8d5614SIntel 
1545ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1546ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1547ce8d5614SIntel 			printf("Port %d cannot be set to started\n", pi);
1548ce8d5614SIntel 
15492950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
1550d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
15512950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
15522950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
15532950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1554d8c89163SZijie Pan 
1555ce8d5614SIntel 		/* at least one port started, need to check link status */
1556ce8d5614SIntel 		need_check_link_status = 1;
1557ce8d5614SIntel 	}
1558ce8d5614SIntel 
155992d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
1560edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
156192d2703eSMichael Qiu 	else if (need_check_link_status == 0)
1562ce8d5614SIntel 		printf("Please stop the ports first\n");
1563ce8d5614SIntel 
1564ce8d5614SIntel 	printf("Done\n");
1565148f963fSBruce Richardson 	return 0;
1566ce8d5614SIntel }
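
/*
 * Illustrative sketch (not part of testpmd): stripped of the port state
 * machine, NUMA handling and error recovery above, the bring-up sequence
 * that start_port() performs for each port is the usual ethdev one shown
 * below.  The single RX/TX queue, the 512-descriptor rings and the "mp"
 * mempool argument are assumptions made only for this example.
 */
#if 0	/* example only, never compiled */
static int
bring_up_one_port(uint8_t pid, struct rte_mempool *mp)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* 1 RX queue, 1 TX queue, default offloads */
	if (rte_eth_dev_configure(pid, 1, 1, &conf) < 0)
		return -1;
	/* NULL conf pointers select the driver defaults */
	if (rte_eth_tx_queue_setup(pid, 0, 512,
			rte_eth_dev_socket_id(pid), NULL) < 0)
		return -1;
	if (rte_eth_rx_queue_setup(pid, 0, 512,
			rte_eth_dev_socket_id(pid), NULL, mp) < 0)
		return -1;
	/* port can now receive and transmit */
	return rte_eth_dev_start(pid);
}
#endif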
1567ce8d5614SIntel 
1568ce8d5614SIntel void
1569ce8d5614SIntel stop_port(portid_t pid)
1570ce8d5614SIntel {
1571ce8d5614SIntel 	portid_t pi;
1572ce8d5614SIntel 	struct rte_port *port;
1573ce8d5614SIntel 	int need_check_link_status = 0;
1574ce8d5614SIntel 
1575ce8d5614SIntel 	if (dcb_test) {
1576ce8d5614SIntel 		dcb_test = 0;
1577ce8d5614SIntel 		dcb_config = 0;
1578ce8d5614SIntel 	}
15794468635fSMichael Qiu 
15804468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
15814468635fSMichael Qiu 		return;
15824468635fSMichael Qiu 
1583ce8d5614SIntel 	printf("Stopping ports...\n");
1584ce8d5614SIntel 
15857d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
15864468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1587ce8d5614SIntel 			continue;
1588ce8d5614SIntel 
1589a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1590a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
1591a8ef3e3aSBernard Iremonger 			continue;
1592a8ef3e3aSBernard Iremonger 		}
1593a8ef3e3aSBernard Iremonger 
15940e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
15950e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
15960e545d30SBernard Iremonger 			continue;
15970e545d30SBernard Iremonger 		}
15980e545d30SBernard Iremonger 
1599ce8d5614SIntel 		port = &ports[pi];
1600ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1601ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
1602ce8d5614SIntel 			continue;
1603ce8d5614SIntel 
1604ce8d5614SIntel 		rte_eth_dev_stop(pi);
1605ce8d5614SIntel 
1606ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1607ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1608ce8d5614SIntel 			printf("Port %d cannot be set to stopped\n", pi);
1609ce8d5614SIntel 		need_check_link_status = 1;
1610ce8d5614SIntel 	}
1611bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
1612edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
1613ce8d5614SIntel 
1614ce8d5614SIntel 	printf("Done\n");
1615ce8d5614SIntel }
1616ce8d5614SIntel 
1617ce8d5614SIntel void
1618ce8d5614SIntel close_port(portid_t pid)
1619ce8d5614SIntel {
1620ce8d5614SIntel 	portid_t pi;
1621ce8d5614SIntel 	struct rte_port *port;
1622ce8d5614SIntel 
16234468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
16244468635fSMichael Qiu 		return;
16254468635fSMichael Qiu 
1626ce8d5614SIntel 	printf("Closing ports...\n");
1627ce8d5614SIntel 
16287d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
16294468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1630ce8d5614SIntel 			continue;
1631ce8d5614SIntel 
1632a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1633a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
1634a8ef3e3aSBernard Iremonger 			continue;
1635a8ef3e3aSBernard Iremonger 		}
1636a8ef3e3aSBernard Iremonger 
16370e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
16380e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
16390e545d30SBernard Iremonger 			continue;
16400e545d30SBernard Iremonger 		}
16410e545d30SBernard Iremonger 
1642ce8d5614SIntel 		port = &ports[pi];
1643ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1644d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1645d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
1646d4e8ad64SMichael Qiu 			continue;
1647d4e8ad64SMichael Qiu 		}
1648d4e8ad64SMichael Qiu 
1649d4e8ad64SMichael Qiu 		if (rte_atomic16_cmpset(&(port->port_status),
1650ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1651ce8d5614SIntel 			printf("Port %d is not stopped\n", pi);
1652ce8d5614SIntel 			continue;
1653ce8d5614SIntel 		}
1654ce8d5614SIntel 
1655938a184aSAdrien Mazarguil 		if (port->flow_list)
1656938a184aSAdrien Mazarguil 			port_flow_flush(pi);
1657ce8d5614SIntel 		rte_eth_dev_close(pi);
1658ce8d5614SIntel 
1659ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1660ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1661b38bb262SPablo de Lara 			printf("Port %d cannot be set to closed\n", pi);
1662ce8d5614SIntel 	}
1663ce8d5614SIntel 
1664ce8d5614SIntel 	printf("Done\n");
1665ce8d5614SIntel }
1666ce8d5614SIntel 
1667edab33b1STetsuya Mukawa void
1668edab33b1STetsuya Mukawa attach_port(char *identifier)
1669ce8d5614SIntel {
1670ebf5e9b7SBernard Iremonger 	portid_t pi = 0;
1671931126baSBernard Iremonger 	unsigned int socket_id;
1672ce8d5614SIntel 
1673edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
1674edab33b1STetsuya Mukawa 
1675edab33b1STetsuya Mukawa 	if (identifier == NULL) {
1676edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
1677edab33b1STetsuya Mukawa 		return;
1678ce8d5614SIntel 	}
1679ce8d5614SIntel 
1680edab33b1STetsuya Mukawa 	if (rte_eth_dev_attach(identifier, &pi))
1681edab33b1STetsuya Mukawa 		return;
1682edab33b1STetsuya Mukawa 
1683931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1684931126baSBernard Iremonger 	/* if socket_id is invalid, set to 0 */
1685931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
1686931126baSBernard Iremonger 		socket_id = 0;
1687931126baSBernard Iremonger 	reconfig(pi, socket_id);
1688edab33b1STetsuya Mukawa 	rte_eth_promiscuous_enable(pi);
1689edab33b1STetsuya Mukawa 
1690edab33b1STetsuya Mukawa 	nb_ports = rte_eth_dev_count();
1691edab33b1STetsuya Mukawa 
1692edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
1693edab33b1STetsuya Mukawa 
1694edab33b1STetsuya Mukawa 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1695edab33b1STetsuya Mukawa 	printf("Done\n");
1696edab33b1STetsuya Mukawa }
1697edab33b1STetsuya Mukawa 
1698edab33b1STetsuya Mukawa void
1699edab33b1STetsuya Mukawa detach_port(uint8_t port_id)
17005f4ec54fSChen Jing D(Mark) {
1701edab33b1STetsuya Mukawa 	char name[RTE_ETH_NAME_MAX_LEN];
17025f4ec54fSChen Jing D(Mark) 
1703edab33b1STetsuya Mukawa 	printf("Detaching a port...\n");
17045f4ec54fSChen Jing D(Mark) 
1705edab33b1STetsuya Mukawa 	if (!port_is_closed(port_id)) {
1706edab33b1STetsuya Mukawa 		printf("Please close port first\n");
1707edab33b1STetsuya Mukawa 		return;
1708edab33b1STetsuya Mukawa 	}
1709edab33b1STetsuya Mukawa 
1710938a184aSAdrien Mazarguil 	if (ports[port_id].flow_list)
1711938a184aSAdrien Mazarguil 		port_flow_flush(port_id);
1712938a184aSAdrien Mazarguil 
1713edab33b1STetsuya Mukawa 	if (rte_eth_dev_detach(port_id, name))
1714edab33b1STetsuya Mukawa 		return;
1715edab33b1STetsuya Mukawa 
1716edab33b1STetsuya Mukawa 	nb_ports = rte_eth_dev_count();
1717edab33b1STetsuya Mukawa 
1718edab33b1STetsuya Mukawa 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1719edab33b1STetsuya Mukawa 			name, nb_ports);
1720edab33b1STetsuya Mukawa 	printf("Done\n");
1721edab33b1STetsuya Mukawa 	return;
17225f4ec54fSChen Jing D(Mark) }
17235f4ec54fSChen Jing D(Mark) 
1724af75078fSIntel void
1725af75078fSIntel pmd_test_exit(void)
1726af75078fSIntel {
1727af75078fSIntel 	portid_t pt_id;
1728af75078fSIntel 
17298210ec25SPablo de Lara 	if (test_done == 0)
17308210ec25SPablo de Lara 		stop_packet_forwarding();
17318210ec25SPablo de Lara 
1732d3a274ceSZhihong Wang 	if (ports != NULL) {
1733d3a274ceSZhihong Wang 		no_link_check = 1;
17347d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
1735d3a274ceSZhihong Wang 			printf("\nShutting down port %d...\n", pt_id);
1736af75078fSIntel 			fflush(stdout);
1737d3a274ceSZhihong Wang 			stop_port(pt_id);
1738d3a274ceSZhihong Wang 			close_port(pt_id);
1739af75078fSIntel 		}
1740d3a274ceSZhihong Wang 	}
1741d3a274ceSZhihong Wang 	printf("\nBye...\n");
1742af75078fSIntel }
1743af75078fSIntel 
1744af75078fSIntel typedef void (*cmd_func_t)(void);
1745af75078fSIntel struct pmd_test_command {
1746af75078fSIntel 	const char *cmd_name;
1747af75078fSIntel 	cmd_func_t cmd_func;
1748af75078fSIntel };
1749af75078fSIntel 
1750af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1751af75078fSIntel 
1752ce8d5614SIntel /* Check the link status of all ports for up to 9 s, then print the result */
1753af75078fSIntel static void
1754edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
1755af75078fSIntel {
1756ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
1757ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1758ce8d5614SIntel 	uint8_t portid, count, all_ports_up, print_flag = 0;
1759ce8d5614SIntel 	struct rte_eth_link link;
1760ce8d5614SIntel 
1761ce8d5614SIntel 	printf("Checking link statuses...\n");
1762ce8d5614SIntel 	fflush(stdout);
1763ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1764ce8d5614SIntel 		all_ports_up = 1;
17657d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
1766ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
1767ce8d5614SIntel 				continue;
1768ce8d5614SIntel 			memset(&link, 0, sizeof(link));
1769ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
1770ce8d5614SIntel 			/* print link status if flag set */
1771ce8d5614SIntel 			if (print_flag == 1) {
1772ce8d5614SIntel 				if (link.link_status)
1773ce8d5614SIntel 					printf("Port %d Link Up - speed %u "
1774ce8d5614SIntel 						"Mbps - %s\n", (uint8_t)portid,
1775ce8d5614SIntel 						(unsigned)link.link_speed,
1776ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1777ce8d5614SIntel 					("full-duplex") : ("half-duplex"));
1778ce8d5614SIntel 				else
1779ce8d5614SIntel 					printf("Port %d Link Down\n",
1780ce8d5614SIntel 						(uint8_t)portid);
1781ce8d5614SIntel 				continue;
1782ce8d5614SIntel 			}
1783ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
178409419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
1785ce8d5614SIntel 				all_ports_up = 0;
1786ce8d5614SIntel 				break;
1787ce8d5614SIntel 			}
1788ce8d5614SIntel 		}
1789ce8d5614SIntel 		/* after finally printing all link status, get out */
1790ce8d5614SIntel 		if (print_flag == 1)
1791ce8d5614SIntel 			break;
1792ce8d5614SIntel 
1793ce8d5614SIntel 		if (all_ports_up == 0) {
1794ce8d5614SIntel 			fflush(stdout);
1795ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
1796ce8d5614SIntel 		}
1797ce8d5614SIntel 
1798ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
1799ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1800ce8d5614SIntel 			print_flag = 1;
1801ce8d5614SIntel 		}
18028ea656f8SGaetan Rivet 
18038ea656f8SGaetan Rivet 		if (lsc_interrupt)
18048ea656f8SGaetan Rivet 			break;
1805ce8d5614SIntel 	}
1806af75078fSIntel }
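
/*
 * Illustrative sketch (not part of testpmd): the same polling idea reduced
 * to a single port, i.e. "wait until the link reports up or the ~9 s budget
 * (MAX_CHECK_TIME * CHECK_INTERVAL) is spent".  The wait_link_up() name is
 * hypothetical and exists only for this example.
 */
#if 0	/* example only, never compiled */
static int
wait_link_up(uint8_t portid)
{
	struct rte_eth_link link;
	uint8_t count;

	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(portid, &link);
		if (link.link_status == ETH_LINK_UP)
			return 0;		/* link came up */
		rte_delay_ms(CHECK_INTERVAL);	/* 100 ms between polls */
	}
	return -1;				/* still down after ~9 s */
}
#endif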
1807af75078fSIntel 
1808284c908cSGaetan Rivet static void
1809284c908cSGaetan Rivet rmv_event_callback(void *arg)
1810284c908cSGaetan Rivet {
1811284c908cSGaetan Rivet 	struct rte_eth_dev *dev;
1812284c908cSGaetan Rivet 	struct rte_devargs *da;
1813284c908cSGaetan Rivet 	char name[32] = "";
1814284c908cSGaetan Rivet 	uint8_t port_id = (intptr_t)arg;
1815284c908cSGaetan Rivet 
1816284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1817284c908cSGaetan Rivet 	dev = &rte_eth_devices[port_id];
1818284c908cSGaetan Rivet 	da = dev->device->devargs;
1819284c908cSGaetan Rivet 
1820284c908cSGaetan Rivet 	stop_port(port_id);
1821284c908cSGaetan Rivet 	close_port(port_id);
1822284c908cSGaetan Rivet 	if (da->type == RTE_DEVTYPE_VIRTUAL)
1823284c908cSGaetan Rivet 		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1824284c908cSGaetan Rivet 	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
18253dcfe039SThomas Monjalon 		rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1826284c908cSGaetan Rivet 	printf("removing device %s\n", name);
1827284c908cSGaetan Rivet 	rte_eal_dev_detach(name);
1828284c908cSGaetan Rivet 	dev->state = RTE_ETH_DEV_UNUSED;
1829284c908cSGaetan Rivet }
1830284c908cSGaetan Rivet 
183176ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
1832*d6af1a13SBernard Iremonger static int
1833*d6af1a13SBernard Iremonger eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
1834*d6af1a13SBernard Iremonger 		  void *ret_param)
183576ad4a2dSGaetan Rivet {
183676ad4a2dSGaetan Rivet 	static const char * const event_desc[] = {
183776ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
183876ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
183976ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
184076ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
184176ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
184276ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
184376ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
184476ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MAX] = NULL,
184576ad4a2dSGaetan Rivet 	};
184676ad4a2dSGaetan Rivet 
184776ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
1848*d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
184976ad4a2dSGaetan Rivet 
185076ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
185176ad4a2dSGaetan Rivet 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
185276ad4a2dSGaetan Rivet 			port_id, __func__, type);
185376ad4a2dSGaetan Rivet 		fflush(stderr);
18543af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
185576ad4a2dSGaetan Rivet 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
185676ad4a2dSGaetan Rivet 			event_desc[type]);
185776ad4a2dSGaetan Rivet 		fflush(stdout);
185876ad4a2dSGaetan Rivet 	}
1859284c908cSGaetan Rivet 
1860284c908cSGaetan Rivet 	switch (type) {
1861284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
1862284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
1863284c908cSGaetan Rivet 				rmv_event_callback, (void *)(intptr_t)port_id))
1864284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
1865284c908cSGaetan Rivet 		break;
1866284c908cSGaetan Rivet 	default:
1867284c908cSGaetan Rivet 		break;
1868284c908cSGaetan Rivet 	}
1869*d6af1a13SBernard Iremonger 	return 0;
187076ad4a2dSGaetan Rivet }
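
/*
 * Illustrative sketch (not part of testpmd): eth_event_callback() above is
 * registered by start_port() for every event type.  Registering it for a
 * single event, here link state changes, would look as follows; picking
 * RTE_ETH_EVENT_INTR_LSC is only an example.
 */
#if 0	/* example only, never compiled */
static int
register_lsc_callback(uint8_t pid)
{
	/*
	 * The callback runs in the interrupt thread, which is why heavy work
	 * such as the device removal above is deferred via rte_eal_alarm_set()
	 * instead of being done directly in the callback.
	 */
	return rte_eth_dev_callback_register(pid, RTE_ETH_EVENT_INTR_LSC,
					     eth_event_callback, NULL);
}
#endif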
187176ad4a2dSGaetan Rivet 
1872013af9b6SIntel static int
1873013af9b6SIntel set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1874af75078fSIntel {
1875013af9b6SIntel 	uint16_t i;
1876af75078fSIntel 	int diag;
1877013af9b6SIntel 	uint8_t mapping_found = 0;
1878af75078fSIntel 
1879013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1880013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1881013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1882013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1883013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
1884013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
1885013af9b6SIntel 			if (diag != 0)
1886013af9b6SIntel 				return diag;
1887013af9b6SIntel 			mapping_found = 1;
1888af75078fSIntel 		}
1889013af9b6SIntel 	}
1890013af9b6SIntel 	if (mapping_found)
1891013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
1892013af9b6SIntel 	return 0;
1893013af9b6SIntel }
1894013af9b6SIntel 
1895013af9b6SIntel static int
1896013af9b6SIntel set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1897013af9b6SIntel {
1898013af9b6SIntel 	uint16_t i;
1899013af9b6SIntel 	int diag;
1900013af9b6SIntel 	uint8_t mapping_found = 0;
1901013af9b6SIntel 
1902013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1903013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1904013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1905013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1906013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
1907013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
1908013af9b6SIntel 			if (diag != 0)
1909013af9b6SIntel 				return diag;
1910013af9b6SIntel 			mapping_found = 1;
1911013af9b6SIntel 		}
1912013af9b6SIntel 	}
1913013af9b6SIntel 	if (mapping_found)
1914013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
1915013af9b6SIntel 	return 0;
1916013af9b6SIntel }
1917013af9b6SIntel 
1918013af9b6SIntel static void
1919013af9b6SIntel map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1920013af9b6SIntel {
1921013af9b6SIntel 	int diag = 0;
1922013af9b6SIntel 
1923013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1924af75078fSIntel 	if (diag != 0) {
1925013af9b6SIntel 		if (diag == -ENOTSUP) {
1926013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
1927013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1928013af9b6SIntel 		}
1929013af9b6SIntel 		else
1930013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1931013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
1932013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1933af75078fSIntel 					pi, diag);
1934af75078fSIntel 	}
1935013af9b6SIntel 
1936013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1937af75078fSIntel 	if (diag != 0) {
1938013af9b6SIntel 		if (diag == -ENOTSUP) {
1939013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
1940013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1941013af9b6SIntel 		}
1942013af9b6SIntel 		else
1943013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1944013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
1945013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1946af75078fSIntel 					pi, diag);
1947af75078fSIntel 	}
1948af75078fSIntel }
1949af75078fSIntel 
1950f2c5125aSPablo de Lara static void
1951f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
1952f2c5125aSPablo de Lara {
1953f2c5125aSPablo de Lara 	port->rx_conf = port->dev_info.default_rxconf;
1954f2c5125aSPablo de Lara 	port->tx_conf = port->dev_info.default_txconf;
1955f2c5125aSPablo de Lara 
1956f2c5125aSPablo de Lara 	/* Check if any RX/TX parameters have been passed */
1957f2c5125aSPablo de Lara 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1958f2c5125aSPablo de Lara 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1959f2c5125aSPablo de Lara 
1960f2c5125aSPablo de Lara 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1961f2c5125aSPablo de Lara 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1962f2c5125aSPablo de Lara 
1963f2c5125aSPablo de Lara 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1964f2c5125aSPablo de Lara 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1965f2c5125aSPablo de Lara 
1966f2c5125aSPablo de Lara 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1967f2c5125aSPablo de Lara 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1968f2c5125aSPablo de Lara 
1969f2c5125aSPablo de Lara 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1970f2c5125aSPablo de Lara 		port->rx_conf.rx_drop_en = rx_drop_en;
1971f2c5125aSPablo de Lara 
1972f2c5125aSPablo de Lara 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1973f2c5125aSPablo de Lara 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1974f2c5125aSPablo de Lara 
1975f2c5125aSPablo de Lara 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1976f2c5125aSPablo de Lara 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1977f2c5125aSPablo de Lara 
1978f2c5125aSPablo de Lara 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1979f2c5125aSPablo de Lara 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1980f2c5125aSPablo de Lara 
1981f2c5125aSPablo de Lara 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1982f2c5125aSPablo de Lara 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1983f2c5125aSPablo de Lara 
1984f2c5125aSPablo de Lara 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1985f2c5125aSPablo de Lara 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1986f2c5125aSPablo de Lara 
1987f2c5125aSPablo de Lara 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1988f2c5125aSPablo de Lara 		port->tx_conf.txq_flags = txq_flags;
1989f2c5125aSPablo de Lara }
1990f2c5125aSPablo de Lara 
1991013af9b6SIntel void
1992013af9b6SIntel init_port_config(void)
1993013af9b6SIntel {
1994013af9b6SIntel 	portid_t pid;
1995013af9b6SIntel 	struct rte_port *port;
1996013af9b6SIntel 
19977d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1998013af9b6SIntel 		port = &ports[pid];
1999013af9b6SIntel 		port->dev_conf.rxmode = rx_mode;
2000013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
20013ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2002013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2003013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2004af75078fSIntel 		} else {
2005013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2006013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2007af75078fSIntel 		}
20083ce690d3SBruce Richardson 
20095f592039SJingjing Wu 		if (port->dcb_flag == 0) {
20103ce690d3SBruce Richardson 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
20113ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
20123ce690d3SBruce Richardson 			else
20133ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
20143ce690d3SBruce Richardson 		}
20153ce690d3SBruce Richardson 
2016f2c5125aSPablo de Lara 		rxtx_port_config(port);
2017013af9b6SIntel 
2018013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
2019013af9b6SIntel 
2020013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
202150c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2022e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
20237b7e5ba7SIntel #endif
20248ea656f8SGaetan Rivet 
20258ea656f8SGaetan Rivet 		if (lsc_interrupt &&
20268ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
20278ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
20288ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
2029284c908cSGaetan Rivet 		if (rmv_interrupt &&
2030284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
2031284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
2032284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
2033013af9b6SIntel 	}
2034013af9b6SIntel }
2035013af9b6SIntel 
203641b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
203741b05095SBernard Iremonger {
203841b05095SBernard Iremonger 	struct rte_port *port;
203941b05095SBernard Iremonger 
204041b05095SBernard Iremonger 	port = &ports[slave_pid];
204141b05095SBernard Iremonger 	port->slave_flag = 1;
204241b05095SBernard Iremonger }
204341b05095SBernard Iremonger 
204441b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
204541b05095SBernard Iremonger {
204641b05095SBernard Iremonger 	struct rte_port *port;
204741b05095SBernard Iremonger 
204841b05095SBernard Iremonger 	port = &ports[slave_pid];
204941b05095SBernard Iremonger 	port->slave_flag = 0;
205041b05095SBernard Iremonger }
205141b05095SBernard Iremonger 
20520e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
20530e545d30SBernard Iremonger {
20540e545d30SBernard Iremonger 	struct rte_port *port;
20550e545d30SBernard Iremonger 
20560e545d30SBernard Iremonger 	port = &ports[slave_pid];
20570e545d30SBernard Iremonger 	return port->slave_flag;
20580e545d30SBernard Iremonger }
20590e545d30SBernard Iremonger 
2060013af9b6SIntel const uint16_t vlan_tags[] = {
2061013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
2062013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
2063013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
2064013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
2065013af9b6SIntel };
2066013af9b6SIntel 
2067013af9b6SIntel static  int
20681a572499SJingjing Wu get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
20691a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
20701a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
20711a572499SJingjing Wu 		 uint8_t pfc_en)
2072013af9b6SIntel {
2073013af9b6SIntel 	uint8_t i;
2074af75078fSIntel 
2075af75078fSIntel 	/*
2076013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2077013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
2078af75078fSIntel 	 */
20791a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
20801a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
20811a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
20821a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
20831a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2084013af9b6SIntel 
2085547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
20861a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
20871a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
20881a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
20891a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
20901a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
20911a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2092013af9b6SIntel 
20931a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
20941a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
20951a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
20961a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
20971a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2098af75078fSIntel 		}
2099013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
21001a572499SJingjing Wu 			vmdq_rx_conf->dcb_tc[i] = i;
21011a572499SJingjing Wu 			vmdq_tx_conf->dcb_tc[i] = i;
2102013af9b6SIntel 		}
2103013af9b6SIntel 
2104013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
210532e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
210632e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
21071a572499SJingjing Wu 	} else {
21081a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
21091a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
21101a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
21111a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2112013af9b6SIntel 
21131a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
21141a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
21151a572499SJingjing Wu 
2116bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2117bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
2118bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
2119013af9b6SIntel 		}
21201a572499SJingjing Wu 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
21211a572499SJingjing Wu 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
212232e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
21231a572499SJingjing Wu 	}
21241a572499SJingjing Wu 
21251a572499SJingjing Wu 	if (pfc_en)
21261a572499SJingjing Wu 		eth_conf->dcb_capability_en =
21271a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2128013af9b6SIntel 	else
2129013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2130013af9b6SIntel 
2131013af9b6SIntel 	return 0;
2132013af9b6SIntel }
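
/*
 * Worked example (illustrative): with dcb_mode == DCB_VT_ENABLED and
 * num_tcs == ETH_4_TCS, get_eth_dcb_conf() above creates 32 VMDQ pools and
 * maps vlan_tags[i] to pool (i % 32), e.g. pool_map[5] becomes
 * { .vlan_id = 5, .pools = 1 << 5 }, while each of the 8 user priorities i
 * is assigned to traffic class i.  In the non-VT branch with num_tcs ==
 * ETH_4_TCS, priority i is instead mapped to traffic class (i % 4).
 */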
2133013af9b6SIntel 
2134013af9b6SIntel int
21351a572499SJingjing Wu init_port_dcb_config(portid_t pid,
21361a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
21371a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
21381a572499SJingjing Wu 		     uint8_t pfc_en)
2139013af9b6SIntel {
2140013af9b6SIntel 	struct rte_eth_conf port_conf;
2141013af9b6SIntel 	struct rte_port *rte_port;
2142013af9b6SIntel 	int retval;
2143013af9b6SIntel 	uint16_t i;
2144013af9b6SIntel 
21452a977b89SWenzhuo Lu 	rte_port = &ports[pid];
2146013af9b6SIntel 
2147013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2148013af9b6SIntel 	/* Enter DCB configuration status */
2149013af9b6SIntel 	dcb_config = 1;
2150013af9b6SIntel 
2151013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
21521a572499SJingjing Wu 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2153013af9b6SIntel 	if (retval < 0)
2154013af9b6SIntel 		return retval;
21552a977b89SWenzhuo Lu 	port_conf.rxmode.hw_vlan_filter = 1;
2156013af9b6SIntel 
21572a977b89SWenzhuo Lu 	/**
21582a977b89SWenzhuo Lu 	 * Write the configuration into the device.
21592a977b89SWenzhuo Lu 	 * Set the numbers of RX & TX queues to 0, so
21602a977b89SWenzhuo Lu 	 * the RX & TX queues will not be setup.
21612a977b89SWenzhuo Lu 	 */
21622a977b89SWenzhuo Lu 	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
21632a977b89SWenzhuo Lu 
21642a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
21652a977b89SWenzhuo Lu 
21662a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
21672a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
21682a977b89SWenzhuo Lu 	 */
21692a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
21702a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
21712a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
21722a977b89SWenzhuo Lu 			" for port %d.", pid);
21732a977b89SWenzhuo Lu 			" for port %d.\n", pid);
21742a977b89SWenzhuo Lu 	}
21752a977b89SWenzhuo Lu 
21762a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
21772a977b89SWenzhuo Lu 	 * and the same number of rxq and txq in dcb mode
21782a977b89SWenzhuo Lu 	 */
21792a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
218086ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
218186ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
218286ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
218386ef65eeSBernard Iremonger 		} else {
21842a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
21852a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
218686ef65eeSBernard Iremonger 		}
21872a977b89SWenzhuo Lu 	} else {
21882a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
21892a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
21902a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
21912a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
21922a977b89SWenzhuo Lu 		} else {
21932a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
21942a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
21952a977b89SWenzhuo Lu 
21962a977b89SWenzhuo Lu 		}
21972a977b89SWenzhuo Lu 	}
21982a977b89SWenzhuo Lu 	rx_free_thresh = 64;
21992a977b89SWenzhuo Lu 
2200013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2201013af9b6SIntel 
2202f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
2203013af9b6SIntel 	/* VLAN filter */
2204013af9b6SIntel 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
22051a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2206013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
2207013af9b6SIntel 
2208013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2209013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
2210013af9b6SIntel 
22117741e4cfSIntel 	rte_port->dcb_flag = 1;
22127741e4cfSIntel 
2213013af9b6SIntel 	return 0;
2214af75078fSIntel }
2215af75078fSIntel 
2216ffc468ffSTetsuya Mukawa static void
2217ffc468ffSTetsuya Mukawa init_port(void)
2218ffc468ffSTetsuya Mukawa {
2219ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
2220ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
2221ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2222ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
2223ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
2224ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
2225ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
2226ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
2227ffc468ffSTetsuya Mukawa 	}
2228ffc468ffSTetsuya Mukawa }
2229ffc468ffSTetsuya Mukawa 
2230d3a274ceSZhihong Wang static void
2231d3a274ceSZhihong Wang force_quit(void)
2232d3a274ceSZhihong Wang {
2233d3a274ceSZhihong Wang 	pmd_test_exit();
2234d3a274ceSZhihong Wang 	prompt_exit();
2235d3a274ceSZhihong Wang }
2236d3a274ceSZhihong Wang 
2237d3a274ceSZhihong Wang static void
2238d3a274ceSZhihong Wang signal_handler(int signum)
2239d3a274ceSZhihong Wang {
2240d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
2241d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
2242d3a274ceSZhihong Wang 				signum);
2243102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
2244102b7329SReshma Pattan 		/* uninitialize packet capture framework */
2245102b7329SReshma Pattan 		rte_pdump_uninit();
2246102b7329SReshma Pattan #endif
224762d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
224862d3216dSReshma Pattan 		rte_latencystats_uninit();
224962d3216dSReshma Pattan #endif
2250d3a274ceSZhihong Wang 		force_quit();
2251d3a274ceSZhihong Wang 		/* exit with the expected status */
2252d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
2253d3a274ceSZhihong Wang 		kill(getpid(), signum);
2254d3a274ceSZhihong Wang 	}
2255d3a274ceSZhihong Wang }
2256d3a274ceSZhihong Wang 
2257af75078fSIntel int
2258af75078fSIntel main(int argc, char** argv)
2259af75078fSIntel {
2260af75078fSIntel 	int  diag;
2261013af9b6SIntel 	uint8_t port_id;
2262af75078fSIntel 
2263d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
2264d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
2265d3a274ceSZhihong Wang 
2266af75078fSIntel 	diag = rte_eal_init(argc, argv);
2267af75078fSIntel 	if (diag < 0)
2268af75078fSIntel 		rte_panic("Cannot init EAL\n");
2269af75078fSIntel 
2270102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
2271102b7329SReshma Pattan 	/* initialize packet capture framework */
2272102b7329SReshma Pattan 	rte_pdump_init(NULL);
2273102b7329SReshma Pattan #endif
2274102b7329SReshma Pattan 
2275af75078fSIntel 	nb_ports = (portid_t) rte_eth_dev_count();
2276af75078fSIntel 	if (nb_ports == 0)
2277edab33b1STetsuya Mukawa 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2278af75078fSIntel 
2279ffc468ffSTetsuya Mukawa 	/* allocate port structures, and init them */
2280ffc468ffSTetsuya Mukawa 	init_port();
2281ffc468ffSTetsuya Mukawa 
2282af75078fSIntel 	set_def_fwd_config();
2283af75078fSIntel 	if (nb_lcores == 0)
2284af75078fSIntel 		rte_panic("Empty set of forwarding logical cores - check the "
2285af75078fSIntel 			  "core mask supplied in the command parameters\n");
2286af75078fSIntel 
228765eb1e54SPablo de Lara 	/* Bitrate/latency stats disabled by default */
228830bcc68cSPablo de Lara #ifdef RTE_LIBRTE_BITRATE
2289e25e6c70SRemy Horton 	bitrate_enabled = 0;
229030bcc68cSPablo de Lara #endif
229165eb1e54SPablo de Lara #ifdef RTE_LIBRTE_LATENCY_STATS
229265eb1e54SPablo de Lara 	latencystats_enabled = 0;
229365eb1e54SPablo de Lara #endif
2294e25e6c70SRemy Horton 
2295af75078fSIntel 	argc -= diag;
2296af75078fSIntel 	argv += diag;
2297af75078fSIntel 	if (argc > 1)
2298af75078fSIntel 		launch_args_parse(argc, argv);
2299af75078fSIntel 
23005a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
23015a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
23025a8fb55cSReshma Pattan 
23035a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2304af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2305af75078fSIntel 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2306af75078fSIntel 		       nb_rxq, nb_txq);
2307af75078fSIntel 
2308af75078fSIntel 	init_config();
2309148f963fSBruce Richardson 	if (start_port(RTE_PORT_ALL) != 0)
2310148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2311af75078fSIntel 
2312ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
23137d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(port_id)
2314ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
2315af75078fSIntel 
23167e4441c8SRemy Horton 	/* Init metrics library */
23177e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
23187e4441c8SRemy Horton 
231962d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
232062d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
232162d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
232262d3216dSReshma Pattan 		if (ret)
232362d3216dSReshma Pattan 			printf("Warning: latencystats init()"
232462d3216dSReshma Pattan 				" returned error %d\n",	ret);
232562d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
232662d3216dSReshma Pattan 			latencystats_lcore_id);
232762d3216dSReshma Pattan 	}
232862d3216dSReshma Pattan #endif
232962d3216dSReshma Pattan 
23307e4441c8SRemy Horton 	/* Setup bitrate stats */
23317e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
2332e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
23337e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
23347e4441c8SRemy Horton 		if (bitrate_data == NULL)
2335e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
2336e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
23377e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
2338e25e6c70SRemy Horton 	}
23397e4441c8SRemy Horton #endif
23407e4441c8SRemy Horton 
23410d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
234281ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
234381ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
234481ef862bSAllain Legacy 
2345ca7feb22SCyril Chemparathy 	if (interactive == 1) {
2346ca7feb22SCyril Chemparathy 		if (auto_start) {
2347ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
2348ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
2349ca7feb22SCyril Chemparathy 		}
2350af75078fSIntel 		prompt();
23510de738cfSJiayu Hu 		pmd_test_exit();
2352ca7feb22SCyril Chemparathy 	} else
23530d56cb81SThomas Monjalon #endif
23540d56cb81SThomas Monjalon 	{
2355af75078fSIntel 		char c;
2356af75078fSIntel 		int rc;
2357af75078fSIntel 
2358af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
2359af75078fSIntel 		start_packet_forwarding(0);
2360af75078fSIntel 		printf("Press enter to exit\n");
2361af75078fSIntel 		rc = read(0, &c, 1);
2362d3a274ceSZhihong Wang 		pmd_test_exit();
2363af75078fSIntel 		if (rc < 0)
2364af75078fSIntel 			return 1;
2365af75078fSIntel 	}
2366af75078fSIntel 
2367af75078fSIntel 	return 0;
2368af75078fSIntel }
2369