xref: /dpdk/app/test-pmd/testpmd.c (revision e9d48c0072d36eb6423b45fba4ec49d0def6c36f)
1af75078fSIntel /*-
2af75078fSIntel  *   BSD LICENSE
3af75078fSIntel  *
4*e9d48c00SBruce Richardson  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5af75078fSIntel  *   All rights reserved.
6af75078fSIntel  *
7af75078fSIntel  *   Redistribution and use in source and binary forms, with or without
8af75078fSIntel  *   modification, are permitted provided that the following conditions
9af75078fSIntel  *   are met:
10af75078fSIntel  *
11af75078fSIntel  *     * Redistributions of source code must retain the above copyright
12af75078fSIntel  *       notice, this list of conditions and the following disclaimer.
13af75078fSIntel  *     * Redistributions in binary form must reproduce the above copyright
14af75078fSIntel  *       notice, this list of conditions and the following disclaimer in
15af75078fSIntel  *       the documentation and/or other materials provided with the
16af75078fSIntel  *       distribution.
17af75078fSIntel  *     * Neither the name of Intel Corporation nor the names of its
18af75078fSIntel  *       contributors may be used to endorse or promote products derived
19af75078fSIntel  *       from this software without specific prior written permission.
20af75078fSIntel  *
21af75078fSIntel  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22af75078fSIntel  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23af75078fSIntel  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24af75078fSIntel  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25af75078fSIntel  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26af75078fSIntel  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27af75078fSIntel  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28af75078fSIntel  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29af75078fSIntel  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30af75078fSIntel  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31af75078fSIntel  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32af75078fSIntel  */
33af75078fSIntel 
34af75078fSIntel #include <stdarg.h>
35af75078fSIntel #include <stdio.h>
36af75078fSIntel #include <stdlib.h>
37af75078fSIntel #include <signal.h>
38af75078fSIntel #include <string.h>
39af75078fSIntel #include <time.h>
40af75078fSIntel #include <fcntl.h>
41af75078fSIntel #include <sys/types.h>
42af75078fSIntel #include <errno.h>
43af75078fSIntel 
44af75078fSIntel #include <sys/queue.h>
45af75078fSIntel #include <sys/stat.h>
46af75078fSIntel 
47af75078fSIntel #include <stdint.h>
48af75078fSIntel #include <unistd.h>
49af75078fSIntel #include <inttypes.h>
50af75078fSIntel 
51af75078fSIntel #include <rte_common.h>
52af75078fSIntel #include <rte_byteorder.h>
53af75078fSIntel #include <rte_log.h>
54af75078fSIntel #include <rte_debug.h>
55af75078fSIntel #include <rte_cycles.h>
56af75078fSIntel #include <rte_memory.h>
57af75078fSIntel #include <rte_memcpy.h>
58af75078fSIntel #include <rte_memzone.h>
59af75078fSIntel #include <rte_launch.h>
60af75078fSIntel #include <rte_tailq.h>
61af75078fSIntel #include <rte_eal.h>
62af75078fSIntel #include <rte_per_lcore.h>
63af75078fSIntel #include <rte_lcore.h>
64af75078fSIntel #include <rte_atomic.h>
65af75078fSIntel #include <rte_branch_prediction.h>
66af75078fSIntel #include <rte_ring.h>
67af75078fSIntel #include <rte_mempool.h>
68af75078fSIntel #include <rte_malloc.h>
69af75078fSIntel #include <rte_mbuf.h>
70af75078fSIntel #include <rte_interrupts.h>
71af75078fSIntel #include <rte_pci.h>
72af75078fSIntel #include <rte_ether.h>
73af75078fSIntel #include <rte_ethdev.h>
74af75078fSIntel #include <rte_string_fns.h>
75af75078fSIntel 
76af75078fSIntel #include "testpmd.h"
77af75078fSIntel 
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the interactive command line? 0 = batch mode. */
uint8_t interactive = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 * Invariants maintained by the configuration commands:
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 * NULL-terminated table of the packet-forwarding modes selectable with
 * the "set fwd" command.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL, /* table terminator */
};

struct fwd_config cur_fwd_config; /**< Current forwarding configuration. */
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN, /* length of the single default segment */
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */

/* Current configuration is in DCB or not: 0 means it is not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status (set while a DCB test is in progress). */
uint8_t dcb_test = 0;

/* DCB on and VT on mapping is the default queue-mapping mode. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
173af75078fSIntel 
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

/* Default RX ring threshold configuration passed to queue setup. */
struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

/* Default TX ring threshold configuration passed to queue setup. */
struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoids flushing all the RX streams before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

/* Flow Director configuration applied to ports at init time. */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
	.drop_queue = 127,
};

static volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/* Storage for the per-queue statistics register mappings. */
struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0; /**< Valid entries in TX mapping array. */
uint16_t nb_rx_queue_stats_mappings = 0; /**< Valid entries in RX mapping array. */

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
304ed30d9b6SIntel 
305af75078fSIntel /*
306af75078fSIntel  * Setup default configuration.
307af75078fSIntel  */
308af75078fSIntel static void
309af75078fSIntel set_default_fwd_lcores_config(void)
310af75078fSIntel {
311af75078fSIntel 	unsigned int i;
312af75078fSIntel 	unsigned int nb_lc;
313af75078fSIntel 
314af75078fSIntel 	nb_lc = 0;
315af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
316af75078fSIntel 		if (! rte_lcore_is_enabled(i))
317af75078fSIntel 			continue;
318af75078fSIntel 		if (i == rte_get_master_lcore())
319af75078fSIntel 			continue;
320af75078fSIntel 		fwd_lcores_cpuids[nb_lc++] = i;
321af75078fSIntel 	}
322af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
323af75078fSIntel 	nb_cfg_lcores = nb_lcores;
324af75078fSIntel 	nb_fwd_lcores = 1;
325af75078fSIntel }
326af75078fSIntel 
327af75078fSIntel static void
328af75078fSIntel set_def_peer_eth_addrs(void)
329af75078fSIntel {
330af75078fSIntel 	portid_t i;
331af75078fSIntel 
332af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
333af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
334af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
335af75078fSIntel 	}
336af75078fSIntel }
337af75078fSIntel 
338af75078fSIntel static void
339af75078fSIntel set_default_fwd_ports_config(void)
340af75078fSIntel {
341af75078fSIntel 	portid_t pt_id;
342af75078fSIntel 
343af75078fSIntel 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
344af75078fSIntel 		fwd_ports_ids[pt_id] = pt_id;
345af75078fSIntel 
346af75078fSIntel 	nb_cfg_ports = nb_ports;
347af75078fSIntel 	nb_fwd_ports = nb_ports;
348af75078fSIntel }
349af75078fSIntel 
/*
 * Reset the whole forwarding configuration to its defaults:
 * lcore list, peer MAC addresses, and the port list, in that order.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
357af75078fSIntel 
/*
 * Configuration initialisation done once at init time.
 * Argument structures passed to the mempool/mbuf constructor callbacks
 * used when creating the mbuf pools.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
369af75078fSIntel 
370af75078fSIntel static void
371af75078fSIntel testpmd_mbuf_ctor(struct rte_mempool *mp,
372af75078fSIntel 		  void *opaque_arg,
373af75078fSIntel 		  void *raw_mbuf,
374af75078fSIntel 		  __attribute__((unused)) unsigned i)
375af75078fSIntel {
376af75078fSIntel 	struct mbuf_ctor_arg *mb_ctor_arg;
377af75078fSIntel 	struct rte_mbuf    *mb;
378af75078fSIntel 
379af75078fSIntel 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
380af75078fSIntel 	mb = (struct rte_mbuf *) raw_mbuf;
381af75078fSIntel 
382e788fe1cSIvan Boule 	mb->type         = RTE_MBUF_PKT;
383af75078fSIntel 	mb->pool         = mp;
384af75078fSIntel 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
385af75078fSIntel 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
386af75078fSIntel 			mb_ctor_arg->seg_buf_offset);
387af75078fSIntel 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
388af75078fSIntel 	mb->type         = RTE_MBUF_PKT;
389af75078fSIntel 	mb->ol_flags     = 0;
390af75078fSIntel 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
391af75078fSIntel 	mb->pkt.nb_segs  = 1;
39242d71416SIntel 	mb->pkt.vlan_macip.data = 0;
393af75078fSIntel 	mb->pkt.hash.rss = 0;
394af75078fSIntel }
395af75078fSIntel 
/*
 * Mempool constructor callback invoked once by rte_mempool_create()
 * when an mbuf pool is created.
 *
 * Stores the configured data-room size into the pool's private area so
 * that the PMD mbuf allocation path can find it. Refuses to write (with
 * a diagnostic) if the pool was created with a private area too small
 * to hold a struct rte_pktmbuf_pool_private.
 *
 * @param mp         the mempool being constructed
 * @param opaque_arg pointer to a struct mbuf_pool_ctor_arg
 */
static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	/* The private area is laid out directly after the mempool header. */
	mbp_priv = (struct rte_pktmbuf_pool_private *)
		((char *)mp + sizeof(struct rte_mempool));
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}
414af75078fSIntel 
/*
 * Create an mbuf pool of nb_mbuf objects on the given CPU socket.
 *
 * Each object is sized to hold the mbuf header (rounded up to a cache
 * line) plus headroom plus mbuf_seg_size bytes of data. The pool name
 * is derived from the socket id by mbuf_poolname_build(), so one pool
 * can exist per socket. Exits the program on allocation failure.
 *
 * @param mbuf_seg_size size of the mbuf data segment, in bytes
 * @param nb_mbuf       number of mbufs to allocate in the pool
 * @param socket_id     CPU socket the pool memory is taken from
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	/* Data room = headroom + requested segment size. */
	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	/* Data starts after the cache-line-aligned mbuf header. */
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
	rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	}
}
443af75078fSIntel 
/*
 * One-time configuration done at startup:
 *  - allocate the per-lcore forwarding contexts,
 *  - size and create the mbuf pool(s) (one pool in UMA mode, one pool
 *    per socket with attached ports in NUMA mode),
 *  - allocate the port array, query device info and mark every port
 *    for (re)configuration,
 *  - initialize the forwarding streams.
 * Exits the program on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket,0,MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case demand: full RX + TX rings, per-lcore caches,
		 * and one in-flight burst. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	/* UMA mode: a single pool, on socket 0 unless --socket-num is set. */
	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 * Falls back to the socket-0 pool when the lcore's own socket has
	 * no pool.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
							"failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* Count ports per socket so NUMA pools can be sized below.
		 * NOTE(review): socket_id is used unchecked as an index;
		 * assumes rte_eth_dev_socket_id() returns < MAX_SOCKET —
		 * TODO confirm against the ethdev API contract. */
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/* NUMA mode: one pool per socket, sized by the ports attached to it. */
	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		/* A user-supplied total is split evenly across ports. */
		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool *
						port_per_socket[i]);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf,i);
		}
	}
	init_port_config();
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
557ce8d5614SIntel 
/*
 * (Re)initialize the forwarding streams.
 *
 * Validates that the configured numbers of RX/TX queues fit within each
 * port's device limits, assigns every port its socket id (from the
 * device in NUMA mode, from --socket-num or 0 otherwise), then frees
 * any previously allocated stream array and allocates one stream per
 * (port, RX queue) pair. Exits the program on allocation failure.
 *
 * @return 0 on success, -1 if a queue count exceeds a device limit
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support)
			port->socket_id = rte_eth_dev_socket_id(pid);
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* One stream per RX queue of each port; nothing to do if the
	 * count is unchanged. */
	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
623af75078fSIntel 
624af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
625af75078fSIntel static void
626af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
627af75078fSIntel {
628af75078fSIntel 	unsigned int total_burst;
629af75078fSIntel 	unsigned int nb_burst;
630af75078fSIntel 	unsigned int burst_stats[3];
631af75078fSIntel 	uint16_t pktnb_stats[3];
632af75078fSIntel 	uint16_t nb_pkt;
633af75078fSIntel 	int burst_percent[3];
634af75078fSIntel 
635af75078fSIntel 	/*
636af75078fSIntel 	 * First compute the total number of packet bursts and the
637af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
638af75078fSIntel 	 */
639af75078fSIntel 	total_burst = 0;
640af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
641af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
642af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
643af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
644af75078fSIntel 		if (nb_burst == 0)
645af75078fSIntel 			continue;
646af75078fSIntel 		total_burst += nb_burst;
647af75078fSIntel 		if (nb_burst > burst_stats[0]) {
648af75078fSIntel 			burst_stats[1] = burst_stats[0];
649af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
650af75078fSIntel 			burst_stats[0] = nb_burst;
651af75078fSIntel 			pktnb_stats[0] = nb_pkt;
652af75078fSIntel 		}
653af75078fSIntel 	}
654af75078fSIntel 	if (total_burst == 0)
655af75078fSIntel 		return;
656af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
657af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
658af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
659af75078fSIntel 	if (burst_stats[0] == total_burst) {
660af75078fSIntel 		printf("]\n");
661af75078fSIntel 		return;
662af75078fSIntel 	}
663af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
664af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
665af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
666af75078fSIntel 		return;
667af75078fSIntel 	}
668af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
669af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
670af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
671af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
672af75078fSIntel 		return;
673af75078fSIntel 	}
674af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
675af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
676af75078fSIntel }
677af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
678af75078fSIntel 
/*
 * Print the forward statistics of one port in one of two layouts:
 * a compact layout when no RX/TX queue-stats mapping is configured,
 * or a wide, column-aligned layout when a mapping is active (in which
 * case the per-queue stats registers are also dumped).
 *
 * @param port_id  index into the global ports[] array
 * @param stats    snapshot of the port's ethdev statistics to display
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Compact layout: no queue-stats mapping on either direction. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		/* Checksum counters only make sense for the csum engine. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		/* Only report mbuf-allocation failures when some occurred. */
		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

	}
	else {
		/* Wide layout: extra padding so the per-queue rows below line up. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst-size histograms, gathered per stream attached to this port. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* stats fdir: only meaningful when flow director is enabled */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	/* Per-queue stats-register dump (RX side), one row per register. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	/* Per-queue stats-register dump (TX side). */
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
763af75078fSIntel 
/*
 * Print the statistics of a single forwarding stream (one RX port/queue
 * to TX port/queue pairing). Streams that saw no traffic at all are
 * skipped silently to keep the report short.
 *
 * @param stream_id  index into the global fwd_streams[] array
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Nothing received, sent or dropped: no line to print. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode: also report the per-stream bad-checksum counts */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
792af75078fSIntel 
793af75078fSIntel static void
7947741e4cfSIntel flush_fwd_rx_queues(void)
795af75078fSIntel {
796af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
797af75078fSIntel 	portid_t  rxp;
7987741e4cfSIntel 	portid_t port_id;
799af75078fSIntel 	queueid_t rxq;
800af75078fSIntel 	uint16_t  nb_rx;
801af75078fSIntel 	uint16_t  i;
802af75078fSIntel 	uint8_t   j;
803af75078fSIntel 
804af75078fSIntel 	for (j = 0; j < 2; j++) {
8057741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
806af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
8077741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
808af75078fSIntel 				do {
8097741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
810013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
811af75078fSIntel 					for (i = 0; i < nb_rx; i++)
812af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
813af75078fSIntel 				} while (nb_rx > 0);
814af75078fSIntel 			}
815af75078fSIntel 		}
816af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
817af75078fSIntel 	}
818af75078fSIntel }
819af75078fSIntel 
820af75078fSIntel static void
821af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
822af75078fSIntel {
823af75078fSIntel 	struct fwd_stream **fsm;
824af75078fSIntel 	streamid_t nb_fs;
825af75078fSIntel 	streamid_t sm_id;
826af75078fSIntel 
827af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
828af75078fSIntel 	nb_fs = fc->stream_nb;
829af75078fSIntel 	do {
830af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
831af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
832af75078fSIntel 	} while (! fc->stopped);
833af75078fSIntel }
834af75078fSIntel 
835af75078fSIntel static int
836af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
837af75078fSIntel {
838af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
839af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
840af75078fSIntel 	return 0;
841af75078fSIntel }
842af75078fSIntel 
843af75078fSIntel /*
844af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
845af75078fSIntel  * Used to start communication flows in network loopback test configurations.
846af75078fSIntel  */
847af75078fSIntel static int
848af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
849af75078fSIntel {
850af75078fSIntel 	struct fwd_lcore *fwd_lc;
851af75078fSIntel 	struct fwd_lcore tmp_lcore;
852af75078fSIntel 
853af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
854af75078fSIntel 	tmp_lcore = *fwd_lc;
855af75078fSIntel 	tmp_lcore.stopped = 1;
856af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
857af75078fSIntel 	return 0;
858af75078fSIntel }
859af75078fSIntel 
860af75078fSIntel /*
861af75078fSIntel  * Launch packet forwarding:
862af75078fSIntel  *     - Setup per-port forwarding context.
863af75078fSIntel  *     - launch logical cores with their forwarding configuration.
864af75078fSIntel  */
865af75078fSIntel static void
866af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
867af75078fSIntel {
868af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
869af75078fSIntel 	unsigned int i;
870af75078fSIntel 	unsigned int lc_id;
871af75078fSIntel 	int diag;
872af75078fSIntel 
873af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
874af75078fSIntel 	if (port_fwd_begin != NULL) {
875af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
876af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
877af75078fSIntel 	}
878af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
879af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
880af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
881af75078fSIntel 			fwd_lcores[i]->stopped = 0;
882af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
883af75078fSIntel 						     fwd_lcores[i], lc_id);
884af75078fSIntel 			if (diag != 0)
885af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
886af75078fSIntel 				       lc_id, diag);
887af75078fSIntel 		}
888af75078fSIntel 	}
889af75078fSIntel }
890af75078fSIntel 
891af75078fSIntel /*
892af75078fSIntel  * Launch packet forwarding configuration.
893af75078fSIntel  */
/*
 * Launch packet forwarding configuration.
 *
 * Validates preconditions (all ports started, no run already active,
 * DCB constraints), optionally flushes stale RX packets, rebuilds the
 * forwarding configuration, snapshots per-port stats as the baseline
 * for the next report, zeroes per-stream counters, optionally sends an
 * initial TX-only burst, then launches the forwarding lcores.
 *
 * @param with_tx_first  non-zero to send one TXONLY burst on every port
 *                       before starting the configured engine (used to
 *                       kick off traffic in loopback setups)
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	/* DCB mode requires every forwarding port to be DCB-configured
	 * and more than one forwarding core. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	/* Drop packets left over from a previous run unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		/* Snapshot current stats; stop_packet_forwarding() subtracts
		 * this baseline to report only this run's traffic. */
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset all per-stream counters for the new run. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optional single TXONLY burst, run to completion before the
	 * configured forwarding engine takes over. */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
977af75078fSIntel 
978af75078fSIntel void
979af75078fSIntel stop_packet_forwarding(void)
980af75078fSIntel {
981af75078fSIntel 	struct rte_eth_stats stats;
982af75078fSIntel 	struct rte_port *port;
983af75078fSIntel 	port_fwd_end_t  port_fwd_end;
984af75078fSIntel 	int i;
985af75078fSIntel 	portid_t   pt_id;
986af75078fSIntel 	streamid_t sm_id;
987af75078fSIntel 	lcoreid_t  lc_id;
988af75078fSIntel 	uint64_t total_recv;
989af75078fSIntel 	uint64_t total_xmit;
990af75078fSIntel 	uint64_t total_rx_dropped;
991af75078fSIntel 	uint64_t total_tx_dropped;
992af75078fSIntel 	uint64_t total_rx_nombuf;
993af75078fSIntel 	uint64_t tx_dropped;
994af75078fSIntel 	uint64_t rx_bad_ip_csum;
995af75078fSIntel 	uint64_t rx_bad_l4_csum;
996af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
997af75078fSIntel 	uint64_t fwd_cycles;
998af75078fSIntel #endif
999af75078fSIntel 	static const char *acc_stats_border = "+++++++++++++++";
1000af75078fSIntel 
1001ce8d5614SIntel 	if (all_ports_started() == 0) {
1002ce8d5614SIntel 		printf("Not all ports were started\n");
1003ce8d5614SIntel 		return;
1004ce8d5614SIntel 	}
1005af75078fSIntel 	if (test_done) {
1006af75078fSIntel 		printf("Packet forwarding not started\n");
1007af75078fSIntel 		return;
1008af75078fSIntel 	}
1009af75078fSIntel 	printf("Telling cores to stop...");
1010af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1011af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1012af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1013af75078fSIntel 	rte_eal_mp_wait_lcore();
1014af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1015af75078fSIntel 	if (port_fwd_end != NULL) {
1016af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1017af75078fSIntel 			pt_id = fwd_ports_ids[i];
1018af75078fSIntel 			(*port_fwd_end)(pt_id);
1019af75078fSIntel 		}
1020af75078fSIntel 	}
1021af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1022af75078fSIntel 	fwd_cycles = 0;
1023af75078fSIntel #endif
1024af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1025af75078fSIntel 		if (cur_fwd_config.nb_fwd_streams >
1026af75078fSIntel 		    cur_fwd_config.nb_fwd_ports) {
1027af75078fSIntel 			fwd_stream_stats_display(sm_id);
1028af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1029af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1030af75078fSIntel 		} else {
1031af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1032af75078fSIntel 				fwd_streams[sm_id];
1033af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1034af75078fSIntel 				fwd_streams[sm_id];
1035af75078fSIntel 		}
1036af75078fSIntel 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1037af75078fSIntel 		tx_dropped = (uint64_t) (tx_dropped +
1038af75078fSIntel 					 fwd_streams[sm_id]->fwd_dropped);
1039af75078fSIntel 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1040af75078fSIntel 
1041013af9b6SIntel 		rx_bad_ip_csum =
1042013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1043af75078fSIntel 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1044af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1045013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1046013af9b6SIntel 							rx_bad_ip_csum;
1047af75078fSIntel 
1048013af9b6SIntel 		rx_bad_l4_csum =
1049013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1050af75078fSIntel 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1051af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1052013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1053013af9b6SIntel 							rx_bad_l4_csum;
1054af75078fSIntel 
1055af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1056af75078fSIntel 		fwd_cycles = (uint64_t) (fwd_cycles +
1057af75078fSIntel 					 fwd_streams[sm_id]->core_cycles);
1058af75078fSIntel #endif
1059af75078fSIntel 	}
1060af75078fSIntel 	total_recv = 0;
1061af75078fSIntel 	total_xmit = 0;
1062af75078fSIntel 	total_rx_dropped = 0;
1063af75078fSIntel 	total_tx_dropped = 0;
1064af75078fSIntel 	total_rx_nombuf  = 0;
10657741e4cfSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1066af75078fSIntel 		pt_id = fwd_ports_ids[i];
1067af75078fSIntel 
1068af75078fSIntel 		port = &ports[pt_id];
1069af75078fSIntel 		rte_eth_stats_get(pt_id, &stats);
1070af75078fSIntel 		stats.ipackets -= port->stats.ipackets;
1071af75078fSIntel 		port->stats.ipackets = 0;
1072af75078fSIntel 		stats.opackets -= port->stats.opackets;
1073af75078fSIntel 		port->stats.opackets = 0;
1074af75078fSIntel 		stats.ibytes   -= port->stats.ibytes;
1075af75078fSIntel 		port->stats.ibytes = 0;
1076af75078fSIntel 		stats.obytes   -= port->stats.obytes;
1077af75078fSIntel 		port->stats.obytes = 0;
1078af75078fSIntel 		stats.ierrors  -= port->stats.ierrors;
1079af75078fSIntel 		port->stats.ierrors = 0;
1080af75078fSIntel 		stats.oerrors  -= port->stats.oerrors;
1081af75078fSIntel 		port->stats.oerrors = 0;
1082af75078fSIntel 		stats.rx_nombuf -= port->stats.rx_nombuf;
1083af75078fSIntel 		port->stats.rx_nombuf = 0;
1084af75078fSIntel 		stats.fdirmatch -= port->stats.fdirmatch;
1085af75078fSIntel 		port->stats.rx_nombuf = 0;
1086af75078fSIntel 		stats.fdirmiss -= port->stats.fdirmiss;
1087af75078fSIntel 		port->stats.rx_nombuf = 0;
1088af75078fSIntel 
1089af75078fSIntel 		total_recv += stats.ipackets;
1090af75078fSIntel 		total_xmit += stats.opackets;
1091af75078fSIntel 		total_rx_dropped += stats.ierrors;
1092af75078fSIntel 		total_tx_dropped += port->tx_dropped;
1093af75078fSIntel 		total_rx_nombuf  += stats.rx_nombuf;
1094af75078fSIntel 
1095af75078fSIntel 		fwd_port_stats_display(pt_id, &stats);
1096af75078fSIntel 	}
1097af75078fSIntel 	printf("\n  %s Accumulated forward statistics for all ports"
1098af75078fSIntel 	       "%s\n",
1099af75078fSIntel 	       acc_stats_border, acc_stats_border);
1100af75078fSIntel 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1101af75078fSIntel 	       "%-"PRIu64"\n"
1102af75078fSIntel 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1103af75078fSIntel 	       "%-"PRIu64"\n",
1104af75078fSIntel 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1105af75078fSIntel 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1106af75078fSIntel 	if (total_rx_nombuf > 0)
1107af75078fSIntel 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1108af75078fSIntel 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1109af75078fSIntel 	       "%s\n",
1110af75078fSIntel 	       acc_stats_border, acc_stats_border);
1111af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1112af75078fSIntel 	if (total_recv > 0)
1113af75078fSIntel 		printf("\n  CPU cycles/packet=%u (total cycles="
1114af75078fSIntel 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1115af75078fSIntel 		       (unsigned int)(fwd_cycles / total_recv),
1116af75078fSIntel 		       fwd_cycles, total_recv);
1117af75078fSIntel #endif
1118af75078fSIntel 	printf("\nDone.\n");
1119af75078fSIntel 	test_done = 1;
1120af75078fSIntel }
1121af75078fSIntel 
1122ce8d5614SIntel static int
1123ce8d5614SIntel all_ports_started(void)
1124ce8d5614SIntel {
1125ce8d5614SIntel 	portid_t pi;
1126ce8d5614SIntel 	struct rte_port *port;
1127ce8d5614SIntel 
1128ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1129ce8d5614SIntel 		port = &ports[pi];
1130ce8d5614SIntel 		/* Check if there is a port which is not started */
1131ce8d5614SIntel 		if (port->port_status != RTE_PORT_STARTED)
1132ce8d5614SIntel 			return 0;
1133ce8d5614SIntel 	}
1134ce8d5614SIntel 
1135ce8d5614SIntel 	/* No port is not started */
1136ce8d5614SIntel 	return 1;
1137ce8d5614SIntel }
1138ce8d5614SIntel 
/*
 * Start one port, or all ports when pid is out of range (>= nb_ports).
 *
 * For each targeted port: atomically claim it (STOPPED -> HANDLING),
 * reconfigure the device and its TX/RX queues if flagged as needing it,
 * start the device, then publish the STARTED state. On any failure the
 * port is rolled back to STOPPED and the relevant need_reconfig* flag
 * is re-armed so the next attempt retries the step that failed.
 * Finally, if at least one port started, waits for link-up status.
 *
 * @param pid  port to start, or any value >= nb_ports to start all
 */
void
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if(dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		/* pid >= nb_ports means "all ports"; otherwise only pid. */
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		/* Claim the port: only a STOPPED port may be started. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %d)\n", pi,
					rte_eth_dev_socket_id(pi));
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Prefer the per-port NUMA override when
				 * NUMA support is on and one is configured. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* RX additionally needs a mempool on
					 * the requested socket. */
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							"on the socket %d\n",
							rxring_numa[pi]);
						return;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				}
				else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf),
				             mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;


				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Device failed to start: roll back to STOPPED. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
}
1283ce8d5614SIntel }
1284ce8d5614SIntel 
1285ce8d5614SIntel void
1286ce8d5614SIntel stop_port(portid_t pid)
1287ce8d5614SIntel {
1288ce8d5614SIntel 	portid_t pi;
1289ce8d5614SIntel 	struct rte_port *port;
1290ce8d5614SIntel 	int need_check_link_status = 0;
1291ce8d5614SIntel 
1292ce8d5614SIntel 	if (test_done == 0) {
1293ce8d5614SIntel 		printf("Please stop forwarding first\n");
1294ce8d5614SIntel 		return;
1295ce8d5614SIntel 	}
1296ce8d5614SIntel 	if (dcb_test) {
1297ce8d5614SIntel 		dcb_test = 0;
1298ce8d5614SIntel 		dcb_config = 0;
1299ce8d5614SIntel 	}
1300ce8d5614SIntel 	printf("Stopping ports...\n");
1301ce8d5614SIntel 
1302ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1303ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1304ce8d5614SIntel 			continue;
1305ce8d5614SIntel 
1306ce8d5614SIntel 		port = &ports[pi];
1307ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1308ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
1309ce8d5614SIntel 			continue;
1310ce8d5614SIntel 
1311ce8d5614SIntel 		rte_eth_dev_stop(pi);
1312ce8d5614SIntel 
1313ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1314ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1315ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
1316ce8d5614SIntel 		need_check_link_status = 1;
1317ce8d5614SIntel 	}
1318ce8d5614SIntel 	if (need_check_link_status)
1319ce8d5614SIntel 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1320ce8d5614SIntel 
1321ce8d5614SIntel 	printf("Done\n");
1322ce8d5614SIntel }
1323ce8d5614SIntel 
1324ce8d5614SIntel void
1325ce8d5614SIntel close_port(portid_t pid)
1326ce8d5614SIntel {
1327ce8d5614SIntel 	portid_t pi;
1328ce8d5614SIntel 	struct rte_port *port;
1329ce8d5614SIntel 
1330ce8d5614SIntel 	if (test_done == 0) {
1331ce8d5614SIntel 		printf("Please stop forwarding first\n");
1332ce8d5614SIntel 		return;
1333ce8d5614SIntel 	}
1334ce8d5614SIntel 
1335ce8d5614SIntel 	printf("Closing ports...\n");
1336ce8d5614SIntel 
1337ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1338ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1339ce8d5614SIntel 			continue;
1340ce8d5614SIntel 
1341ce8d5614SIntel 		port = &ports[pi];
1342ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1343ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1344ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
1345ce8d5614SIntel 			continue;
1346ce8d5614SIntel 		}
1347ce8d5614SIntel 
1348ce8d5614SIntel 		rte_eth_dev_close(pi);
1349ce8d5614SIntel 
1350ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1351ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1352ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
1353ce8d5614SIntel 	}
1354ce8d5614SIntel 
1355ce8d5614SIntel 	printf("Done\n");
1356ce8d5614SIntel }
1357ce8d5614SIntel 
1358ce8d5614SIntel int
1359ce8d5614SIntel all_ports_stopped(void)
1360ce8d5614SIntel {
1361ce8d5614SIntel 	portid_t pi;
1362ce8d5614SIntel 	struct rte_port *port;
1363ce8d5614SIntel 
1364ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1365ce8d5614SIntel 		port = &ports[pi];
1366ce8d5614SIntel 		if (port->port_status != RTE_PORT_STOPPED)
1367ce8d5614SIntel 			return 0;
1368ce8d5614SIntel 	}
1369ce8d5614SIntel 
1370ce8d5614SIntel 	return 1;
1371ce8d5614SIntel }
1372ce8d5614SIntel 
1373af75078fSIntel void
1374af75078fSIntel pmd_test_exit(void)
1375af75078fSIntel {
1376af75078fSIntel 	portid_t pt_id;
1377af75078fSIntel 
1378af75078fSIntel 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1379af75078fSIntel 		printf("Stopping port %d...", pt_id);
1380af75078fSIntel 		fflush(stdout);
1381af75078fSIntel 		rte_eth_dev_close(pt_id);
1382af75078fSIntel 		printf("done\n");
1383af75078fSIntel 	}
1384af75078fSIntel 	printf("bye...\n");
1385af75078fSIntel }
1386af75078fSIntel 
/* Signature of a parameter-less test-menu command handler. */
typedef void (*cmd_func_t)(void);
/* One entry of the PMD test command menu: a command string paired with
 * the handler invoked when the user types it. */
struct pmd_test_command {
	const char *cmd_name;	/* command string typed by the user */
	cmd_func_t cmd_func;	/* handler run for that command */
};

/* Number of entries in pmd_test_menu (table defined elsewhere in this file). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1394af75078fSIntel 
1395ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
1396af75078fSIntel static void
1397ce8d5614SIntel check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1398af75078fSIntel {
1399ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
1400ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1401ce8d5614SIntel 	uint8_t portid, count, all_ports_up, print_flag = 0;
1402ce8d5614SIntel 	struct rte_eth_link link;
1403ce8d5614SIntel 
1404ce8d5614SIntel 	printf("Checking link statuses...\n");
1405ce8d5614SIntel 	fflush(stdout);
1406ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1407ce8d5614SIntel 		all_ports_up = 1;
1408ce8d5614SIntel 		for (portid = 0; portid < port_num; portid++) {
1409ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
1410ce8d5614SIntel 				continue;
1411ce8d5614SIntel 			memset(&link, 0, sizeof(link));
1412ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
1413ce8d5614SIntel 			/* print link status if flag set */
1414ce8d5614SIntel 			if (print_flag == 1) {
1415ce8d5614SIntel 				if (link.link_status)
1416ce8d5614SIntel 					printf("Port %d Link Up - speed %u "
1417ce8d5614SIntel 						"Mbps - %s\n", (uint8_t)portid,
1418ce8d5614SIntel 						(unsigned)link.link_speed,
1419ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1420ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
1421ce8d5614SIntel 				else
1422ce8d5614SIntel 					printf("Port %d Link Down\n",
1423ce8d5614SIntel 						(uint8_t)portid);
1424ce8d5614SIntel 				continue;
1425ce8d5614SIntel 			}
1426ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
1427ce8d5614SIntel 			if (link.link_status == 0) {
1428ce8d5614SIntel 				all_ports_up = 0;
1429ce8d5614SIntel 				break;
1430ce8d5614SIntel 			}
1431ce8d5614SIntel 		}
1432ce8d5614SIntel 		/* after finally printing all link status, get out */
1433ce8d5614SIntel 		if (print_flag == 1)
1434ce8d5614SIntel 			break;
1435ce8d5614SIntel 
1436ce8d5614SIntel 		if (all_ports_up == 0) {
1437ce8d5614SIntel 			fflush(stdout);
1438ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
1439ce8d5614SIntel 		}
1440ce8d5614SIntel 
1441ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
1442ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1443ce8d5614SIntel 			print_flag = 1;
1444ce8d5614SIntel 		}
1445ce8d5614SIntel 	}
1446af75078fSIntel }
1447af75078fSIntel 
1448013af9b6SIntel static int
1449013af9b6SIntel set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1450af75078fSIntel {
1451013af9b6SIntel 	uint16_t i;
1452af75078fSIntel 	int diag;
1453013af9b6SIntel 	uint8_t mapping_found = 0;
1454af75078fSIntel 
1455013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1456013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1457013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1458013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1459013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
1460013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
1461013af9b6SIntel 			if (diag != 0)
1462013af9b6SIntel 				return diag;
1463013af9b6SIntel 			mapping_found = 1;
1464af75078fSIntel 		}
1465013af9b6SIntel 	}
1466013af9b6SIntel 	if (mapping_found)
1467013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
1468013af9b6SIntel 	return 0;
1469013af9b6SIntel }
1470013af9b6SIntel 
1471013af9b6SIntel static int
1472013af9b6SIntel set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1473013af9b6SIntel {
1474013af9b6SIntel 	uint16_t i;
1475013af9b6SIntel 	int diag;
1476013af9b6SIntel 	uint8_t mapping_found = 0;
1477013af9b6SIntel 
1478013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1479013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1480013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1481013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1482013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
1483013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
1484013af9b6SIntel 			if (diag != 0)
1485013af9b6SIntel 				return diag;
1486013af9b6SIntel 			mapping_found = 1;
1487013af9b6SIntel 		}
1488013af9b6SIntel 	}
1489013af9b6SIntel 	if (mapping_found)
1490013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
1491013af9b6SIntel 	return 0;
1492013af9b6SIntel }
1493013af9b6SIntel 
1494013af9b6SIntel static void
1495013af9b6SIntel map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1496013af9b6SIntel {
1497013af9b6SIntel 	int diag = 0;
1498013af9b6SIntel 
1499013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1500af75078fSIntel 	if (diag != 0) {
1501013af9b6SIntel 		if (diag == -ENOTSUP) {
1502013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
1503013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1504013af9b6SIntel 		}
1505013af9b6SIntel 		else
1506013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1507013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
1508013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1509af75078fSIntel 					pi, diag);
1510af75078fSIntel 	}
1511013af9b6SIntel 
1512013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1513af75078fSIntel 	if (diag != 0) {
1514013af9b6SIntel 		if (diag == -ENOTSUP) {
1515013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
1516013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1517013af9b6SIntel 		}
1518013af9b6SIntel 		else
1519013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1520013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
1521013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1522af75078fSIntel 					pi, diag);
1523af75078fSIntel 	}
1524af75078fSIntel }
1525af75078fSIntel 
1526013af9b6SIntel void
1527013af9b6SIntel init_port_config(void)
1528013af9b6SIntel {
1529013af9b6SIntel 	portid_t pid;
1530013af9b6SIntel 	struct rte_port *port;
1531013af9b6SIntel 
1532013af9b6SIntel 	for (pid = 0; pid < nb_ports; pid++) {
1533013af9b6SIntel 		port = &ports[pid];
1534013af9b6SIntel 		port->dev_conf.rxmode = rx_mode;
1535013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
1536013af9b6SIntel 		if (nb_rxq > 0) {
1537013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1538013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1539af75078fSIntel 		} else {
1540013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1541013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1542af75078fSIntel 		}
1543013af9b6SIntel 		port->rx_conf.rx_thresh = rx_thresh;
1544013af9b6SIntel 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1545013af9b6SIntel 		port->rx_conf.rx_drop_en = rx_drop_en;
1546013af9b6SIntel 		port->tx_conf.tx_thresh = tx_thresh;
1547013af9b6SIntel 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1548013af9b6SIntel 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1549013af9b6SIntel 		port->tx_conf.txq_flags = txq_flags;
1550013af9b6SIntel 
1551013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
1552013af9b6SIntel 
1553013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
15547b7e5ba7SIntel #ifdef RTE_NIC_BYPASS
15557b7e5ba7SIntel 		rte_eth_dev_bypass_init(pid);
15567b7e5ba7SIntel #endif
1557013af9b6SIntel 	}
1558013af9b6SIntel }
1559013af9b6SIntel 
/* VLAN tag identifiers used by the DCB configuration code below: they
 * populate the VMDQ+DCB pool map in get_eth_dcb_conf() and the VLAN
 * filter table in init_port_dcb_config(). */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1566013af9b6SIntel 
1567013af9b6SIntel static  int
1568013af9b6SIntel get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1569013af9b6SIntel {
1570013af9b6SIntel         uint8_t i;
1571af75078fSIntel 
1572af75078fSIntel  	/*
1573013af9b6SIntel  	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1574013af9b6SIntel  	 * given above, and the number of traffic classes available for use.
1575af75078fSIntel  	 */
1576013af9b6SIntel 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1577013af9b6SIntel 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1578013af9b6SIntel 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1579013af9b6SIntel 
1580013af9b6SIntel 		/* VMDQ+DCB RX and TX configrations */
1581013af9b6SIntel 		vmdq_rx_conf.enable_default_pool = 0;
1582013af9b6SIntel 		vmdq_rx_conf.default_pool = 0;
1583013af9b6SIntel 		vmdq_rx_conf.nb_queue_pools =
1584013af9b6SIntel 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1585013af9b6SIntel 		vmdq_tx_conf.nb_queue_pools =
1586013af9b6SIntel 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1587013af9b6SIntel 
1588013af9b6SIntel 		vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1589013af9b6SIntel 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1590013af9b6SIntel 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1591013af9b6SIntel 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1592af75078fSIntel 		}
1593013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1594013af9b6SIntel 			vmdq_rx_conf.dcb_queue[i] = i;
1595013af9b6SIntel 			vmdq_tx_conf.dcb_queue[i] = i;
1596013af9b6SIntel 		}
1597013af9b6SIntel 
1598013af9b6SIntel 		/*set DCB mode of RX and TX of multiple queues*/
159932e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
160032e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1601013af9b6SIntel 		if (dcb_conf->pfc_en)
1602013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1603013af9b6SIntel 		else
1604013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1605013af9b6SIntel 
1606013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1607013af9b6SIntel                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1608013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1609013af9b6SIntel                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1610013af9b6SIntel 	}
1611013af9b6SIntel 	else {
1612013af9b6SIntel 		struct rte_eth_dcb_rx_conf rx_conf;
1613013af9b6SIntel 		struct rte_eth_dcb_tx_conf tx_conf;
1614013af9b6SIntel 
1615013af9b6SIntel 		/* queue mapping configuration of DCB RX and TX */
1616013af9b6SIntel 		if (dcb_conf->num_tcs == ETH_4_TCS)
1617013af9b6SIntel 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1618013af9b6SIntel 		else
1619013af9b6SIntel 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1620013af9b6SIntel 
1621013af9b6SIntel 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1622013af9b6SIntel 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1623013af9b6SIntel 
1624013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1625013af9b6SIntel 			rx_conf.dcb_queue[i] = i;
1626013af9b6SIntel 			tx_conf.dcb_queue[i] = i;
1627013af9b6SIntel 		}
162832e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
162932e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1630013af9b6SIntel 		if (dcb_conf->pfc_en)
1631013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1632013af9b6SIntel 		else
1633013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1634013af9b6SIntel 
1635013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1636013af9b6SIntel                                 sizeof(struct rte_eth_dcb_rx_conf)));
1637013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1638013af9b6SIntel                                 sizeof(struct rte_eth_dcb_tx_conf)));
1639013af9b6SIntel 	}
1640013af9b6SIntel 
1641013af9b6SIntel 	return 0;
1642013af9b6SIntel }
1643013af9b6SIntel 
1644013af9b6SIntel int
1645013af9b6SIntel init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1646013af9b6SIntel {
1647013af9b6SIntel 	struct rte_eth_conf port_conf;
1648013af9b6SIntel 	struct rte_port *rte_port;
1649013af9b6SIntel 	int retval;
1650013af9b6SIntel 	uint16_t nb_vlan;
1651013af9b6SIntel 	uint16_t i;
1652013af9b6SIntel 
1653013af9b6SIntel 	/* rxq and txq configuration in dcb mode */
1654013af9b6SIntel 	nb_rxq = 128;
1655013af9b6SIntel 	nb_txq = 128;
1656013af9b6SIntel 	rx_free_thresh = 64;
1657013af9b6SIntel 
1658013af9b6SIntel 	memset(&port_conf,0,sizeof(struct rte_eth_conf));
1659013af9b6SIntel 	/* Enter DCB configuration status */
1660013af9b6SIntel 	dcb_config = 1;
1661013af9b6SIntel 
1662013af9b6SIntel 	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1663013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
1664013af9b6SIntel 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1665013af9b6SIntel 	if (retval < 0)
1666013af9b6SIntel 		return retval;
1667013af9b6SIntel 
1668013af9b6SIntel 	rte_port = &ports[pid];
1669013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1670013af9b6SIntel 
1671013af9b6SIntel 	rte_port->rx_conf.rx_thresh = rx_thresh;
1672013af9b6SIntel 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1673013af9b6SIntel 	rte_port->tx_conf.tx_thresh = tx_thresh;
1674013af9b6SIntel 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1675013af9b6SIntel 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1676013af9b6SIntel 	/* VLAN filter */
1677013af9b6SIntel 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1678013af9b6SIntel 	for (i = 0; i < nb_vlan; i++){
1679013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
1680013af9b6SIntel 	}
1681013af9b6SIntel 
1682013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1683013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
1684013af9b6SIntel 
16857741e4cfSIntel 	rte_port->dcb_flag = 1;
16867741e4cfSIntel 
1687013af9b6SIntel 	return 0;
1688af75078fSIntel }
1689af75078fSIntel 
1690af75078fSIntel #ifdef RTE_EXEC_ENV_BAREMETAL
1691af75078fSIntel #define main _main
1692af75078fSIntel #endif
1693af75078fSIntel 
1694af75078fSIntel int
1695af75078fSIntel main(int argc, char** argv)
1696af75078fSIntel {
1697af75078fSIntel 	int  diag;
1698013af9b6SIntel 	uint8_t port_id;
1699af75078fSIntel 
1700af75078fSIntel 	diag = rte_eal_init(argc, argv);
1701af75078fSIntel 	if (diag < 0)
1702af75078fSIntel 		rte_panic("Cannot init EAL\n");
1703af75078fSIntel 
170469d22b8eSIntel 	if (rte_pmd_init_all())
170569d22b8eSIntel 		rte_panic("Cannot init PMD\n");
1706af75078fSIntel 
1707af75078fSIntel 	if (rte_eal_pci_probe())
1708af75078fSIntel 		rte_panic("Cannot probe PCI\n");
1709af75078fSIntel 
1710af75078fSIntel 	nb_ports = (portid_t) rte_eth_dev_count();
1711af75078fSIntel 	if (nb_ports == 0)
1712013af9b6SIntel 		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1713013af9b6SIntel 							"check that "
1714af75078fSIntel 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
171569d22b8eSIntel 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1716af75078fSIntel 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1717af75078fSIntel 			  "configuration file\n");
1718af75078fSIntel 
1719af75078fSIntel 	set_def_fwd_config();
1720af75078fSIntel 	if (nb_lcores == 0)
1721af75078fSIntel 		rte_panic("Empty set of forwarding logical cores - check the "
1722af75078fSIntel 			  "core mask supplied in the command parameters\n");
1723af75078fSIntel 
1724af75078fSIntel 	argc -= diag;
1725af75078fSIntel 	argv += diag;
1726af75078fSIntel 	if (argc > 1)
1727af75078fSIntel 		launch_args_parse(argc, argv);
1728af75078fSIntel 
1729af75078fSIntel 	if (nb_rxq > nb_txq)
1730af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1731af75078fSIntel 		       "but nb_txq=%d will prevent to fully test it.\n",
1732af75078fSIntel 		       nb_rxq, nb_txq);
1733af75078fSIntel 
1734af75078fSIntel 	init_config();
1735ce8d5614SIntel 	start_port(RTE_PORT_ALL);
1736af75078fSIntel 
1737ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
1738ce8d5614SIntel 	for (port_id = 0; port_id < nb_ports; port_id++)
1739ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
1740af75078fSIntel 
1741af75078fSIntel 	if (interactive == 1)
1742af75078fSIntel 		prompt();
1743af75078fSIntel 	else {
1744af75078fSIntel 		char c;
1745af75078fSIntel 		int rc;
1746af75078fSIntel 
1747af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
1748af75078fSIntel 		start_packet_forwarding(0);
1749af75078fSIntel 		printf("Press enter to exit\n");
1750af75078fSIntel 		rc = read(0, &c, 1);
1751af75078fSIntel 		if (rc < 0)
1752af75078fSIntel 			return 1;
1753af75078fSIntel 	}
1754af75078fSIntel 
1755af75078fSIntel 	return 0;
1756af75078fSIntel }
1757