/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
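/*
 * Example (a sketch, not enforced by code): with 8 enabled lcores and
 * 4 probed ports, a user who restricts the forwarding configuration to
 * 2 cores and 2 ports gets nb_fwd_lcores = nb_cfg_lcores = 2 <= nb_lcores = 8
 * and nb_fwd_ports = nb_cfg_ports = 2 <= nb_ports = 4.
 */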

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is currently under test. */
uint8_t dcb_test = 0;

/* DCB-on with VT-on queue mapping is the default. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
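
/*
 * Note: the prefetch, host and write-back thresholds control how eagerly
 * the NIC fetches descriptors from, and writes completed descriptors back
 * to, host memory. The defaults above are assumed to be conservative
 * values for the Intel NICs driven by the e1000/igb/ixgbe PMDs; other
 * hardware may interpret or ignore them differently.
 */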

struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
	.drop_queue = 127,
};
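
/*
 * Flow Director is disabled by default (RTE_FDIR_MODE_NONE); the remaining
 * fields above only take effect once a different mode is selected at run
 * time, and fwd_port_stats_display() below reports fdir counters only in
 * that case.
 */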

static volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};

static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf    *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->pool         = mp;
	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len      = mb_ctor_arg->seg_buf_size;
	mb->type         = RTE_MBUF_PKT;
	mb->ol_flags     = 0;
	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.nb_segs  = 1;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
}
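
/*
 * Resulting layout of each object in the pool (a sketch derived from the
 * constructor above; widths are illustrative):
 *
 *   +------------------------+----------------------+------------------+
 *   | struct rte_mbuf        | RTE_PKTMBUF_HEADROOM | packet data area |
 *   | (cache-line rounded)   |                      |                  |
 *   +------------------------+----------------------+------------------+
 *   ^ raw_mbuf               ^ buf_addr             ^ pkt.data
 */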

static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = (struct rte_pktmbuf_pool_private *)
		((char *)mp + sizeof(struct rte_mempool));
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}

static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
	rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	}
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}
	if (numa_support) {
		nb_mbuf_per_pool /= 2;
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1);
	} else {
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
	}

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
							"failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	init_port_config();

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}

int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support)
			port->socket_id = (pid < (nb_ports >> 1)) ? 0 : 1;
		else
			port->socket_id = 0;
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* Track the second-highest burst count too, as the
			 * comment above promises. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* stats fdir */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

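/*
 * Drain the RX queues of all ports: receive and free bursts until every
 * queue reads empty, then wait briefly and make a second pass to catch
 * packets that were still in flight during the first pass.
 */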
static void
flush_all_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < nb_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				do {
					nb_rx = rte_eth_rx_burst(rxp, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

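/*
 * Forwarding loop of one logical core: keep applying the engine's packet
 * forwarding function to every stream assigned to this lcore until the
 * core is asked to stop.
 */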
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
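	/*
	 * Setting "stopped" before entering the loop makes the do/while in
	 * run_pkt_fwd_on_lcore() execute exactly one pass over the streams,
	 * i.e. a single TX burst per stream.
	 */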
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test && (nb_fwd_lcores == 1)) {
		printf("In DCB mode, the number of forwarding cores "
		       "must be larger than 1.\n");
		return;
	}
	test_done = 0;
	flush_all_rx_queues();
	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.ierrors  -= port->stats.ierrors;
		port->stats.ierrors = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.ierrors;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* All ports are started. */
	return 1;
}

void
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d\n", pi);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return;
			}
		}

		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;

			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				diag = rte_eth_tx_queue_setup(pi, qi, nb_txd,
					port->socket_id, &(port->tx_conf));
				if (diag == 0)
					continue;

				/* Failed to set up a TX queue; bail out. */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d TX queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				diag = rte_eth_rx_queue_setup(pi, qi, nb_rxd,
					port->socket_id, &(port->rx_conf),
					mbuf_pool_find(port->socket_id));
				if (diag == 0)
					continue;

				/* Failed to set up an RX queue; bail out. */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d RX queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start the port; set it back to stopped. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
}

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf("done\n");
	}
	printf("bye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports; wait up to 9 seconds, then print the final status. */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

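/*
 * Program the user-supplied TX queue <-> stats-counter mappings of the
 * given port into the device and flag the port accordingly. Returns 0 on
 * success or the error code reported by the ethdev layer.
 */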
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

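/*
 * Same as above for the RX side: apply the RX queue <-> stats-counter
 * mappings of the given port and flag the port accordingly.
 */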
static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}

void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 0) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}
		port->rx_conf.rx_thresh = rx_thresh;
		port->rx_conf.rx_free_thresh = rx_free_thresh;
		port->rx_conf.rx_drop_en = rx_drop_en;
		port->tx_conf.tx_thresh = tx_thresh;
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
		port->tx_conf.tx_free_thresh = tx_free_thresh;
		port->tx_conf.txq_flags = txq_flags;

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
	}
}

const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};

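/*
 * Fill *eth_conf with a DCB configuration derived from *dcb_conf: either
 * VMDq+DCB when virtualization (VT) is enabled, or plain DCB otherwise,
 * for 4 or 8 traffic classes.
 */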
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_VMDQ_DCB_TX;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	} else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_DCB_RX;
		eth_conf->txmode.mq_mode = ETH_DCB_TX;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}

int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rte_port->rx_conf.rx_thresh = rx_thresh;
	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
	rte_port->tx_conf.tx_thresh = tx_thresh;
	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++) {
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	return 0;
}

#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_pmd_init_all())
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
			  "check that CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
			  "configuration file\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	start_port(RTE_PORT_ALL);

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

	if (interactive == 1)
		prompt();
	else {
		char c;
		int rc;

		printf("No command-line core given, starting packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}