1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 
76 #include "testpmd.h"
77 
78 uint16_t verbose_level = 0; /**< Silent by default. */
79 
80 /* Use the master lcore for the interactive command line? */
81 uint8_t interactive = 0;
82 
83 /*
84  * NUMA support configuration.
85  * When set, NUMA support dispatches the allocation of the RX and TX
86  * memory rings and of the DMA memory buffers (mbufs) of the probed
87  * ports among CPU sockets 0 and 1.
88  * Otherwise, all memory is allocated from CPU socket 0.
89  */
90 uint8_t numa_support = 0; /**< No numa support by default */
91 
92 /*
93  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
94  * not configured.
95  */
96 uint8_t socket_num = UMA_NO_CONFIG;
97 
98 /*
99  * Record the Ethernet address of peer target ports to which packets are
100  * forwarded.
101  * Must be instantiated with the Ethernet addresses of the peer traffic
102  * generator ports.
103  */
104 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
105 portid_t nb_peer_eth_addrs = 0;
106 
107 /*
108  * Probed Target Environment.
109  */
110 struct rte_port *ports;	       /**< For all probed ethernet ports. */
111 portid_t nb_ports;             /**< Number of probed ethernet ports. */
112 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
113 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
114 
115 /*
116  * Test Forwarding Configuration.
117  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
118  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
119  */
120 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
121 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
122 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
123 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
124 
125 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
126 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
127 
128 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
129 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
130 
131 /*
132  * Forwarding engines.
133  */
134 struct fwd_engine * fwd_engines[] = {
135 	&io_fwd_engine,
136 	&mac_fwd_engine,
137 	&rx_only_engine,
138 	&tx_only_engine,
139 	&csum_fwd_engine,
140 #ifdef RTE_LIBRTE_IEEE1588
141 	&ieee1588_fwd_engine,
142 #endif
143 	NULL,
144 };
145 
146 struct fwd_config cur_fwd_config;
147 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
148 
149 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
150 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
151                                       * specified on command-line. */
152 
153 /*
154  * Configuration of packet segments used by the "txonly" processing engine.
155  */
156 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
157 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
158 	TXONLY_DEF_PACKET_LEN,
159 };
160 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
161 
162 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
163 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
164 
165 /* Whether the current configuration is in DCB mode; 0 means it is not */
166 uint8_t dcb_config = 0;
167 
168 /* Whether DCB testing is in progress */
169 uint8_t dcb_test = 0;
170 
171 /* DCB-on/VT-on queue mapping is the default */
172 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
173 
174 /*
175  * Configurable number of RX/TX queues.
176  */
177 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
178 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
179 
180 /*
181  * Configurable number of RX/TX ring descriptors.
182  */
183 #define RTE_TEST_RX_DESC_DEFAULT 128
184 #define RTE_TEST_TX_DESC_DEFAULT 512
185 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
186 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
187 
188 /*
189  * Configurable values of RX and TX ring threshold registers.
190  */
191 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
192 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
193 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
194 
195 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
196 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
197 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
198 
199 struct rte_eth_thresh rx_thresh = {
200 	.pthresh = RX_PTHRESH,
201 	.hthresh = RX_HTHRESH,
202 	.wthresh = RX_WTHRESH,
203 };
204 
205 struct rte_eth_thresh tx_thresh = {
206 	.pthresh = TX_PTHRESH,
207 	.hthresh = TX_HTHRESH,
208 	.wthresh = TX_WTHRESH,
209 };
210 
211 /*
212  * Configurable value of RX free threshold.
213  */
214 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
215 
216 /*
217  * Configurable value of RX drop enable.
218  */
219 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
220 
221 /*
222  * Configurable value of TX free threshold.
223  */
224 uint16_t tx_free_thresh = 0; /* Use default values. */
225 
226 /*
227  * Configurable value of TX RS bit threshold.
228  */
229 uint16_t tx_rs_thresh = 0; /* Use default values. */
230 
231 /*
232  * Configurable value of TX queue flags.
233  */
234 uint32_t txq_flags = 0; /* No flags set. */
235 
236 /*
237  * Receive Side Scaling (RSS) configuration.
238  */
239 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
240 
241 /*
242  * Port topology configuration
243  */
244 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
245 
246 
247 /*
248  * Avoid flushing all the RX queues before starting forwarding.
249  */
250 uint8_t no_flush_rx = 0; /* flush by default */
251 
252 /*
253  * Ethernet device configuration.
254  */
255 struct rte_eth_rxmode rx_mode = {
256 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
257 	.split_hdr_size = 0,
258 	.header_split   = 0, /**< Header Split disabled. */
259 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
260 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
261 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
262 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
263 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
264 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
265 };
266 
267 struct rte_fdir_conf fdir_conf = {
268 	.mode = RTE_FDIR_MODE_NONE,
269 	.pballoc = RTE_FDIR_PBALLOC_64K,
270 	.status = RTE_FDIR_REPORT_STATUS,
271 	.flexbytes_offset = 0x6,
272 	.drop_queue = 127,
273 };
274 
275 static volatile int test_done = 1; /* nonzero when packet forwarding is stopped. */
276 
277 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
278 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
279 
280 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
281 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
282 
283 uint16_t nb_tx_queue_stats_mappings = 0;
284 uint16_t nb_rx_queue_stats_mappings = 0;
285 
286 /* Forward function declarations */
287 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
288 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
289 
290 /*
291  * Check if all the ports are started.
292  * If yes, return a positive value. If not, return zero.
293  */
294 static int all_ports_started(void);
295 
296 /*
297  * Setup default configuration.
298  */
299 static void
300 set_default_fwd_lcores_config(void)
301 {
302 	unsigned int i;
303 	unsigned int nb_lc;
304 
305 	nb_lc = 0;
306 	for (i = 0; i < RTE_MAX_LCORE; i++) {
307 		if (! rte_lcore_is_enabled(i))
308 			continue;
309 		if (i == rte_get_master_lcore())
310 			continue;
311 		fwd_lcores_cpuids[nb_lc++] = i;
312 	}
313 	nb_lcores = (lcoreid_t) nb_lc;
314 	nb_cfg_lcores = nb_lcores;
315 	nb_fwd_lcores = 1;
316 }
317 
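/*
 * Give every possible peer port a default, locally administered Ethernet
 * address whose last byte encodes the port index.
 */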
318 static void
319 set_def_peer_eth_addrs(void)
320 {
321 	portid_t i;
322 
323 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
324 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
325 		peer_eth_addrs[i].addr_bytes[5] = i;
326 	}
327 }
328 
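/* By default, forward on every probed port, in probe order. */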
329 static void
330 set_default_fwd_ports_config(void)
331 {
332 	portid_t pt_id;
333 
334 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
335 		fwd_ports_ids[pt_id] = pt_id;
336 
337 	nb_cfg_ports = nb_ports;
338 	nb_fwd_ports = nb_ports;
339 }
340 
341 void
342 set_def_fwd_config(void)
343 {
344 	set_default_fwd_lcores_config();
345 	set_def_peer_eth_addrs();
346 	set_default_fwd_ports_config();
347 }
348 
349 /*
350  * Configuration initialisation done once at init time.
351  */
352 struct mbuf_ctor_arg {
353 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
354 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
355 };
356 
357 struct mbuf_pool_ctor_arg {
358 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
359 };
360 
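/*
 * Per-mbuf constructor invoked by rte_mempool_create(): set up the buffer
 * address, physical address and length of the mbuf, and reset its packet
 * metadata.
 */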
361 static void
362 testpmd_mbuf_ctor(struct rte_mempool *mp,
363 		  void *opaque_arg,
364 		  void *raw_mbuf,
365 		  __attribute__((unused)) unsigned i)
366 {
367 	struct mbuf_ctor_arg *mb_ctor_arg;
368 	struct rte_mbuf    *mb;
369 
370 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
371 	mb = (struct rte_mbuf *) raw_mbuf;
372 
373 	mb->type         = RTE_MBUF_PKT;
374 	mb->pool         = mp;
375 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
376 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
377 			mb_ctor_arg->seg_buf_offset);
378 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
380 	mb->ol_flags     = 0;
381 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
382 	mb->pkt.nb_segs  = 1;
383 	mb->pkt.vlan_macip.data = 0;
384 	mb->pkt.hash.rss = 0;
385 }
386 
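/*
 * Pool constructor invoked by rte_mempool_create(): store the mbuf data
 * room size in the private area of the pool.
 */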
387 static void
388 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
389 		       void *opaque_arg)
390 {
391 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
392 	struct rte_pktmbuf_pool_private *mbp_priv;
393 
394 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
395 		printf("%s(%s) private_data_size %d < %d\n",
396 		       __func__, mp->name, (int) mp->private_data_size,
397 		       (int) sizeof(struct rte_pktmbuf_pool_private));
398 		return;
399 	}
400 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
401 	mbp_priv = (struct rte_pktmbuf_pool_private *)
402 		((char *)mp + sizeof(struct rte_mempool));
403 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
404 }
405 
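/*
 * Create an mbuf pool of nb_mbuf elements in the memory of the given
 * socket; the pool name is derived from the socket id. Exits on failure.
 */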
406 static void
407 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
408 		 unsigned int socket_id)
409 {
410 	char pool_name[RTE_MEMPOOL_NAMESIZE];
411 	struct rte_mempool *rte_mp;
412 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
413 	struct mbuf_ctor_arg mb_ctor_arg;
414 	uint32_t mb_size;
415 
416 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
417 						mbuf_seg_size);
418 	mb_ctor_arg.seg_buf_offset =
419 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
420 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
421 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
422 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
423 	rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
424 				    (unsigned) mb_mempool_cache,
425 				    sizeof(struct rte_pktmbuf_pool_private),
426 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
427 				    testpmd_mbuf_ctor, &mb_ctor_arg,
428 				    socket_id, 0);
429 	if (rte_mp == NULL) {
430 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
431 						"failed\n", socket_id);
432 	}
433 }
434 
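/*
 * One-time configuration: allocate the per-lcore and per-port data
 * structures, create the mbuf pool(s) and initialize the forwarding
 * streams.
 */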
435 static void
436 init_config(void)
437 {
438 	portid_t pid;
439 	struct rte_port *port;
440 	struct rte_mempool *mbp;
441 	unsigned int nb_mbuf_per_pool;
442 	lcoreid_t  lc_id;
443 	uint8_t port_per_socket[MAX_SOCKET];
444 
445 	memset(port_per_socket,0,MAX_SOCKET);
446 	/* Configuration of logical cores. */
447 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
448 				sizeof(struct fwd_lcore *) * nb_lcores,
449 				CACHE_LINE_SIZE);
450 	if (fwd_lcores == NULL) {
451 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
452 							"failed\n", nb_lcores);
453 	}
454 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
455 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
456 					       sizeof(struct fwd_lcore),
457 					       CACHE_LINE_SIZE);
458 		if (fwd_lcores[lc_id] == NULL) {
459 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
460 								"failed\n");
461 		}
462 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
463 	}
464 
465 	/*
466 	 * Create the mbuf pools.
467 	 * If NUMA support is disabled, create a single pool of mbufs in
468 	 * socket 0 memory by default.
469 	 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
470 	 *
471 	 * Size the pools from the maximum values of nb_rxd and nb_txd, so
472 	 * that nb_rxd and nb_txd can still be reconfigured at run time.
473 	 */
474 	if (param_total_num_mbufs)
475 		nb_mbuf_per_pool = param_total_num_mbufs;
476 	else {
477 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
478 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
479 
480 		if (!numa_support)
481 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
482 	}
483 
484 	if (!numa_support) {
485 		if (socket_num == UMA_NO_CONFIG)
486 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
487 		else
488 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
489 						 socket_num);
490 	}
491 	/*
492 	 * Record which mbuf pool each logical core should use, if needed.
493 	 */
494 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
495 		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
496 		if (mbp == NULL)
497 			mbp = mbuf_pool_find(0);
498 		fwd_lcores[lc_id]->mbp = mbp;
499 	}
500 
501 	/* Configuration of Ethernet ports. */
502 	ports = rte_zmalloc("testpmd: ports",
503 			    sizeof(struct rte_port) * nb_ports,
504 			    CACHE_LINE_SIZE);
505 	if (ports == NULL) {
506 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
507 							"failed\n", nb_ports);
508 	}
509 
510 	for (pid = 0; pid < nb_ports; pid++) {
511 		port = &ports[pid];
512 		rte_eth_dev_info_get(pid, &port->dev_info);
513 
514 		if (numa_support) {
515 			if (port_numa[pid] != NUMA_NO_CONFIG)
516 				port_per_socket[port_numa[pid]]++;
517 			else {
518 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
519 				port_per_socket[socket_id]++;
520 			}
521 		}
522 
523 		/* set flag to initialize port/queue */
524 		port->need_reconfig = 1;
525 		port->need_reconfig_queues = 1;
526 	}
527 
528 	if (numa_support) {
529 		uint8_t i;
530 		unsigned int nb_mbuf;
531 
532 		if (param_total_num_mbufs)
533 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
534 
535 		for (i = 0; i < MAX_SOCKET; i++) {
536 			nb_mbuf = (nb_mbuf_per_pool *
537 						port_per_socket[i]);
538 			if (nb_mbuf)
539 				mbuf_pool_create(mbuf_data_size,
540 						nb_mbuf,i);
541 		}
542 	}
543 	init_port_config();
544 	/* Configuration of packet forwarding streams. */
545 	if (init_fwd_streams() < 0)
546 		rte_exit(EXIT_FAILURE, "init_fwd_streams() failed\n");
547 }
548 
549 int
550 init_fwd_streams(void)
551 {
552 	portid_t pid;
553 	struct rte_port *port;
554 	streamid_t sm_id, nb_fwd_streams_new;
555 
556 	/* Set each port's socket id according to the NUMA configuration */
557 	for (pid = 0; pid < nb_ports; pid++) {
558 		port = &ports[pid];
559 		if (nb_rxq > port->dev_info.max_rx_queues) {
560 			printf("Fail: nb_rxq(%d) is greater than "
561 				"max_rx_queues(%d)\n", nb_rxq,
562 				port->dev_info.max_rx_queues);
563 			return -1;
564 		}
565 		if (nb_txq > port->dev_info.max_tx_queues) {
566 			printf("Fail: nb_txq(%d) is greater than "
567 				"max_tx_queues(%d)\n", nb_txq,
568 				port->dev_info.max_tx_queues);
569 			return -1;
570 		}
571 		if (numa_support)
572 			port->socket_id = rte_eth_dev_socket_id(pid);
573 		else {
574 			if (socket_num == UMA_NO_CONFIG)
575 				port->socket_id = 0;
576 			else
577 				port->socket_id = socket_num;
578 		}
579 	}
580 
581 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
582 	if (nb_fwd_streams_new == nb_fwd_streams)
583 		return 0;
584 	/* clear the old */
585 	if (fwd_streams != NULL) {
586 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
587 			if (fwd_streams[sm_id] == NULL)
588 				continue;
589 			rte_free(fwd_streams[sm_id]);
590 			fwd_streams[sm_id] = NULL;
591 		}
592 		rte_free(fwd_streams);
593 		fwd_streams = NULL;
594 	}
595 
596 	/* init new */
597 	nb_fwd_streams = nb_fwd_streams_new;
598 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
599 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
600 	if (fwd_streams == NULL)
601 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
602 						"failed\n", nb_fwd_streams);
603 
604 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
605 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
606 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
607 		if (fwd_streams[sm_id] == NULL)
608 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
609 								" failed\n");
610 	}
611 
612 	return 0;
613 }
614 
615 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
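/*
 * Display the recorded spread of burst sizes for a RX/TX stream: the total
 * number of bursts and the percentage of the most frequent burst sizes.
 */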
616 static void
617 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
618 {
619 	unsigned int total_burst;
620 	unsigned int nb_burst;
621 	unsigned int burst_stats[3];
622 	uint16_t pktnb_stats[3];
623 	uint16_t nb_pkt;
624 	int burst_percent[3];
625 
626 	/*
627 	 * First compute the total number of packet bursts and the
628 	 * two highest numbers of bursts of the same number of packets.
629 	 */
630 	total_burst = 0;
631 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
632 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
633 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
634 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
635 		if (nb_burst == 0)
636 			continue;
637 		total_burst += nb_burst;
638 		if (nb_burst > burst_stats[0]) {
639 			burst_stats[1] = burst_stats[0];
640 			pktnb_stats[1] = pktnb_stats[0];
641 			burst_stats[0] = nb_burst;
642 			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst count, which
			 * the branch above alone would miss */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
643 		}
644 	}
645 	if (total_burst == 0)
646 		return;
647 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
648 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
649 	       burst_percent[0], (int) pktnb_stats[0]);
650 	if (burst_stats[0] == total_burst) {
651 		printf("]\n");
652 		return;
653 	}
654 	if (burst_stats[0] + burst_stats[1] == total_burst) {
655 		printf(" + %d%% of %d pkts]\n",
656 		       100 - burst_percent[0], pktnb_stats[1]);
657 		return;
658 	}
659 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
660 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
661 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
662 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
663 		return;
664 	}
665 	printf(" + %d%% of %d pkts + %d%% of others]\n",
666 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
667 }
668 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
669 
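/* Display the forwarding statistics accumulated on one port. */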
670 static void
671 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
672 {
673 	struct rte_port *port;
674 	uint8_t i;
675 
676 	static const char *fwd_stats_border = "----------------------";
677 
678 	port = &ports[port_id];
679 	printf("\n  %s Forward statistics for port %-2d %s\n",
680 	       fwd_stats_border, port_id, fwd_stats_border);
681 
682 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
683 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
684 		       "%-"PRIu64"\n",
685 		       stats->ipackets, stats->ierrors,
686 		       (uint64_t) (stats->ipackets + stats->ierrors));
687 
688 		if (cur_fwd_eng == &csum_fwd_engine)
689 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
690 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
691 
692 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
693 		       "%-"PRIu64"\n",
694 		       stats->opackets, port->tx_dropped,
695 		       (uint64_t) (stats->opackets + port->tx_dropped));
696 
697 		if (stats->rx_nombuf > 0)
698 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
699 
700 	}
701 	else {
702 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
703 		       "%14"PRIu64"\n",
704 		       stats->ipackets, stats->ierrors,
705 		       (uint64_t) (stats->ipackets + stats->ierrors));
706 
707 		if (cur_fwd_eng == &csum_fwd_engine)
708 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
709 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
710 
711 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
712 		       "%14"PRIu64"\n",
713 		       stats->opackets, port->tx_dropped,
714 		       (uint64_t) (stats->opackets + port->tx_dropped));
715 
716 		if (stats->rx_nombuf > 0)
717 			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
718 	}
719 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
720 	if (port->rx_stream)
721 		pkt_burst_stats_display("RX",
722 			&port->rx_stream->rx_burst_stats);
723 	if (port->tx_stream)
724 		pkt_burst_stats_display("TX",
725 			&port->tx_stream->tx_burst_stats);
726 #endif
727 	/* stats fdir */
728 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
729 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
730 		       stats->fdirmiss,
731 		       stats->fdirmatch);
732 
733 	if (port->rx_queue_stats_mapping_enabled) {
734 		printf("\n");
735 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
736 			printf("  Stats reg %2d RX-packets:%14"PRIu64
737 			       "     RX-errors:%14"PRIu64
738 			       "    RX-bytes:%14"PRIu64"\n",
739 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
740 		}
741 		printf("\n");
742 	}
743 	if (port->tx_queue_stats_mapping_enabled) {
744 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
745 			printf("  Stats reg %2d TX-packets:%14"PRIu64
746 			       "                                 TX-bytes:%14"PRIu64"\n",
747 			       i, stats->q_opackets[i], stats->q_obytes[i]);
748 		}
749 	}
750 
751 	printf("  %s--------------------------------%s\n",
752 	       fwd_stats_border, fwd_stats_border);
753 }
754 
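/* Display the statistics of one forwarding stream, if it carried traffic. */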
755 static void
756 fwd_stream_stats_display(streamid_t stream_id)
757 {
758 	struct fwd_stream *fs;
759 	static const char *fwd_top_stats_border = "-------";
760 
761 	fs = fwd_streams[stream_id];
762 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
763 	    (fs->fwd_dropped == 0))
764 		return;
765 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
766 	       "TX Port=%2d/Queue=%2d %s\n",
767 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
768 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
769 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
770 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
771 
772 	/* if checksum mode */
773 	if (cur_fwd_eng == &csum_fwd_engine) {
774 		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
775 		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
776 	}
777 
778 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
779 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
780 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
781 #endif
782 }
783 
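/*
 * Drain the RX queues of all forwarding ports so that a new run does not
 * start with stale packets; two passes with a 10 ms pause in between.
 */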
784 static void
785 flush_fwd_rx_queues(void)
786 {
787 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
788 	portid_t  rxp;
789 	portid_t port_id;
790 	queueid_t rxq;
791 	uint16_t  nb_rx;
792 	uint16_t  i;
793 	uint8_t   j;
794 
795 	for (j = 0; j < 2; j++) {
796 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
797 			for (rxq = 0; rxq < nb_rxq; rxq++) {
798 				port_id = fwd_ports_ids[rxp];
799 				do {
800 					nb_rx = rte_eth_rx_burst(port_id, rxq,
801 						pkts_burst, MAX_PKT_BURST);
802 					for (i = 0; i < nb_rx; i++)
803 						rte_pktmbuf_free(pkts_burst[i]);
804 				} while (nb_rx > 0);
805 			}
806 		}
807 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
808 	}
809 }
810 
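/*
 * Apply the given packet forwarding function to the streams assigned to
 * this lcore, looping until the lcore is told to stop.
 */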
811 static void
812 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
813 {
814 	struct fwd_stream **fsm;
815 	streamid_t nb_fs;
816 	streamid_t sm_id;
817 
818 	fsm = &fwd_streams[fc->stream_idx];
819 	nb_fs = fc->stream_nb;
820 	do {
821 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
822 			(*pkt_fwd)(fsm[sm_id]);
823 	} while (! fc->stopped);
824 }
825 
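/* lcore entry point: forward packets with the currently configured engine. */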
826 static int
827 start_pkt_forward_on_core(void *fwd_arg)
828 {
829 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
830 			     cur_fwd_config.fwd_eng->packet_fwd);
831 	return 0;
832 }
833 
834 /*
835  * Run the TXONLY packet forwarding engine to send a single burst of packets.
836  * Used to start communication flows in network loopback test configurations.
837  */
838 static int
839 run_one_txonly_burst_on_core(void *fwd_arg)
840 {
841 	struct fwd_lcore *fwd_lc;
842 	struct fwd_lcore tmp_lcore;
843 
844 	fwd_lc = (struct fwd_lcore *) fwd_arg;
845 	tmp_lcore = *fwd_lc;
846 	tmp_lcore.stopped = 1;
847 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
848 	return 0;
849 }
850 
851 /*
852  * Launch packet forwarding:
853  *     - Setup per-port forwarding context.
854  *     - launch logical cores with their forwarding configuration.
855  */
856 static void
857 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
858 {
859 	port_fwd_begin_t port_fwd_begin;
860 	unsigned int i;
861 	unsigned int lc_id;
862 	int diag;
863 
864 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
865 	if (port_fwd_begin != NULL) {
866 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
867 			(*port_fwd_begin)(fwd_ports_ids[i]);
868 	}
869 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
870 		lc_id = fwd_lcores_cpuids[i];
871 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
872 			fwd_lcores[i]->stopped = 0;
873 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
874 						     fwd_lcores[i], lc_id);
875 			if (diag != 0)
876 				printf("launch lcore %u failed - diag=%d\n",
877 				       lc_id, diag);
878 		}
879 	}
880 }
881 
882 /*
883  * Launch packet forwarding configuration.
884  */
885 void
886 start_packet_forwarding(int with_tx_first)
887 {
888 	port_fwd_begin_t port_fwd_begin;
889 	port_fwd_end_t  port_fwd_end;
890 	struct rte_port *port;
891 	unsigned int i;
892 	portid_t   pt_id;
893 	streamid_t sm_id;
894 
895 	if (all_ports_started() == 0) {
896 		printf("Not all ports were started\n");
897 		return;
898 	}
899 	if (test_done == 0) {
900 		printf("Packet forwarding already started\n");
901 		return;
902 	}
903 	if (dcb_test) {
904 		for (i = 0; i < nb_fwd_ports; i++) {
905 			pt_id = fwd_ports_ids[i];
906 			port = &ports[pt_id];
907 			if (!port->dcb_flag) {
908 				printf("In DCB mode, all forwarding ports must "
909 				       "be configured in this mode.\n");
910 				return;
911 			}
912 		}
913 		if (nb_fwd_lcores == 1) {
914 			printf("In DCB mode, the number of forwarding cores "
915 			       "must be larger than 1.\n");
916 			return;
917 		}
918 	}
919 	test_done = 0;
920 
921 	if(!no_flush_rx)
922 		flush_fwd_rx_queues();
923 
924 	fwd_config_setup();
925 	rxtx_config_display();
926 
927 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
928 		pt_id = fwd_ports_ids[i];
929 		port = &ports[pt_id];
930 		rte_eth_stats_get(pt_id, &port->stats);
931 		port->tx_dropped = 0;
932 
933 		map_port_queue_stats_mapping_registers(pt_id, port);
934 	}
935 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
936 		fwd_streams[sm_id]->rx_packets = 0;
937 		fwd_streams[sm_id]->tx_packets = 0;
938 		fwd_streams[sm_id]->fwd_dropped = 0;
939 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
940 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
941 
942 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
943 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
944 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
945 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
946 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
947 #endif
948 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
949 		fwd_streams[sm_id]->core_cycles = 0;
950 #endif
951 	}
952 	if (with_tx_first) {
953 		port_fwd_begin = tx_only_engine.port_fwd_begin;
954 		if (port_fwd_begin != NULL) {
955 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
956 				(*port_fwd_begin)(fwd_ports_ids[i]);
957 		}
958 		launch_packet_forwarding(run_one_txonly_burst_on_core);
959 		rte_eal_mp_wait_lcore();
960 		port_fwd_end = tx_only_engine.port_fwd_end;
961 		if (port_fwd_end != NULL) {
962 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
963 				(*port_fwd_end)(fwd_ports_ids[i]);
964 		}
965 	}
966 	launch_packet_forwarding(start_pkt_forward_on_core);
967 }
968 
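/*
 * Stop packet forwarding: signal the forwarding lcores to stop, wait for
 * them to finish, then collect and display per-stream, per-port and
 * accumulated statistics.
 */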
969 void
970 stop_packet_forwarding(void)
971 {
972 	struct rte_eth_stats stats;
973 	struct rte_port *port;
974 	port_fwd_end_t  port_fwd_end;
975 	int i;
976 	portid_t   pt_id;
977 	streamid_t sm_id;
978 	lcoreid_t  lc_id;
979 	uint64_t total_recv;
980 	uint64_t total_xmit;
981 	uint64_t total_rx_dropped;
982 	uint64_t total_tx_dropped;
983 	uint64_t total_rx_nombuf;
984 	uint64_t tx_dropped;
985 	uint64_t rx_bad_ip_csum;
986 	uint64_t rx_bad_l4_csum;
987 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
988 	uint64_t fwd_cycles;
989 #endif
990 	static const char *acc_stats_border = "+++++++++++++++";
991 
992 	if (all_ports_started() == 0) {
993 		printf("Not all ports were started\n");
994 		return;
995 	}
996 	if (test_done) {
997 		printf("Packet forwarding not started\n");
998 		return;
999 	}
1000 	printf("Telling cores to stop...");
1001 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1002 		fwd_lcores[lc_id]->stopped = 1;
1003 	printf("\nWaiting for lcores to finish...\n");
1004 	rte_eal_mp_wait_lcore();
1005 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1006 	if (port_fwd_end != NULL) {
1007 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1008 			pt_id = fwd_ports_ids[i];
1009 			(*port_fwd_end)(pt_id);
1010 		}
1011 	}
1012 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1013 	fwd_cycles = 0;
1014 #endif
1015 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1016 		if (cur_fwd_config.nb_fwd_streams >
1017 		    cur_fwd_config.nb_fwd_ports) {
1018 			fwd_stream_stats_display(sm_id);
1019 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1020 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1021 		} else {
1022 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1023 				fwd_streams[sm_id];
1024 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1025 				fwd_streams[sm_id];
1026 		}
1027 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1028 		tx_dropped = (uint64_t) (tx_dropped +
1029 					 fwd_streams[sm_id]->fwd_dropped);
1030 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1031 
1032 		rx_bad_ip_csum =
1033 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1034 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1035 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1036 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1037 							rx_bad_ip_csum;
1038 
1039 		rx_bad_l4_csum =
1040 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1041 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1042 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1043 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1044 							rx_bad_l4_csum;
1045 
1046 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1047 		fwd_cycles = (uint64_t) (fwd_cycles +
1048 					 fwd_streams[sm_id]->core_cycles);
1049 #endif
1050 	}
1051 	total_recv = 0;
1052 	total_xmit = 0;
1053 	total_rx_dropped = 0;
1054 	total_tx_dropped = 0;
1055 	total_rx_nombuf  = 0;
1056 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1057 		pt_id = fwd_ports_ids[i];
1058 
1059 		port = &ports[pt_id];
1060 		rte_eth_stats_get(pt_id, &stats);
1061 		stats.ipackets -= port->stats.ipackets;
1062 		port->stats.ipackets = 0;
1063 		stats.opackets -= port->stats.opackets;
1064 		port->stats.opackets = 0;
1065 		stats.ibytes   -= port->stats.ibytes;
1066 		port->stats.ibytes = 0;
1067 		stats.obytes   -= port->stats.obytes;
1068 		port->stats.obytes = 0;
1069 		stats.ierrors  -= port->stats.ierrors;
1070 		port->stats.ierrors = 0;
1071 		stats.oerrors  -= port->stats.oerrors;
1072 		port->stats.oerrors = 0;
1073 		stats.rx_nombuf -= port->stats.rx_nombuf;
1074 		port->stats.rx_nombuf = 0;
1075 		stats.fdirmatch -= port->stats.fdirmatch;
1076 		port->stats.fdirmatch = 0;
1077 		stats.fdirmiss -= port->stats.fdirmiss;
1078 		port->stats.fdirmiss = 0;
1079 
1080 		total_recv += stats.ipackets;
1081 		total_xmit += stats.opackets;
1082 		total_rx_dropped += stats.ierrors;
1083 		total_tx_dropped += port->tx_dropped;
1084 		total_rx_nombuf  += stats.rx_nombuf;
1085 
1086 		fwd_port_stats_display(pt_id, &stats);
1087 	}
1088 	printf("\n  %s Accumulated forward statistics for all ports"
1089 	       "%s\n",
1090 	       acc_stats_border, acc_stats_border);
1091 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1092 	       "%-"PRIu64"\n"
1093 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1094 	       "%-"PRIu64"\n",
1095 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1096 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1097 	if (total_rx_nombuf > 0)
1098 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1099 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1100 	       "%s\n",
1101 	       acc_stats_border, acc_stats_border);
1102 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1103 	if (total_recv > 0)
1104 		printf("\n  CPU cycles/packet=%u (total cycles="
1105 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1106 		       (unsigned int)(fwd_cycles / total_recv),
1107 		       fwd_cycles, total_recv);
1108 #endif
1109 	printf("\nDone.\n");
1110 	test_done = 1;
1111 }
1112 
1113 static int
1114 all_ports_started(void)
1115 {
1116 	portid_t pi;
1117 	struct rte_port *port;
1118 
1119 	for (pi = 0; pi < nb_ports; pi++) {
1120 		port = &ports[pi];
1121 		/* Check if there is a port which is not started */
1122 		if (port->port_status != RTE_PORT_STARTED)
1123 			return 0;
1124 	}
1125 
1126 	/* All ports are started */
1127 	return 1;
1128 }
1129 
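/*
 * Start the given port, or every probed port when pid is out of range
 * (RTE_PORT_ALL), reconfiguring the device and its queues when needed.
 */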
1130 void
1131 start_port(portid_t pid)
1132 {
1133 	int diag, need_check_link_status = 0;
1134 	portid_t pi;
1135 	queueid_t qi;
1136 	struct rte_port *port;
1137 
1138 	if (test_done == 0) {
1139 		printf("Please stop forwarding first\n");
1140 		return;
1141 	}
1142 
1143 	if (init_fwd_streams() < 0) {
1144 		printf("init_fwd_streams() failed\n");
1145 		return;
1146 	}
1147 
1148 	if(dcb_config)
1149 		dcb_test = 1;
1150 	for (pi = 0; pi < nb_ports; pi++) {
1151 		if (pid < nb_ports && pid != pi)
1152 			continue;
1153 
1154 		port = &ports[pi];
1155 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1156 						 RTE_PORT_HANDLING) == 0) {
1157 			printf("Port %d is not stopped\n", pi);
1158 			continue;
1159 		}
1160 
1161 		if (port->need_reconfig > 0) {
1162 			port->need_reconfig = 0;
1163 
1164 			printf("Configuring Port %d (socket %d)\n", pi,
1165 					rte_eth_dev_socket_id(pi));
1166 			/* configure port */
1167 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1168 						&(port->dev_conf));
1169 			if (diag != 0) {
1170 				if (rte_atomic16_cmpset(&(port->port_status),
1171 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1172 					printf("Port %d cannot be set back "
1173 							"to stopped\n", pi);
1174 				printf("Failed to configure port %d\n", pi);
1175 				/* try to reconfigure port next time */
1176 				port->need_reconfig = 1;
1177 				return;
1178 			}
1179 		}
1180 		if (port->need_reconfig_queues > 0) {
1181 			port->need_reconfig_queues = 0;
1182 			/* setup tx queues */
1183 			for (qi = 0; qi < nb_txq; qi++) {
1184 				if ((numa_support) &&
1185 					(txring_numa[pi] != NUMA_NO_CONFIG))
1186 					diag = rte_eth_tx_queue_setup(pi, qi,
1187 						nb_txd,txring_numa[pi],
1188 						&(port->tx_conf));
1189 				else
1190 					diag = rte_eth_tx_queue_setup(pi, qi,
1191 						nb_txd,port->socket_id,
1192 						&(port->tx_conf));
1193 
1194 				if (diag == 0)
1195 					continue;
1196 
1197 				/* Failed to set up a TX queue; return */
1198 				if (rte_atomic16_cmpset(&(port->port_status),
1199 							RTE_PORT_HANDLING,
1200 							RTE_PORT_STOPPED) == 0)
1201 					printf("Port %d cannot be set back "
1202 							"to stopped\n", pi);
1203 				printf("Failed to configure port %d TX queues\n", pi);
1204 				/* try to reconfigure queues next time */
1205 				port->need_reconfig_queues = 1;
1206 				return;
1207 			}
1208 			/* setup rx queues */
1209 			for (qi = 0; qi < nb_rxq; qi++) {
1210 				if ((numa_support) &&
1211 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1212 					struct rte_mempool * mp =
1213 						mbuf_pool_find(rxring_numa[pi]);
1214 					if (mp == NULL) {
1215 						printf("Failed to setup RX queue: "
1216 							"no mempool allocation "
1217 							"on socket %d\n",
1218 							rxring_numa[pi]);
1219 						return;
1220 					}
1221 
1222 					diag = rte_eth_rx_queue_setup(pi, qi,
1223 					     nb_rxd,rxring_numa[pi],
1224 					     &(port->rx_conf),mp);
1225 				}
1226 				else
1227 					diag = rte_eth_rx_queue_setup(pi, qi,
1228 					     nb_rxd,port->socket_id,
1229 					     &(port->rx_conf),
1230 				             mbuf_pool_find(port->socket_id));
1231 
1232 				if (diag == 0)
1233 					continue;
1234 
1235 
1236 				/* Failed to set up an RX queue; return */
1237 				if (rte_atomic16_cmpset(&(port->port_status),
1238 							RTE_PORT_HANDLING,
1239 							RTE_PORT_STOPPED) == 0)
1240 					printf("Port %d cannot be set back "
1241 							"to stopped\n", pi);
1242 				printf("Failed to configure port %d RX queues\n", pi);
1243 				/* try to reconfigure queues next time */
1244 				port->need_reconfig_queues = 1;
1245 				return;
1246 			}
1247 		}
1248 		/* start port */
1249 		if (rte_eth_dev_start(pi) < 0) {
1250 			printf("Failed to start port %d\n", pi);
1251 
1252 			/* Failed to start the port; set it back to stopped */
1253 			if (rte_atomic16_cmpset(&(port->port_status),
1254 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1255 				printf("Port %d cannot be set back to "
1256 							"stopped\n", pi);
1257 			continue;
1258 		}
1259 
1260 		if (rte_atomic16_cmpset(&(port->port_status),
1261 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1262 			printf("Port %d cannot be set to started\n", pi);
1263 
1264 		/* at least one port started, need checking link status */
1265 		need_check_link_status = 1;
1266 	}
1267 
1268 	if (need_check_link_status)
1269 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1270 	else
1271 		printf("Please stop the ports first\n");
1272 
1273 	printf("Done\n");
1274 }
1275 
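/* Stop the given port, or every probed port when pid is out of range. */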
1276 void
1277 stop_port(portid_t pid)
1278 {
1279 	portid_t pi;
1280 	struct rte_port *port;
1281 	int need_check_link_status = 0;
1282 
1283 	if (test_done == 0) {
1284 		printf("Please stop forwarding first\n");
1285 		return;
1286 	}
1287 	if (dcb_test) {
1288 		dcb_test = 0;
1289 		dcb_config = 0;
1290 	}
1291 	printf("Stopping ports...\n");
1292 
1293 	for (pi = 0; pi < nb_ports; pi++) {
1294 		if (pid < nb_ports && pid != pi)
1295 			continue;
1296 
1297 		port = &ports[pi];
1298 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1299 						RTE_PORT_HANDLING) == 0)
1300 			continue;
1301 
1302 		rte_eth_dev_stop(pi);
1303 
1304 		if (rte_atomic16_cmpset(&(port->port_status),
1305 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1306 			printf("Port %d cannot be set to stopped\n", pi);
1307 		need_check_link_status = 1;
1308 	}
1309 	if (need_check_link_status)
1310 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1311 
1312 	printf("Done\n");
1313 }
1314 
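/* Close the given port, or every probed port when pid is out of range. */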
1315 void
1316 close_port(portid_t pid)
1317 {
1318 	portid_t pi;
1319 	struct rte_port *port;
1320 
1321 	if (test_done == 0) {
1322 		printf("Please stop forwarding first\n");
1323 		return;
1324 	}
1325 
1326 	printf("Closing ports...\n");
1327 
1328 	for (pi = 0; pi < nb_ports; pi++) {
1329 		if (pid < nb_ports && pid != pi)
1330 			continue;
1331 
1332 		port = &ports[pi];
1333 		if (rte_atomic16_cmpset(&(port->port_status),
1334 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1335 			printf("Port %d is not stopped\n", pi);
1336 			continue;
1337 		}
1338 
1339 		rte_eth_dev_close(pi);
1340 
1341 		if (rte_atomic16_cmpset(&(port->port_status),
1342 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1343 			printf("Port %d cannot be set to closed\n", pi);
1344 	}
1345 
1346 	printf("Done\n");
1347 }
1348 
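/* Return 1 if all probed ports are stopped, 0 otherwise. */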
1349 int
1350 all_ports_stopped(void)
1351 {
1352 	portid_t pi;
1353 	struct rte_port *port;
1354 
1355 	for (pi = 0; pi < nb_ports; pi++) {
1356 		port = &ports[pi];
1357 		if (port->port_status != RTE_PORT_STOPPED)
1358 			return 0;
1359 	}
1360 
1361 	return 1;
1362 }
1363 
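/* Close every probed port before exiting. */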
1364 void
1365 pmd_test_exit(void)
1366 {
1367 	portid_t pt_id;
1368 
1369 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1370 		printf("Stopping port %d...", pt_id);
1371 		fflush(stdout);
1372 		rte_eth_dev_close(pt_id);
1373 		printf("done\n");
1374 	}
1375 	printf("bye...\n");
1376 }
1377 
1378 typedef void (*cmd_func_t)(void);
1379 struct pmd_test_command {
1380 	const char *cmd_name;
1381 	cmd_func_t cmd_func;
1382 };
1383 
1384 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1385 
1386 /* Check the link status of all ports for up to 9 s, then print the final status */
1387 static void
1388 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1389 {
1390 #define CHECK_INTERVAL 100 /* 100ms */
1391 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1392 	uint8_t portid, count, all_ports_up, print_flag = 0;
1393 	struct rte_eth_link link;
1394 
1395 	printf("Checking link statuses...\n");
1396 	fflush(stdout);
1397 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1398 		all_ports_up = 1;
1399 		for (portid = 0; portid < port_num; portid++) {
1400 			if ((port_mask & (1 << portid)) == 0)
1401 				continue;
1402 			memset(&link, 0, sizeof(link));
1403 			rte_eth_link_get_nowait(portid, &link);
1404 			/* print link status if flag set */
1405 			if (print_flag == 1) {
1406 				if (link.link_status)
1407 					printf("Port %d Link Up - speed %u "
1408 						"Mbps - %s\n", (uint8_t)portid,
1409 						(unsigned)link.link_speed,
1410 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1411 					("full-duplex") : ("half-duplex"));
1412 				else
1413 					printf("Port %d Link Down\n",
1414 						(uint8_t)portid);
1415 				continue;
1416 			}
1417 			/* clear all_ports_up flag if any link down */
1418 			if (link.link_status == 0) {
1419 				all_ports_up = 0;
1420 				break;
1421 			}
1422 		}
1423 		/* after finally printing all link status, get out */
1424 		if (print_flag == 1)
1425 			break;
1426 
1427 		if (all_ports_up == 0) {
1428 			fflush(stdout);
1429 			rte_delay_ms(CHECK_INTERVAL);
1430 		}
1431 
1432 		/* set the print_flag if all ports up or timeout */
1433 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1434 			print_flag = 1;
1435 		}
1436 	}
1437 }
1438 
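/*
 * Program the TX queue statistics mapping registers of the port from the
 * global mapping table; mark the port when at least one mapping applied.
 */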
1439 static int
1440 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1441 {
1442 	uint16_t i;
1443 	int diag;
1444 	uint8_t mapping_found = 0;
1445 
1446 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1447 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1448 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1449 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1450 					tx_queue_stats_mappings[i].queue_id,
1451 					tx_queue_stats_mappings[i].stats_counter_id);
1452 			if (diag != 0)
1453 				return diag;
1454 			mapping_found = 1;
1455 		}
1456 	}
1457 	if (mapping_found)
1458 		port->tx_queue_stats_mapping_enabled = 1;
1459 	return 0;
1460 }
1461 
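/*
 * Program the RX queue statistics mapping registers of the port from the
 * global mapping table; mark the port when at least one mapping applied.
 */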
1462 static int
1463 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1464 {
1465 	uint16_t i;
1466 	int diag;
1467 	uint8_t mapping_found = 0;
1468 
1469 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1470 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1471 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1472 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1473 					rx_queue_stats_mappings[i].queue_id,
1474 					rx_queue_stats_mappings[i].stats_counter_id);
1475 			if (diag != 0)
1476 				return diag;
1477 			mapping_found = 1;
1478 		}
1479 	}
1480 	if (mapping_found)
1481 		port->rx_queue_stats_mapping_enabled = 1;
1482 	return 0;
1483 }
1484 
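/*
 * Apply the TX and RX queue statistics mappings to a port; -ENOTSUP
 * disables the feature for that port, any other error is fatal.
 */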
1485 static void
1486 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1487 {
1488 	int diag = 0;
1489 
1490 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1491 	if (diag != 0) {
1492 		if (diag == -ENOTSUP) {
1493 			port->tx_queue_stats_mapping_enabled = 0;
1494 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1495 		}
1496 		else
1497 			rte_exit(EXIT_FAILURE,
1498 					"set_tx_queue_stats_mapping_registers "
1499 					"failed for port id=%d diag=%d\n",
1500 					pi, diag);
1501 	}
1502 
1503 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1504 	if (diag != 0) {
1505 		if (diag == -ENOTSUP) {
1506 			port->rx_queue_stats_mapping_enabled = 0;
1507 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1508 		}
1509 		else
1510 			rte_exit(EXIT_FAILURE,
1511 					"set_rx_queue_stats_mapping_registers "
1512 					"failed for port id=%d diag=%d\n",
1513 					pi, diag);
1514 	}
1515 }
1516 
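/* Apply the default RX/TX and FDIR configuration to every probed port. */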
1517 void
1518 init_port_config(void)
1519 {
1520 	portid_t pid;
1521 	struct rte_port *port;
1522 
1523 	for (pid = 0; pid < nb_ports; pid++) {
1524 		port = &ports[pid];
1525 		port->dev_conf.rxmode = rx_mode;
1526 		port->dev_conf.fdir_conf = fdir_conf;
1527 		if (nb_rxq > 0) {
1528 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1529 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1530 		} else {
1531 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1532 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1533 		}
1534 		port->rx_conf.rx_thresh = rx_thresh;
1535 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1536 		port->rx_conf.rx_drop_en = rx_drop_en;
1537 		port->tx_conf.tx_thresh = tx_thresh;
1538 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1539 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1540 		port->tx_conf.txq_flags = txq_flags;
1541 
1542 		rte_eth_macaddr_get(pid, &port->eth_addr);
1543 
1544 		map_port_queue_stats_mapping_registers(pid, port);
1545 	}
1546 }
1547 
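/* VLAN tags used to build the DCB/VMDQ pool mappings below. */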
1548 const uint16_t vlan_tags[] = {
1549 		0,  1,  2,  3,  4,  5,  6,  7,
1550 		8,  9, 10, 11,  12, 13, 14, 15,
1551 		16, 17, 18, 19, 20, 21, 22, 23,
1552 		24, 25, 26, 27, 28, 29, 30, 31
1553 };
1554 
1555 static int
1556 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1557 {
1558 	uint8_t i;
1559 
1560 	/*
1561 	 * Build up the correct configuration for DCB+VT based on the VLAN tags
1562 	 * array given above, and the number of traffic classes available for use.
1563 	 */
1564 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1565 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1566 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1567 
1568 		/* VMDQ+DCB RX and TX configurations */
1569 		vmdq_rx_conf.enable_default_pool = 0;
1570 		vmdq_rx_conf.default_pool = 0;
1571 		vmdq_rx_conf.nb_queue_pools =
1572 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1573 		vmdq_tx_conf.nb_queue_pools =
1574 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1575 
1576 		vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1577 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1578 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1579 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1580 		}
1581 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1582 			vmdq_rx_conf.dcb_queue[i] = i;
1583 			vmdq_tx_conf.dcb_queue[i] = i;
1584 		}
1585 
1586 		/*set DCB mode of RX and TX of multiple queues*/
1587 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1588 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1589 		if (dcb_conf->pfc_en)
1590 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1591 		else
1592 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1593 
1594 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1595                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1596 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1597                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1598 	}
1599 	else {
1600 		struct rte_eth_dcb_rx_conf rx_conf;
1601 		struct rte_eth_dcb_tx_conf tx_conf;
1602 
1603 		/* queue mapping configuration of DCB RX and TX */
1604 		if (dcb_conf->num_tcs == ETH_4_TCS)
1605 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1606 		else
1607 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1608 
1609 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1610 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1611 
1612 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1613 			rx_conf.dcb_queue[i] = i;
1614 			tx_conf.dcb_queue[i] = i;
1615 		}
1616 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1617 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1618 		if (dcb_conf->pfc_en)
1619 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1620 		else
1621 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1622 
1623 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1624                                 sizeof(struct rte_eth_dcb_rx_conf)));
1625 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1626                                 sizeof(struct rte_eth_dcb_tx_conf)));
1627 	}
1628 
1629 	return 0;
1630 }
1631 
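/*
 * Switch one port to DCB mode: build the DCB configuration, apply it to
 * the port, enable VLAN filtering for every tag of the vlan_tags[] array
 * and flag the port as DCB-configured.
 */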
1632 int
1633 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1634 {
1635 	struct rte_eth_conf port_conf;
1636 	struct rte_port *rte_port;
1637 	int retval;
1638 	uint16_t nb_vlan;
1639 	uint16_t i;
1640 
1641 	/* rxq and txq configuration in dcb mode */
1642 	nb_rxq = 128;
1643 	nb_txq = 128;
1644 	rx_free_thresh = 64;
1645 
1646 	memset(&port_conf,0,sizeof(struct rte_eth_conf));
1647 	/* Enter DCB configuration status */
1648 	dcb_config = 1;
1649 
1650 	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1651 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
1652 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1653 	if (retval < 0)
1654 		return retval;
1655 
1656 	rte_port = &ports[pid];
1657 	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1658 
1659 	rte_port->rx_conf.rx_thresh = rx_thresh;
1660 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1661 	rte_port->tx_conf.tx_thresh = tx_thresh;
1662 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1663 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1664 	/* VLAN filter */
1665 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1666 	for (i = 0; i < nb_vlan; i++){
1667 		rx_vft_set(pid, vlan_tags[i], 1);
1668 	}
1669 
1670 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1671 	map_port_queue_stats_mapping_registers(pid, rte_port);
1672 
1673 	rte_port->dcb_flag = 1;
1674 
1675 	return 0;
1676 }
1677 
1678 #ifdef RTE_EXEC_ENV_BAREMETAL
1679 #define main _main
1680 #endif
1681 
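/*
 * Entry point: initialize the EAL, the PMDs and the probed ports, then
 * either enter the interactive prompt or start packet forwarding.
 */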
1682 int
1683 main(int argc, char** argv)
1684 {
1685 	int  diag;
1686 	uint8_t port_id;
1687 
1688 	diag = rte_eal_init(argc, argv);
1689 	if (diag < 0)
1690 		rte_panic("Cannot init EAL\n");
1691 
1692 	if (rte_pmd_init_all())
1693 		rte_panic("Cannot init PMD\n");
1694 
1695 	if (rte_eal_pci_probe())
1696 		rte_panic("Cannot probe PCI\n");
1697 
1698 	nb_ports = (portid_t) rte_eth_dev_count();
1699 	if (nb_ports == 0)
1700 		rte_exit(EXIT_FAILURE, "No probed Ethernet devices - "
1701 			  "check that "
1702 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y, "
1703 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and "
1704 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y are set in your "
1705 			  "configuration file\n");
1706 
1707 	set_def_fwd_config();
1708 	if (nb_lcores == 0)
1709 		rte_panic("Empty set of forwarding logical cores - check the "
1710 			  "core mask supplied in the command parameters\n");
1711 
1712 	argc -= diag;
1713 	argv += diag;
1714 	if (argc > 1)
1715 		launch_args_parse(argc, argv);
1716 
1717 	if (nb_rxq > nb_txq)
1718 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1719 		       "but nb_txq=%d will prevent it from being fully tested.\n",
1720 		       nb_rxq, nb_txq);
1721 
1722 	init_config();
1723 	start_port(RTE_PORT_ALL);
1724 
1725 	/* set all ports to promiscuous mode by default */
1726 	for (port_id = 0; port_id < nb_ports; port_id++)
1727 		rte_eth_promiscuous_enable(port_id);
1728 
1729 	if (interactive == 1)
1730 		prompt();
1731 	else {
1732 		char c;
1733 		int rc;
1734 
1735 		printf("No interactive command line requested, starting packet forwarding\n");
1736 		start_packet_forwarding(0);
1737 		printf("Press enter to exit\n");
1738 		rc = read(0, &c, 1);
1739 		if (rc < 0)
1740 			return 1;
1741 	}
1742 
1743 	return 0;
1744 }
1745