xref: /dpdk/app/test-pmd/testpmd.c (revision 1c1d4d7a923d4804f1926fc5264f9ecdd8977b04)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 
76 #include "testpmd.h"
77 
78 uint16_t verbose_level = 0; /**< Silent by default. */
79 
80 /* Use the master lcore for the interactive command line? */
81 uint8_t interactive = 0;
82 
83 /*
84  * NUMA support configuration.
85  * When set, NUMA support attempts to distribute the allocation of the
86  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
87  * probed ports, among CPU sockets 0 and 1.
88  * Otherwise, all memory is allocated from CPU socket 0.
89  */
90 uint8_t numa_support = 0; /**< No NUMA support by default. */
91 
92 /*
93  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
94  * not configured.
95  */
96 uint8_t socket_num = UMA_NO_CONFIG;
97 
98 /*
99  * Record the Ethernet address of peer target ports to which packets are
100  * forwarded.
101  * Must be instantiated with the Ethernet addresses of the peer traffic
102  * generator ports.
103  */
104 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
105 portid_t nb_peer_eth_addrs = 0;
106 
107 /*
108  * Probed Target Environment.
109  */
110 struct rte_port *ports;	       /**< For all probed Ethernet ports. */
111 portid_t nb_ports;             /**< Number of probed Ethernet ports. */
112 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
113 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
114 
115 /*
116  * Test Forwarding Configuration.
117  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
118  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
119  */
120 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
121 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
122 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
123 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
124 
125 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
126 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
127 
128 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
129 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
130 
131 /*
132  * Forwarding engines.
133  */
134 struct fwd_engine *fwd_engines[] = {
135 	&io_fwd_engine,
136 	&mac_fwd_engine,
137 	&rx_only_engine,
138 	&tx_only_engine,
139 	&csum_fwd_engine,
140 #ifdef RTE_LIBRTE_IEEE1588
141 	&ieee1588_fwd_engine,
142 #endif
143 	NULL,
144 };
145 
146 struct fwd_config cur_fwd_config;
147 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
148 
149 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
150 uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools - if
151                                       * specified on the command line. */
152 
153 /*
154  * Configuration of packet segments used by the "txonly" processing engine.
155  */
156 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
157 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
158 	TXONLY_DEF_PACKET_LEN,
159 };
160 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
161 
162 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
163 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
164 
165 /* Whether the current configuration is in DCB mode; 0 means non-DCB mode */
166 uint8_t dcb_config = 0;
167 
168 /* Whether DCB is in testing status */
169 uint8_t dcb_test = 0;
170 
171 /* DCB with VT enabled is the default queue mapping mode */
172 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
173 
174 /*
175  * Configurable number of RX/TX queues.
176  */
177 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
178 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
179 
180 /*
181  * Configurable number of RX/TX ring descriptors.
182  */
183 #define RTE_TEST_RX_DESC_DEFAULT 128
184 #define RTE_TEST_TX_DESC_DEFAULT 512
185 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
186 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
187 
188 /*
189  * Configurable values of RX and TX ring threshold registers.
190  */
191 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
192 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
193 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
194 
195 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
196 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
197 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
198 
199 struct rte_eth_thresh rx_thresh = {
200 	.pthresh = RX_PTHRESH,
201 	.hthresh = RX_HTHRESH,
202 	.wthresh = RX_WTHRESH,
203 };
204 
205 struct rte_eth_thresh tx_thresh = {
206 	.pthresh = TX_PTHRESH,
207 	.hthresh = TX_HTHRESH,
208 	.wthresh = TX_WTHRESH,
209 };
210 
211 /*
212  * Configurable value of RX free threshold.
213  */
214 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
215 
216 /*
217  * Configurable value of RX drop enable.
218  */
219 uint8_t rx_drop_en = 0; /* Drop packets when no RX descriptors are available (disabled by default). */
220 
221 /*
222  * Configurable value of TX free threshold.
223  */
224 uint16_t tx_free_thresh = 0; /* Use default values. */
225 
226 /*
227  * Configurable value of TX RS bit threshold.
228  */
229 uint16_t tx_rs_thresh = 0; /* Use default values. */
230 
231 /*
232  * Configurable value of TX queue flags.
233  */
234 uint32_t txq_flags = 0; /* No flags set. */
235 
236 /*
237  * Receive Side Scaling (RSS) configuration.
238  */
239 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
240 
241 /*
242  * Port topology configuration
243  */
244 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
245 
246 /*
247  * Ethernet device configuration.
248  */
249 struct rte_eth_rxmode rx_mode = {
250 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
251 	.split_hdr_size = 0,
252 	.header_split   = 0, /**< Header Split disabled. */
253 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
254 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
255 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
256 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
257 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
258 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
259 };
260 
261 struct rte_fdir_conf fdir_conf = {
262 	.mode = RTE_FDIR_MODE_NONE,
263 	.pballoc = RTE_FDIR_PBALLOC_64K,
264 	.status = RTE_FDIR_REPORT_STATUS,
265 	.flexbytes_offset = 0x6,
266 	.drop_queue = 127,
267 };
268 
269 static volatile int test_done = 1; /* Nonzero when packet forwarding is stopped. */
270 
271 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
272 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
273 
274 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
275 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
276 
277 uint16_t nb_tx_queue_stats_mappings = 0;
278 uint16_t nb_rx_queue_stats_mappings = 0;
279 
280 /* Forward function declarations */
281 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
282 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
283 
284 /*
285  * Check if all the ports are started.
286  * If yes, return positive value. If not, return zero.
287  */
288 static int all_ports_started(void);
289 
290 /*
291  * Setup default configuration.
292  */
293 static void
294 set_default_fwd_lcores_config(void)
295 {
296 	unsigned int i;
297 	unsigned int nb_lc;
298 
299 	nb_lc = 0;
300 	for (i = 0; i < RTE_MAX_LCORE; i++) {
301 		if (! rte_lcore_is_enabled(i))
302 			continue;
303 		if (i == rte_get_master_lcore())
304 			continue;
305 		fwd_lcores_cpuids[nb_lc++] = i;
306 	}
307 	nb_lcores = (lcoreid_t) nb_lc;
308 	nb_cfg_lcores = nb_lcores;
309 	nb_fwd_lcores = 1;
310 }
311 
312 static void
313 set_def_peer_eth_addrs(void)
314 {
315 	portid_t i;
316 
317 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
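	/*
	 * Default peers use locally administered MAC addresses (first byte
	 * ETHER_LOCAL_ADMIN_ADDR, i.e. 0x02) with the port id in the last
	 * byte: 02:00:00:00:00:<port>.
	 */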
318 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
319 		peer_eth_addrs[i].addr_bytes[5] = i;
320 	}
321 }
322 
323 static void
324 set_default_fwd_ports_config(void)
325 {
326 	portid_t pt_id;
327 
328 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
329 		fwd_ports_ids[pt_id] = pt_id;
330 
331 	nb_cfg_ports = nb_ports;
332 	nb_fwd_ports = nb_ports;
333 }
334 
335 void
336 set_def_fwd_config(void)
337 {
338 	set_default_fwd_lcores_config();
339 	set_def_peer_eth_addrs();
340 	set_default_fwd_ports_config();
341 }
342 
343 /*
344  * Configuration initialisation done once at init time.
345  */
346 struct mbuf_ctor_arg {
347 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
348 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
349 };
350 
351 struct mbuf_pool_ctor_arg {
352 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
353 };
354 
355 static void
356 testpmd_mbuf_ctor(struct rte_mempool *mp,
357 		  void *opaque_arg,
358 		  void *raw_mbuf,
359 		  __attribute__((unused)) unsigned i)
360 {
361 	struct mbuf_ctor_arg *mb_ctor_arg;
362 	struct rte_mbuf    *mb;
363 
364 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
365 	mb = (struct rte_mbuf *) raw_mbuf;
366 
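	/*
	 * The data buffer lives in the same mempool object as the mbuf
	 * header, seg_buf_offset bytes after it; pkt.data additionally
	 * reserves the standard headroom for prepended headers.
	 */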
367 	mb->type         = RTE_MBUF_PKT;
368 	mb->pool         = mp;
369 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
370 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
371 			mb_ctor_arg->seg_buf_offset);
372 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
374 	mb->ol_flags     = 0;
375 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
376 	mb->pkt.nb_segs  = 1;
377 	mb->pkt.vlan_macip.data = 0;
378 	mb->pkt.hash.rss = 0;
379 }
380 
381 static void
382 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
383 		       void *opaque_arg)
384 {
385 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
386 	struct rte_pktmbuf_pool_private *mbp_priv;
387 
388 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
389 		printf("%s(%s) private_data_size %d < %d\n",
390 		       __func__, mp->name, (int) mp->private_data_size,
391 		       (int) sizeof(struct rte_pktmbuf_pool_private));
392 		return;
393 	}
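	/*
	 * The pool private area is laid out immediately after the
	 * struct rte_mempool header; record the data room size there
	 * so that PMDs can size their RX buffers.
	 */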
394 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
395 	mbp_priv = (struct rte_pktmbuf_pool_private *)
396 		((char *)mp + sizeof(struct rte_mempool));
397 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
398 }
399 
400 static void
401 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
402 		 unsigned int socket_id)
403 {
404 	char pool_name[RTE_MEMPOOL_NAMESIZE];
405 	struct rte_mempool *rte_mp;
406 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
407 	struct mbuf_ctor_arg mb_ctor_arg;
408 	uint32_t mb_size;
409 
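	/*
	 * Each mbuf consists of the struct rte_mbuf header, rounded up to a
	 * cache line, followed by the data segment (headroom + buffer space).
	 */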
410 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
411 						mbuf_seg_size);
412 	mb_ctor_arg.seg_buf_offset =
413 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
414 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
415 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
416 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
417 	rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
418 				    (unsigned) mb_mempool_cache,
419 				    sizeof(struct rte_pktmbuf_pool_private),
420 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
421 				    testpmd_mbuf_ctor, &mb_ctor_arg,
422 				    socket_id, 0);
423 	if (rte_mp == NULL) {
424 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
425 						"failed\n", socket_id);
426 	}
427 }
428 
429 static void
430 init_config(void)
431 {
432 	portid_t pid;
433 	struct rte_port *port;
434 	struct rte_mempool *mbp;
435 	unsigned int nb_mbuf_per_pool;
436 	lcoreid_t  lc_id;
437 	uint8_t port_per_socket[MAX_SOCKET];
438 
439 	memset(port_per_socket, 0, sizeof(port_per_socket));
440 	/* Configuration of logical cores. */
441 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
442 				sizeof(struct fwd_lcore *) * nb_lcores,
443 				CACHE_LINE_SIZE);
444 	if (fwd_lcores == NULL) {
445 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
446 							"failed\n", nb_lcores);
447 	}
448 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
449 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
450 					       sizeof(struct fwd_lcore),
451 					       CACHE_LINE_SIZE);
452 		if (fwd_lcores[lc_id] == NULL) {
453 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
454 								"failed\n");
455 		}
456 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
457 	}
458 
459 	/*
460 	 * Create mbuf pools.
461 	 * If NUMA support is disabled, create a single mbuf pool in
462 	 * socket 0 memory by default.
463 	 * Otherwise, create an mbuf pool in the memory of each of sockets 0 and 1.
464 	 *
465 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
466 	 * and nb_txd can be re-configured at run time.
467 	 */
468 	if (param_total_num_mbufs)
469 		nb_mbuf_per_pool = param_total_num_mbufs;
470 	else {
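		/*
		 * Worst-case estimate per port: a full RX ring, the per-lcore
		 * mempool caches, a full TX ring, and one packet burst.
		 */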
471 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
472 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
473 
474 		if (!numa_support)
475 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
476 	}
477 
478 	if (!numa_support) {
479 		if (socket_num == UMA_NO_CONFIG)
480 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
481 		else
482 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
483 						 socket_num);
484 	}
485 	/*
486 	 * Record which mbuf pool each logical core should use, if needed.
487 	 */
488 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
489 		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
490 		if (mbp == NULL)
491 			mbp = mbuf_pool_find(0);
492 		fwd_lcores[lc_id]->mbp = mbp;
493 	}
494 
495 	/* Configuration of Ethernet ports. */
496 	ports = rte_zmalloc("testpmd: ports",
497 			    sizeof(struct rte_port) * nb_ports,
498 			    CACHE_LINE_SIZE);
499 	if (ports == NULL) {
500 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
501 							"failed\n", nb_ports);
502 	}
503 
504 	for (pid = 0; pid < nb_ports; pid++) {
505 		port = &ports[pid];
506 		rte_eth_dev_info_get(pid, &port->dev_info);
507 
508 		if (numa_support) {
509 			if (port_numa[pid] != NUMA_NO_CONFIG)
510 				port_per_socket[port_numa[pid]]++;
511 			else {
512 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
513 				port_per_socket[socket_id]++;
514 			}
515 		}
516 
517 		/* set flag to initialize port/queue */
518 		port->need_reconfig = 1;
519 		port->need_reconfig_queues = 1;
520 	}
521 
522 	if (numa_support) {
523 		uint8_t i;
524 		unsigned int nb_mbuf;
525 
526 		if (param_total_num_mbufs)
527 			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
528 
529 		for (i = 0; i < MAX_SOCKET; i++) {
530 			nb_mbuf = (nb_mbuf_per_pool *
531 						port_per_socket[i]);
532 			if (nb_mbuf)
533 				mbuf_pool_create(mbuf_data_size,
534 						nb_mbuf, i);
535 		}
536 	}
537 	init_port_config();
538 	/* Configuration of packet forwarding streams. */
539 	if (init_fwd_streams() < 0)
540 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
541 }
542 
543 int
544 init_fwd_streams(void)
545 {
546 	portid_t pid;
547 	struct rte_port *port;
548 	streamid_t sm_id, nb_fwd_streams_new;
549 
550 	/* Set the socket id of each port according to whether NUMA is enabled */
551 	for (pid = 0; pid < nb_ports; pid++) {
552 		port = &ports[pid];
553 		if (nb_rxq > port->dev_info.max_rx_queues) {
554 			printf("Fail: nb_rxq(%d) is greater than "
555 				"max_rx_queues(%d)\n", nb_rxq,
556 				port->dev_info.max_rx_queues);
557 			return -1;
558 		}
559 		if (nb_txq > port->dev_info.max_tx_queues) {
560 			printf("Fail: nb_txq(%d) is greater than "
561 				"max_tx_queues(%d)\n", nb_txq,
562 				port->dev_info.max_tx_queues);
563 			return -1;
564 		}
565 		if (numa_support)
566 			port->socket_id = rte_eth_dev_socket_id(pid);
567 		else {
568 			if (socket_num == UMA_NO_CONFIG)
569 				port->socket_id = 0;
570 			else
571 				port->socket_id = socket_num;
572 		}
573 	}
574 
575 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
576 	if (nb_fwd_streams_new == nb_fwd_streams)
577 		return 0;
578 	/* Free the old forwarding streams, if any */
579 	if (fwd_streams != NULL) {
580 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
581 			if (fwd_streams[sm_id] == NULL)
582 				continue;
583 			rte_free(fwd_streams[sm_id]);
584 			fwd_streams[sm_id] = NULL;
585 		}
586 		rte_free(fwd_streams);
587 		fwd_streams = NULL;
588 	}
589 
590 	/* Allocate the new forwarding streams */
591 	nb_fwd_streams = nb_fwd_streams_new;
592 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
593 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
594 	if (fwd_streams == NULL)
595 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
596 						"failed\n", nb_fwd_streams);
597 
598 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
599 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
600 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
601 		if (fwd_streams[sm_id] == NULL)
602 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
603 								" failed\n");
604 	}
605 
606 	return 0;
607 }
608 
609 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
610 static void
611 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
612 {
613 	unsigned int total_burst;
614 	unsigned int nb_burst;
615 	unsigned int burst_stats[3];
616 	uint16_t pktnb_stats[3];
617 	uint16_t nb_pkt;
618 	int burst_percent[3];
619 
620 	/*
621 	 * First compute the total number of packet bursts and the
622 	 * two highest numbers of bursts of the same number of packets.
623 	 */
624 	total_burst = 0;
625 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
626 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
627 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
628 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
629 		if (nb_burst == 0)
630 			continue;
631 		total_burst += nb_burst;
632 		if (nb_burst > burst_stats[0]) {
633 			burst_stats[1] = burst_stats[0];
634 			pktnb_stats[1] = pktnb_stats[0];
635 			burst_stats[0] = nb_burst;
636 			pktnb_stats[0] = nb_pkt;
637 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
638 	}
639 	if (total_burst == 0)
640 		return;
641 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
642 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
643 	       burst_percent[0], (int) pktnb_stats[0]);
644 	if (burst_stats[0] == total_burst) {
645 		printf("]\n");
646 		return;
647 	}
648 	if (burst_stats[0] + burst_stats[1] == total_burst) {
649 		printf(" + %d%% of %d pkts]\n",
650 		       100 - burst_percent[0], pktnb_stats[1]);
651 		return;
652 	}
653 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
654 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
655 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
656 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
657 		return;
658 	}
659 	printf(" + %d%% of %d pkts + %d%% of others]\n",
660 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
661 }
662 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
663 
664 static void
665 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
666 {
667 	struct rte_port *port;
668 	uint8_t i;
669 
670 	static const char *fwd_stats_border = "----------------------";
671 
672 	port = &ports[port_id];
673 	printf("\n  %s Forward statistics for port %-2d %s\n",
674 	       fwd_stats_border, port_id, fwd_stats_border);
675 
676 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
677 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
678 		       "%-"PRIu64"\n",
679 		       stats->ipackets, stats->ierrors,
680 		       (uint64_t) (stats->ipackets + stats->ierrors));
681 
682 		if (cur_fwd_eng == &csum_fwd_engine)
683 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
684 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
685 
686 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
687 		       "%-"PRIu64"\n",
688 		       stats->opackets, port->tx_dropped,
689 		       (uint64_t) (stats->opackets + port->tx_dropped));
690 
691 		if (stats->rx_nombuf > 0)
692 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
693 
694 	}
695 	else {
696 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
697 		       "%14"PRIu64"\n",
698 		       stats->ipackets, stats->ierrors,
699 		       (uint64_t) (stats->ipackets + stats->ierrors));
700 
701 		if (cur_fwd_eng == &csum_fwd_engine)
702 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
703 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
704 
705 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
706 		       "%14"PRIu64"\n",
707 		       stats->opackets, port->tx_dropped,
708 		       (uint64_t) (stats->opackets + port->tx_dropped));
709 
710 		if (stats->rx_nombuf > 0)
711 			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
712 	}
713 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
714 	if (port->rx_stream)
715 		pkt_burst_stats_display("RX",
716 			&port->rx_stream->rx_burst_stats);
717 	if (port->tx_stream)
718 		pkt_burst_stats_display("TX",
719 			&port->tx_stream->tx_burst_stats);
720 #endif
721 	/* stats fdir */
722 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
723 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
724 		       stats->fdirmiss,
725 		       stats->fdirmatch);
726 
727 	if (port->rx_queue_stats_mapping_enabled) {
728 		printf("\n");
729 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
730 			printf("  Stats reg %2d RX-packets:%14"PRIu64
731 			       "     RX-errors:%14"PRIu64
732 			       "    RX-bytes:%14"PRIu64"\n",
733 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
734 		}
735 		printf("\n");
736 	}
737 	if (port->tx_queue_stats_mapping_enabled) {
738 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
739 			printf("  Stats reg %2d TX-packets:%14"PRIu64
740 			       "                                 TX-bytes:%14"PRIu64"\n",
741 			       i, stats->q_opackets[i], stats->q_obytes[i]);
742 		}
743 	}
744 
745 	printf("  %s--------------------------------%s\n",
746 	       fwd_stats_border, fwd_stats_border);
747 }
748 
749 static void
750 fwd_stream_stats_display(streamid_t stream_id)
751 {
752 	struct fwd_stream *fs;
753 	static const char *fwd_top_stats_border = "-------";
754 
755 	fs = fwd_streams[stream_id];
756 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
757 	    (fs->fwd_dropped == 0))
758 		return;
759 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
760 	       "TX Port=%2d/Queue=%2d %s\n",
761 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
762 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
763 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
764 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
765 
766 	/* If the checksum forwarding engine is used, display checksum errors */
767 	if (cur_fwd_eng == &csum_fwd_engine) {
768 		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
769 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
770 	}
771 
772 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
773 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
774 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
775 #endif
776 }
777 
778 static void
779 flush_all_rx_queues(void)
780 {
781 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
782 	portid_t  rxp;
783 	queueid_t rxq;
784 	uint16_t  nb_rx;
785 	uint16_t  i;
786 	uint8_t   j;
787 
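	/*
	 * Drain every RX queue twice, with a short delay in between, so that
	 * packets still in flight when forwarding stopped are freed as well.
	 */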
788 	for (j = 0; j < 2; j++) {
789 		for (rxp = 0; rxp < nb_ports; rxp++) {
790 			for (rxq = 0; rxq < nb_rxq; rxq++) {
791 				do {
792 					nb_rx = rte_eth_rx_burst(rxp, rxq,
793 						pkts_burst, MAX_PKT_BURST);
794 					for (i = 0; i < nb_rx; i++)
795 						rte_pktmbuf_free(pkts_burst[i]);
796 				} while (nb_rx > 0);
797 			}
798 		}
799 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
800 	}
801 }
802 
803 static void
804 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
805 {
806 	struct fwd_stream **fsm;
807 	streamid_t nb_fs;
808 	streamid_t sm_id;
809 
810 	fsm = &fwd_streams[fc->stream_idx];
811 	nb_fs = fc->stream_nb;
812 	do {
813 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
814 			(*pkt_fwd)(fsm[sm_id]);
815 	} while (! fc->stopped);
816 }
817 
818 static int
819 start_pkt_forward_on_core(void *fwd_arg)
820 {
821 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
822 			     cur_fwd_config.fwd_eng->packet_fwd);
823 	return 0;
824 }
825 
826 /*
827  * Run the TXONLY packet forwarding engine to send a single burst of packets.
828  * Used to start communication flows in network loopback test configurations.
829  */
830 static int
831 run_one_txonly_burst_on_core(void *fwd_arg)
832 {
833 	struct fwd_lcore *fwd_lc;
834 	struct fwd_lcore tmp_lcore;
835 
836 	fwd_lc = (struct fwd_lcore *) fwd_arg;
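	/*
	 * Run on a local copy with "stopped" already set, so that
	 * run_pkt_fwd_on_lcore() makes exactly one pass over the streams,
	 * i.e. sends a single burst on each of them.
	 */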
837 	tmp_lcore = *fwd_lc;
838 	tmp_lcore.stopped = 1;
839 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
840 	return 0;
841 }
842 
843 /*
844  * Launch packet forwarding:
845  *     - Setup per-port forwarding context.
846  *     - launch logical cores with their forwarding configuration.
847  */
848 static void
849 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
850 {
851 	port_fwd_begin_t port_fwd_begin;
852 	unsigned int i;
853 	unsigned int lc_id;
854 	int diag;
855 
856 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
857 	if (port_fwd_begin != NULL) {
858 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
859 			(*port_fwd_begin)(fwd_ports_ids[i]);
860 	}
861 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
862 		lc_id = fwd_lcores_cpuids[i];
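		/*
		 * In interactive mode the current (master) lcore runs the
		 * command line, so forwarding is never launched on it.
		 */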
863 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
864 			fwd_lcores[i]->stopped = 0;
865 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
866 						     fwd_lcores[i], lc_id);
867 			if (diag != 0)
868 				printf("launch lcore %u failed - diag=%d\n",
869 				       lc_id, diag);
870 		}
871 	}
872 }
873 
874 /*
875  * Launch packet forwarding configuration.
876  */
877 void
878 start_packet_forwarding(int with_tx_first)
879 {
880 	port_fwd_begin_t port_fwd_begin;
881 	port_fwd_end_t  port_fwd_end;
882 	struct rte_port *port;
883 	unsigned int i;
884 	portid_t   pt_id;
885 	streamid_t sm_id;
886 
887 	if (all_ports_started() == 0) {
888 		printf("Not all ports were started\n");
889 		return;
890 	}
891 	if (test_done == 0) {
892 		printf("Packet forwarding already started\n");
893 		return;
894 	}
895 	if ((dcb_test) && (nb_fwd_lcores == 1)) {
896 		printf("In DCB mode, the number of forwarding cores should be larger than 1.\n");
897 		return;
898 	}
899 	test_done = 0;
900 	flush_all_rx_queues();
901 	fwd_config_setup();
902 	rxtx_config_display();
903 
904 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
905 		pt_id = fwd_ports_ids[i];
906 		port = &ports[pt_id];
907 		rte_eth_stats_get(pt_id, &port->stats);
908 		port->tx_dropped = 0;
909 
910 		map_port_queue_stats_mapping_registers(pt_id, port);
911 	}
912 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
913 		fwd_streams[sm_id]->rx_packets = 0;
914 		fwd_streams[sm_id]->tx_packets = 0;
915 		fwd_streams[sm_id]->fwd_dropped = 0;
916 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
917 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
918 
919 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
920 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
921 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
922 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
923 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
924 #endif
925 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
926 		fwd_streams[sm_id]->core_cycles = 0;
927 #endif
928 	}
929 	if (with_tx_first) {
930 		port_fwd_begin = tx_only_engine.port_fwd_begin;
931 		if (port_fwd_begin != NULL) {
932 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
933 				(*port_fwd_begin)(fwd_ports_ids[i]);
934 		}
935 		launch_packet_forwarding(run_one_txonly_burst_on_core);
936 		rte_eal_mp_wait_lcore();
937 		port_fwd_end = tx_only_engine.port_fwd_end;
938 		if (port_fwd_end != NULL) {
939 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
940 				(*port_fwd_end)(fwd_ports_ids[i]);
941 		}
942 	}
943 	launch_packet_forwarding(start_pkt_forward_on_core);
944 }
945 
946 void
947 stop_packet_forwarding(void)
948 {
949 	struct rte_eth_stats stats;
950 	struct rte_port *port;
951 	port_fwd_end_t  port_fwd_end;
952 	int i;
953 	portid_t   pt_id;
954 	streamid_t sm_id;
955 	lcoreid_t  lc_id;
956 	uint64_t total_recv;
957 	uint64_t total_xmit;
958 	uint64_t total_rx_dropped;
959 	uint64_t total_tx_dropped;
960 	uint64_t total_rx_nombuf;
961 	uint64_t tx_dropped;
962 	uint64_t rx_bad_ip_csum;
963 	uint64_t rx_bad_l4_csum;
964 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
965 	uint64_t fwd_cycles;
966 #endif
967 	static const char *acc_stats_border = "+++++++++++++++";
968 
969 	if (all_ports_started() == 0) {
970 		printf("Not all ports were started\n");
971 		return;
972 	}
973 	if (test_done) {
974 		printf("Packet forwarding not started\n");
975 		return;
976 	}
977 	printf("Telling cores to stop...");
978 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
979 		fwd_lcores[lc_id]->stopped = 1;
980 	printf("\nWaiting for lcores to finish...\n");
981 	rte_eal_mp_wait_lcore();
982 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
983 	if (port_fwd_end != NULL) {
984 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
985 			pt_id = fwd_ports_ids[i];
986 			(*port_fwd_end)(pt_id);
987 		}
988 	}
989 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
990 	fwd_cycles = 0;
991 #endif
992 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
993 		if (cur_fwd_config.nb_fwd_streams >
994 		    cur_fwd_config.nb_fwd_ports) {
995 			fwd_stream_stats_display(sm_id);
996 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
997 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
998 		} else {
999 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1000 				fwd_streams[sm_id];
1001 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1002 				fwd_streams[sm_id];
1003 		}
1004 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1005 		tx_dropped = (uint64_t) (tx_dropped +
1006 					 fwd_streams[sm_id]->fwd_dropped);
1007 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1008 
1009 		rx_bad_ip_csum =
1010 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1011 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1012 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1013 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1014 							rx_bad_ip_csum;
1015 
1016 		rx_bad_l4_csum =
1017 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1018 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1019 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1020 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1021 							rx_bad_l4_csum;
1022 
1023 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1024 		fwd_cycles = (uint64_t) (fwd_cycles +
1025 					 fwd_streams[sm_id]->core_cycles);
1026 #endif
1027 	}
1028 	total_recv = 0;
1029 	total_xmit = 0;
1030 	total_rx_dropped = 0;
1031 	total_tx_dropped = 0;
1032 	total_rx_nombuf  = 0;
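	/*
	 * Round the number of forwarding ports up to an even value, so that
	 * both ports of the last pair are included in the statistics.
	 */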
1033 	for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
1034 		pt_id = fwd_ports_ids[i];
1035 
1036 		port = &ports[pt_id];
1037 		rte_eth_stats_get(pt_id, &stats);
1038 		stats.ipackets -= port->stats.ipackets;
1039 		port->stats.ipackets = 0;
1040 		stats.opackets -= port->stats.opackets;
1041 		port->stats.opackets = 0;
1042 		stats.ibytes   -= port->stats.ibytes;
1043 		port->stats.ibytes = 0;
1044 		stats.obytes   -= port->stats.obytes;
1045 		port->stats.obytes = 0;
1046 		stats.ierrors  -= port->stats.ierrors;
1047 		port->stats.ierrors = 0;
1048 		stats.oerrors  -= port->stats.oerrors;
1049 		port->stats.oerrors = 0;
1050 		stats.rx_nombuf -= port->stats.rx_nombuf;
1051 		port->stats.rx_nombuf = 0;
1052 		stats.fdirmatch -= port->stats.fdirmatch;
1053 		port->stats.fdirmatch = 0;
1054 		stats.fdirmiss -= port->stats.fdirmiss;
1055 		port->stats.fdirmiss = 0;
1056 
1057 		total_recv += stats.ipackets;
1058 		total_xmit += stats.opackets;
1059 		total_rx_dropped += stats.ierrors;
1060 		total_tx_dropped += port->tx_dropped;
1061 		total_rx_nombuf  += stats.rx_nombuf;
1062 
1063 		fwd_port_stats_display(pt_id, &stats);
1064 	}
1065 	printf("\n  %s Accumulated forward statistics for all ports"
1066 	       "%s\n",
1067 	       acc_stats_border, acc_stats_border);
1068 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1069 	       "%-"PRIu64"\n"
1070 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1071 	       "%-"PRIu64"\n",
1072 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1073 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1074 	if (total_rx_nombuf > 0)
1075 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1076 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1077 	       "%s\n",
1078 	       acc_stats_border, acc_stats_border);
1079 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1080 	if (total_recv > 0)
1081 		printf("\n  CPU cycles/packet=%u (total cycles="
1082 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1083 		       (unsigned int)(fwd_cycles / total_recv),
1084 		       fwd_cycles, total_recv);
1085 #endif
1086 	printf("\nDone.\n");
1087 	test_done = 1;
1088 }
1089 
1090 static int
1091 all_ports_started(void)
1092 {
1093 	portid_t pi;
1094 	struct rte_port *port;
1095 
1096 	for (pi = 0; pi < nb_ports; pi++) {
1097 		port = &ports[pi];
1098 		/* Check if there is a port which is not started */
1099 		if (port->port_status != RTE_PORT_STARTED)
1100 			return 0;
1101 	}
1102 
1103 	/* All ports are started */
1104 	return 1;
1105 }
1106 
1107 void
1108 start_port(portid_t pid)
1109 {
1110 	int diag, need_check_link_status = 0;
1111 	portid_t pi;
1112 	queueid_t qi;
1113 	struct rte_port *port;
1114 
1115 	if (test_done == 0) {
1116 		printf("Please stop forwarding first\n");
1117 		return;
1118 	}
1119 
1120 	if (init_fwd_streams() < 0) {
1121 		printf("Fail from init_fwd_streams()\n");
1122 		return;
1123 	}
1124 
1125 	if (dcb_config)
1126 		dcb_test = 1;
1127 	for (pi = 0; pi < nb_ports; pi++) {
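		/*
		 * A pid >= nb_ports (e.g. RTE_PORT_ALL) selects every port;
		 * otherwise only port pid is handled.
		 */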
1128 		if (pid < nb_ports && pid != pi)
1129 			continue;
1130 
1131 		port = &ports[pi];
1132 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1133 						 RTE_PORT_HANDLING) == 0) {
1134 			printf("Port %d is not stopped, skipping it\n", pi);
1135 			continue;
1136 		}
1137 
1138 		if (port->need_reconfig > 0) {
1139 			port->need_reconfig = 0;
1140 
1141 			printf("Configuring Port %d (socket %d)\n", pi,
1142 					rte_eth_dev_socket_id(pi));
1143 			/* configure port */
1144 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1145 						&(port->dev_conf));
1146 			if (diag != 0) {
1147 				if (rte_atomic16_cmpset(&(port->port_status),
1148 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1149 					printf("Port %d can not be set back "
1150 							"to stopped\n", pi);
1151 				printf("Fail to configure port %d\n", pi);
1152 				/* try to reconfigure port next time */
1153 				port->need_reconfig = 1;
1154 				return;
1155 			}
1156 		}
1157 		if (port->need_reconfig_queues > 0) {
1158 			port->need_reconfig_queues = 0;
1159 			/* setup tx queues */
1160 			for (qi = 0; qi < nb_txq; qi++) {
1161 				if ((numa_support) &&
1162 					(txring_numa[pi] != NUMA_NO_CONFIG))
1163 					diag = rte_eth_tx_queue_setup(pi, qi,
1164 						nb_txd, txring_numa[pi],
1165 						&(port->tx_conf));
1166 				else
1167 					diag = rte_eth_tx_queue_setup(pi, qi,
1168 						nb_txd, port->socket_id,
1169 						&(port->tx_conf));
1170 
1171 				if (diag == 0)
1172 					continue;
1173 
1174 				/* Fail to setup tx queue, return */
1175 				if (rte_atomic16_cmpset(&(port->port_status),
1176 							RTE_PORT_HANDLING,
1177 							RTE_PORT_STOPPED) == 0)
1178 					printf("Port %d can not be set back "
1179 							"to stopped\n", pi);
1180 				printf("Fail to configure port %d tx queues\n", pi);
1181 				/* try to reconfigure queues next time */
1182 				port->need_reconfig_queues = 1;
1183 				return;
1184 			}
1185 			/* setup rx queues */
1186 			for (qi = 0; qi < nb_rxq; qi++) {
1187 				if ((numa_support) &&
1188 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1189 					struct rte_mempool *mp =
1190 						mbuf_pool_find(rxring_numa[pi]);
1191 					if (mp == NULL) {
1192 						printf("Failed to setup RX queue: "
1193 							"no mempool allocated "
1194 							"on socket %d\n",
1195 							rxring_numa[pi]);
1196 						return;
1197 					}
1198 
1199 					diag = rte_eth_rx_queue_setup(pi, qi,
1200 					     nb_rxd, rxring_numa[pi],
1201 					     &(port->rx_conf), mp);
1202 				}
1203 				else
1204 					diag = rte_eth_rx_queue_setup(pi, qi,
1205 					     nb_rxd, port->socket_id,
1206 					     &(port->rx_conf),
1207 					     mbuf_pool_find(port->socket_id));
1208 
1209 				if (diag == 0)
1210 					continue;
1211 
1212 
1213 				/* Fail to setup rx queue, return */
1214 				if (rte_atomic16_cmpset(&(port->port_status),
1215 							RTE_PORT_HANDLING,
1216 							RTE_PORT_STOPPED) == 0)
1217 					printf("Port %d can not be set back "
1218 							"to stopped\n", pi);
1219 				printf("Fail to configure port %d rx queues\n", pi);
1220 				/* try to reconfigure queues next time */
1221 				port->need_reconfig_queues = 1;
1222 				return;
1223 			}
1224 		}
1225 		/* start port */
1226 		if (rte_eth_dev_start(pi) < 0) {
1227 			printf("Fail to start port %d\n", pi);
1228 
1229 			/* Failed to start the port; set it back to stopped */
1230 			if (rte_atomic16_cmpset(&(port->port_status),
1231 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1232 				printf("Port %d can not be set back to "
1233 							"stopped\n", pi);
1234 			continue;
1235 		}
1236 
1237 		if (rte_atomic16_cmpset(&(port->port_status),
1238 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1239 			printf("Port %d can not be set into started\n", pi);
1240 
1241 		/* at least one port started, need checking link status */
1242 		need_check_link_status = 1;
1243 	}
1244 
1245 	if (need_check_link_status)
1246 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1247 	else
1248 		printf("Please stop the ports first\n");
1249 
1250 	printf("Done\n");
1251 }
1252 
1253 void
1254 stop_port(portid_t pid)
1255 {
1256 	portid_t pi;
1257 	struct rte_port *port;
1258 	int need_check_link_status = 0;
1259 
1260 	if (test_done == 0) {
1261 		printf("Please stop forwarding first\n");
1262 		return;
1263 	}
1264 	if (dcb_test) {
1265 		dcb_test = 0;
1266 		dcb_config = 0;
1267 	}
1268 	printf("Stopping ports...\n");
1269 
1270 	for (pi = 0; pi < nb_ports; pi++) {
1271 		if (pid < nb_ports && pid != pi)
1272 			continue;
1273 
1274 		port = &ports[pi];
1275 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1276 						RTE_PORT_HANDLING) == 0)
1277 			continue;
1278 
1279 		rte_eth_dev_stop(pi);
1280 
1281 		if (rte_atomic16_cmpset(&(port->port_status),
1282 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1283 			printf("Port %d can not be set into stopped\n", pi);
1284 		need_check_link_status = 1;
1285 	}
1286 	if (need_check_link_status)
1287 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1288 
1289 	printf("Done\n");
1290 }
1291 
1292 void
1293 close_port(portid_t pid)
1294 {
1295 	portid_t pi;
1296 	struct rte_port *port;
1297 
1298 	if (test_done == 0) {
1299 		printf("Please stop forwarding first\n");
1300 		return;
1301 	}
1302 
1303 	printf("Closing ports...\n");
1304 
1305 	for (pi = 0; pi < nb_ports; pi++) {
1306 		if (pid < nb_ports && pid != pi)
1307 			continue;
1308 
1309 		port = &ports[pi];
1310 		if (rte_atomic16_cmpset(&(port->port_status),
1311 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1312 			printf("Port %d is not stopped, skipping it\n", pi);
1313 			continue;
1314 		}
1315 
1316 		rte_eth_dev_close(pi);
1317 
1318 		if (rte_atomic16_cmpset(&(port->port_status),
1319 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1320 			printf("Port %d can not be set into closed\n", pi);
1321 	}
1322 
1323 	printf("Done\n");
1324 }
1325 
1326 int
1327 all_ports_stopped(void)
1328 {
1329 	portid_t pi;
1330 	struct rte_port *port;
1331 
1332 	for (pi = 0; pi < nb_ports; pi++) {
1333 		port = &ports[pi];
1334 		if (port->port_status != RTE_PORT_STOPPED)
1335 			return 0;
1336 	}
1337 
1338 	return 1;
1339 }
1340 
1341 void
1342 pmd_test_exit(void)
1343 {
1344 	portid_t pt_id;
1345 
1346 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1347 		printf("Stopping port %d...", pt_id);
1348 		fflush(stdout);
1349 		rte_eth_dev_close(pt_id);
1350 		printf("done\n");
1351 	}
1352 	printf("bye...\n");
1353 }
1354 
1355 typedef void (*cmd_func_t)(void);
1356 struct pmd_test_command {
1357 	const char *cmd_name;
1358 	cmd_func_t cmd_func;
1359 };
1360 
1361 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1362 
1363 /* Check the link status of all ports for up to 9 s, and print the final status */
1364 static void
1365 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1366 {
1367 #define CHECK_INTERVAL 100 /* 100ms */
1368 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1369 	uint8_t portid, count, all_ports_up, print_flag = 0;
1370 	struct rte_eth_link link;
1371 
1372 	printf("Checking link statuses...\n");
1373 	fflush(stdout);
1374 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1375 		all_ports_up = 1;
1376 		for (portid = 0; portid < port_num; portid++) {
1377 			if ((port_mask & (1 << portid)) == 0)
1378 				continue;
1379 			memset(&link, 0, sizeof(link));
1380 			rte_eth_link_get_nowait(portid, &link);
1381 			/* print link status if flag set */
1382 			if (print_flag == 1) {
1383 				if (link.link_status)
1384 					printf("Port %d Link Up - speed %u "
1385 						"Mbps - %s\n", (uint8_t)portid,
1386 						(unsigned)link.link_speed,
1387 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1388 					("full-duplex") : ("half-duplex"));
1389 				else
1390 					printf("Port %d Link Down\n",
1391 						(uint8_t)portid);
1392 				continue;
1393 			}
1394 			/* clear all_ports_up flag if any link down */
1395 			if (link.link_status == 0) {
1396 				all_ports_up = 0;
1397 				break;
1398 			}
1399 		}
1400 		/* after finally printing all link status, get out */
1401 		if (print_flag == 1)
1402 			break;
1403 
1404 		if (all_ports_up == 0) {
1405 			fflush(stdout);
1406 			rte_delay_ms(CHECK_INTERVAL);
1407 		}
1408 
1409 		/* set the print_flag if all ports up or timeout */
1410 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1411 			print_flag = 1;
1412 		}
1413 	}
1414 }
1415 
1416 static int
1417 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1418 {
1419 	uint16_t i;
1420 	int diag;
1421 	uint8_t mapping_found = 0;
1422 
1423 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1424 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1425 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1426 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1427 					tx_queue_stats_mappings[i].queue_id,
1428 					tx_queue_stats_mappings[i].stats_counter_id);
1429 			if (diag != 0)
1430 				return diag;
1431 			mapping_found = 1;
1432 		}
1433 	}
1434 	if (mapping_found)
1435 		port->tx_queue_stats_mapping_enabled = 1;
1436 	return 0;
1437 }
1438 
1439 static int
1440 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1441 {
1442 	uint16_t i;
1443 	int diag;
1444 	uint8_t mapping_found = 0;
1445 
1446 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1447 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1448 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1449 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1450 					rx_queue_stats_mappings[i].queue_id,
1451 					rx_queue_stats_mappings[i].stats_counter_id);
1452 			if (diag != 0)
1453 				return diag;
1454 			mapping_found = 1;
1455 		}
1456 	}
1457 	if (mapping_found)
1458 		port->rx_queue_stats_mapping_enabled = 1;
1459 	return 0;
1460 }
1461 
1462 static void
1463 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1464 {
1465 	int diag = 0;
1466 
1467 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1468 	if (diag != 0) {
1469 		if (diag == -ENOTSUP) {
1470 			port->tx_queue_stats_mapping_enabled = 0;
1471 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1472 		}
1473 		else
1474 			rte_exit(EXIT_FAILURE,
1475 					"set_tx_queue_stats_mapping_registers "
1476 					"failed for port id=%d diag=%d\n",
1477 					pi, diag);
1478 	}
1479 
1480 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1481 	if (diag != 0) {
1482 		if (diag == -ENOTSUP) {
1483 			port->rx_queue_stats_mapping_enabled = 0;
1484 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1485 		}
1486 		else
1487 			rte_exit(EXIT_FAILURE,
1488 					"set_rx_queue_stats_mapping_registers "
1489 					"failed for port id=%d diag=%d\n",
1490 					pi, diag);
1491 	}
1492 }
1493 
1494 void
1495 init_port_config(void)
1496 {
1497 	portid_t pid;
1498 	struct rte_port *port;
1499 
1500 	for (pid = 0; pid < nb_ports; pid++) {
1501 		port = &ports[pid];
1502 		port->dev_conf.rxmode = rx_mode;
1503 		port->dev_conf.fdir_conf = fdir_conf;
1504 		if (nb_rxq > 0) {
1505 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1506 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1507 		} else {
1508 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1509 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1510 		}
1511 		port->rx_conf.rx_thresh = rx_thresh;
1512 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1513 		port->rx_conf.rx_drop_en = rx_drop_en;
1514 		port->tx_conf.tx_thresh = tx_thresh;
1515 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1516 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1517 		port->tx_conf.txq_flags = txq_flags;
1518 
1519 		rte_eth_macaddr_get(pid, &port->eth_addr);
1520 
1521 		map_port_queue_stats_mapping_registers(pid, port);
1522 	}
1523 }
1524 
1525 const uint16_t vlan_tags[] = {
1526 		0,  1,  2,  3,  4,  5,  6,  7,
1527 		8,  9, 10, 11,  12, 13, 14, 15,
1528 		16, 17, 18, 19, 20, 21, 22, 23,
1529 		24, 25, 26, 27, 28, 29, 30, 31
1530 };
1531 
1532 static  int
1533 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1534 {
1535 	uint8_t i;
1536 
1537 	/*
1538 	 * Build up the correct configuration for DCB+VT based on the VLAN
1539 	 * tags array given above, and the number of traffic classes available.
1540 	 */
1541 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1542 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1543 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1544 
1545 		/* VMDQ+DCB RX and TX configurations */
1546 		vmdq_rx_conf.enable_default_pool = 0;
1547 		vmdq_rx_conf.default_pool = 0;
1548 		vmdq_rx_conf.nb_queue_pools =
1549 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1550 		vmdq_tx_conf.nb_queue_pools =
1551 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1552 
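		/*
		 * Map each VLAN tag to a pool, assigning the tags to pools in
		 * a round-robin fashion.
		 */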
1553 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1554 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1555 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1556 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1557 		}
1558 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1559 			vmdq_rx_conf.dcb_queue[i] = i;
1560 			vmdq_tx_conf.dcb_queue[i] = i;
1561 		}
1562 
1563 		/* Set the multi-queue DCB mode of RX and TX */
1564 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1565 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1566 		if (dcb_conf->pfc_en)
1567 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1568 		else
1569 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1570 
1571 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1572 				  sizeof(struct rte_eth_vmdq_dcb_conf)));
1573 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1574 				  sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1575 	}
1576 	else {
1577 		struct rte_eth_dcb_rx_conf rx_conf;
1578 		struct rte_eth_dcb_tx_conf tx_conf;
1579 
1580 		/* queue mapping configuration of DCB RX and TX */
1581 		if (dcb_conf->num_tcs == ETH_4_TCS)
1582 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1583 		else
1584 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1585 
1586 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1587 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1588 
1589 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1590 			rx_conf.dcb_queue[i] = i;
1591 			tx_conf.dcb_queue[i] = i;
1592 		}
1593 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1594 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1595 		if (dcb_conf->pfc_en)
1596 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1597 		else
1598 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1599 
1600 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1601 				  sizeof(struct rte_eth_dcb_rx_conf)));
1602 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1603 				  sizeof(struct rte_eth_dcb_tx_conf)));
1604 	}
1605 
1606 	return 0;
1607 }
1608 
1609 int
1610 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1611 {
1612 	struct rte_eth_conf port_conf;
1613 	struct rte_port *rte_port;
1614 	int retval;
1615 	uint16_t nb_vlan;
1616 	uint16_t i;
1617 
1618 	/* rxq and txq configuration in dcb mode */
1619 	nb_rxq = 128;
1620 	nb_txq = 128;
1621 	rx_free_thresh = 64;
1622 
1623 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1624 	/* Enter DCB configuration status */
1625 	dcb_config = 1;
1626 
1627 	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1628 	/* Set the configuration of DCB in VT mode and DCB in non-VT mode */
1629 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1630 	if (retval < 0)
1631 		return retval;
1632 
1633 	rte_port = &ports[pid];
1634 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1635 
1636 	rte_port->rx_conf.rx_thresh = rx_thresh;
1637 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1638 	rte_port->tx_conf.tx_thresh = tx_thresh;
1639 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1640 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1641 	/* VLAN filter */
1642 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1643 	for (i = 0; i < nb_vlan; i++) {
1644 		rx_vft_set(pid, vlan_tags[i], 1);
1645 	}
1646 
1647 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1648 	map_port_queue_stats_mapping_registers(pid, rte_port);
1649 
1650 	return 0;
1651 }
1652 
1653 #ifdef RTE_EXEC_ENV_BAREMETAL
1654 #define main _main
1655 #endif
1656 
1657 int
1658 main(int argc, char** argv)
1659 {
1660 	int  diag;
1661 	uint8_t port_id;
1662 
1663 	diag = rte_eal_init(argc, argv);
1664 	if (diag < 0)
1665 		rte_panic("Cannot init EAL\n");
1666 
1667 	if (rte_pmd_init_all())
1668 		rte_panic("Cannot init PMD\n");
1669 
1670 	if (rte_eal_pci_probe())
1671 		rte_panic("Cannot probe PCI\n");
1672 
1673 	nb_ports = (portid_t) rte_eth_dev_count();
1674 	if (nb_ports == 0)
1675 		rte_exit(EXIT_FAILURE, "No probed Ethernet devices - "
1676 			  "check that "
1677 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y, "
1678 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and "
1679 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y are set in your "
1680 			  "configuration file\n");
1681 
1682 	set_def_fwd_config();
1683 	if (nb_lcores == 0)
1684 		rte_panic("Empty set of forwarding logical cores - check the "
1685 			  "core mask supplied in the command parameters\n");
1686 
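	/*
	 * rte_eal_init() returned the number of arguments it consumed; skip
	 * over them to reach the application arguments.
	 */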
1687 	argc -= diag;
1688 	argv += diag;
1689 	if (argc > 1)
1690 		launch_args_parse(argc, argv);
1691 
1692 	if (nb_rxq > nb_txq)
1693 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1694 		       "but nb_txq=%d will prevent it from being fully tested.\n",
1695 		       nb_rxq, nb_txq);
1696 
1697 	init_config();
1698 	start_port(RTE_PORT_ALL);
1699 
1700 	/* set all ports to promiscuous mode by default */
1701 	for (port_id = 0; port_id < nb_ports; port_id++)
1702 		rte_eth_promiscuous_enable(port_id);
1703 
1704 	if (interactive == 1)
1705 		prompt();
1706 	else {
1707 		char c;
1708 		int rc;
1709 
1710 		printf("No interactive command line; starting packet forwarding\n");
1711 		start_packet_forwarding(0);
1712 		printf("Press enter to exit\n");
1713 		rc = read(0, &c, 1);
1714 		if (rc < 0)
1715 			return 1;
1716 	}
1717 
1718 	return 0;
1719 }
1720