xref: /dpdk/examples/vmdq/main.c (revision 60efb44f73c9a5f3f19cc78ba2d6eaf398ab51dc)
16bb97df5SIntel /*-
26bb97df5SIntel  *   BSD LICENSE
36bb97df5SIntel  *
4e9d48c00SBruce Richardson  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
56bb97df5SIntel  *   All rights reserved.
66bb97df5SIntel  *
76bb97df5SIntel  *   Redistribution and use in source and binary forms, with or without
86bb97df5SIntel  *   modification, are permitted provided that the following conditions
96bb97df5SIntel  *   are met:
106bb97df5SIntel  *
116bb97df5SIntel  *     * Redistributions of source code must retain the above copyright
126bb97df5SIntel  *       notice, this list of conditions and the following disclaimer.
136bb97df5SIntel  *     * Redistributions in binary form must reproduce the above copyright
146bb97df5SIntel  *       notice, this list of conditions and the following disclaimer in
156bb97df5SIntel  *       the documentation and/or other materials provided with the
166bb97df5SIntel  *       distribution.
176bb97df5SIntel  *     * Neither the name of Intel Corporation nor the names of its
186bb97df5SIntel  *       contributors may be used to endorse or promote products derived
196bb97df5SIntel  *       from this software without specific prior written permission.
206bb97df5SIntel  *
216bb97df5SIntel  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
226bb97df5SIntel  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
236bb97df5SIntel  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
246bb97df5SIntel  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
256bb97df5SIntel  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
266bb97df5SIntel  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
276bb97df5SIntel  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
286bb97df5SIntel  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
296bb97df5SIntel  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
306bb97df5SIntel  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
316bb97df5SIntel  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
326bb97df5SIntel  */
336bb97df5SIntel 
346bb97df5SIntel #include <stdint.h>
356bb97df5SIntel #include <sys/queue.h>
366bb97df5SIntel #include <stdlib.h>
376bb97df5SIntel #include <string.h>
386bb97df5SIntel #include <stdio.h>
396bb97df5SIntel #include <assert.h>
406bb97df5SIntel #include <errno.h>
416bb97df5SIntel #include <signal.h>
426bb97df5SIntel #include <stdarg.h>
436bb97df5SIntel #include <inttypes.h>
446bb97df5SIntel #include <getopt.h>
456bb97df5SIntel 
466bb97df5SIntel #include <rte_common.h>
476bb97df5SIntel #include <rte_log.h>
486bb97df5SIntel #include <rte_memory.h>
496bb97df5SIntel #include <rte_memcpy.h>
506bb97df5SIntel #include <rte_memzone.h>
516bb97df5SIntel #include <rte_eal.h>
526bb97df5SIntel #include <rte_per_lcore.h>
536bb97df5SIntel #include <rte_launch.h>
546bb97df5SIntel #include <rte_atomic.h>
556bb97df5SIntel #include <rte_cycles.h>
566bb97df5SIntel #include <rte_prefetch.h>
576bb97df5SIntel #include <rte_lcore.h>
586bb97df5SIntel #include <rte_per_lcore.h>
596bb97df5SIntel #include <rte_branch_prediction.h>
606bb97df5SIntel #include <rte_interrupts.h>
616bb97df5SIntel #include <rte_pci.h>
626bb97df5SIntel #include <rte_random.h>
636bb97df5SIntel #include <rte_debug.h>
646bb97df5SIntel #include <rte_ether.h>
656bb97df5SIntel #include <rte_ethdev.h>
666bb97df5SIntel #include <rte_log.h>
676bb97df5SIntel #include <rte_mempool.h>
686bb97df5SIntel #include <rte_mbuf.h>
696bb97df5SIntel #include <rte_memcpy.h>
706bb97df5SIntel 
71e4363e81SXutao Sun #define MAX_QUEUES 1024
/*
 * 1024 queues are required to meet the needs of a large number of
 * vmdq pools: (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
 */
76e4363e81SXutao Sun #define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
77e4363e81SXutao Sun 						RTE_TEST_TX_DESC_DEFAULT))
786bb97df5SIntel #define MBUF_CACHE_SIZE 64
796bb97df5SIntel 
806bb97df5SIntel #define MAX_PKT_BURST 32
816bb97df5SIntel 
826bb97df5SIntel /*
836bb97df5SIntel  * Configurable number of RX/TX ring descriptors
846bb97df5SIntel  */
856bb97df5SIntel #define RTE_TEST_RX_DESC_DEFAULT 128
866bb97df5SIntel #define RTE_TEST_TX_DESC_DEFAULT 512
876bb97df5SIntel 
886bb97df5SIntel #define INVALID_PORT_ID 0xFF
896bb97df5SIntel 
906bb97df5SIntel /* mask of enabled ports */
91b30eb1d2SHuawei Xie static uint32_t enabled_port_mask;
926bb97df5SIntel 
/* number of pools (if the user does not specify any, 8 by default) */
946bb97df5SIntel static uint32_t num_queues = 8;
956bb97df5SIntel static uint32_t num_pools = 8;
966bb97df5SIntel 
/* empty vmdq configuration structure. Filled in programmatically */
986bb97df5SIntel static const struct rte_eth_conf vmdq_conf_default = {
996bb97df5SIntel 	.rxmode = {
1006bb97df5SIntel 		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
1016bb97df5SIntel 		.split_hdr_size = 0,
1026bb97df5SIntel 		.header_split   = 0, /**< Header Split disabled */
1036bb97df5SIntel 		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
1046bb97df5SIntel 		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
1056bb97df5SIntel 		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
1066bb97df5SIntel 	},
1076bb97df5SIntel 
1086bb97df5SIntel 	.txmode = {
1096bb97df5SIntel 		.mq_mode = ETH_MQ_TX_NONE,
1106bb97df5SIntel 	},
1116bb97df5SIntel 	.rx_adv_conf = {
1126bb97df5SIntel 		/*
1136bb97df5SIntel 		 * should be overridden separately in code with
1146bb97df5SIntel 		 * appropriate values
1156bb97df5SIntel 		 */
1166bb97df5SIntel 		.vmdq_rx_conf = {
1176bb97df5SIntel 			.nb_queue_pools = ETH_8_POOLS,
1186bb97df5SIntel 			.enable_default_pool = 0,
1196bb97df5SIntel 			.default_pool = 0,
1206bb97df5SIntel 			.nb_pool_maps = 0,
1216bb97df5SIntel 			.pool_map = {{0, 0},},
1226bb97df5SIntel 		},
1236bb97df5SIntel 	},
1246bb97df5SIntel };
1256bb97df5SIntel 
1266bb97df5SIntel static unsigned lcore_ids[RTE_MAX_LCORE];
1276bb97df5SIntel static uint8_t ports[RTE_MAX_ETHPORTS];
128b30eb1d2SHuawei Xie static unsigned num_ports; /**< The number of ports specified in command line */
1296bb97df5SIntel 
1306bb97df5SIntel /* array used for printing out statistics */
1316bb97df5SIntel volatile unsigned long rxPackets[MAX_QUEUES] = {0};
1326bb97df5SIntel 
1336bb97df5SIntel const uint16_t vlan_tags[] = {
1346bb97df5SIntel 	0,  1,  2,  3,  4,  5,  6,  7,
1356bb97df5SIntel 	8,  9, 10, 11,	12, 13, 14, 15,
1366bb97df5SIntel 	16, 17, 18, 19, 20, 21, 22, 23,
1376bb97df5SIntel 	24, 25, 26, 27, 28, 29, 30, 31,
1386bb97df5SIntel 	32, 33, 34, 35, 36, 37, 38, 39,
1396bb97df5SIntel 	40, 41, 42, 43, 44, 45, 46, 47,
1406bb97df5SIntel 	48, 49, 50, 51, 52, 53, 54, 55,
1416bb97df5SIntel 	56, 57, 58, 59, 60, 61, 62, 63,
1426bb97df5SIntel };
1432a13a5a0SHuawei Xie const uint16_t num_vlans = RTE_DIM(vlan_tags);
1442a13a5a0SHuawei Xie static uint16_t num_pf_queues,  num_vmdq_queues;
1452a13a5a0SHuawei Xie static uint16_t vmdq_pool_base, vmdq_queue_base;
1462a13a5a0SHuawei Xie /* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
1472a13a5a0SHuawei Xie static struct ether_addr pool_addr_template = {
1482a13a5a0SHuawei Xie 	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
1492a13a5a0SHuawei Xie };
1506bb97df5SIntel 
1516bb97df5SIntel /* ethernet addresses of ports */
1526bb97df5SIntel static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
1536bb97df5SIntel 
1546bb97df5SIntel #define MAX_QUEUE_NUM_10G 128
1556bb97df5SIntel #define MAX_QUEUE_NUM_1G 8
1566bb97df5SIntel #define MAX_POOL_MAP_NUM_10G 64
1576bb97df5SIntel #define MAX_POOL_MAP_NUM_1G 32
1586bb97df5SIntel #define MAX_POOL_NUM_10G 64
1596bb97df5SIntel #define MAX_POOL_NUM_1G 8
160b30eb1d2SHuawei Xie /*
161b30eb1d2SHuawei Xie  * Builds up the correct configuration for vmdq based on the vlan tags array
162b30eb1d2SHuawei Xie  * given above, and determine the queue number and pool map number according to
163b30eb1d2SHuawei Xie  * valid pool number
164b30eb1d2SHuawei Xie  */
1656bb97df5SIntel static inline int
1666bb97df5SIntel get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
1676bb97df5SIntel {
1686bb97df5SIntel 	struct rte_eth_vmdq_rx_conf conf;
1696bb97df5SIntel 	unsigned i;
1706bb97df5SIntel 
1716bb97df5SIntel 	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
1722a13a5a0SHuawei Xie 	conf.nb_pool_maps = num_pools;
1736bb97df5SIntel 	conf.enable_default_pool = 0;
1746bb97df5SIntel 	conf.default_pool = 0; /* set explicit value, even if not used */
1756bb97df5SIntel 
1766bb97df5SIntel 	for (i = 0; i < conf.nb_pool_maps; i++) {
1776bb97df5SIntel 		conf.pool_map[i].vlan_id = vlan_tags[i];
1786bb97df5SIntel 		conf.pool_map[i].pools = (1UL << (i % num_pools));
1796bb97df5SIntel 	}
1806bb97df5SIntel 
1816bb97df5SIntel 	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
1826bb97df5SIntel 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
1836bb97df5SIntel 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
1846bb97df5SIntel 	return 0;
1856bb97df5SIntel }
1866bb97df5SIntel 
1876bb97df5SIntel /*
1886bb97df5SIntel  * Initialises a given port using global settings and with the rx buffers
1896bb97df5SIntel  * coming from the mbuf_pool passed as parameter
1906bb97df5SIntel  */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_conf port_conf;
	uint16_t rxRings, txRings;
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow to process part of VMDQ pools specified by num_pools in
	 * command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d >max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}
	/*
	 * NOTE(review): the device is configured with max_nb_pools rather
	 * than the user-requested num_pools; only the first num_pools pools
	 * are then actually used (see num_vmdq_queues below).  This looks
	 * intentional but is worth confirming against the PMD behaviour.
	 */
	retval = get_eth_conf(&port_conf, max_nb_pools);
	if (retval < 0)
		return retval;

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 */
	/* There is assumption here all ports have the same configuration! */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_pools * queues_per_pool;
	/* num_queues drives the stats loop in sighup_handler() */
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;

	printf("pf queue num: %u, configured vmdq pool num: %u,"
		" each vmdq pool has %u queues\n",
		num_pf_queues, num_pools, queues_per_pool);
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (port >= rte_eth_dev_count())
		return -1;

	/*
	 * Though in this example, we only receive packets from the first queue
	 * of each pool and send packets through first rte_lcore_count() tx
	 * queues of vmdq queues, all queues including pf queues are setup.
	 * This is because VMDQ queues doesn't always start from zero, and the
	 * PMD layer doesn't support selectively initialising part of rx/tx
	 * queues.
	 */
	rxRings = (uint16_t)dev_info.max_rx_queues;
	txRings = (uint16_t)dev_info.max_tx_queues;
	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
	if (retval != 0)
		return retval;

	/* let the driver clamp the requested descriptor counts to its limits */
	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	/*
	 * The mbuf pool was sized from the default descriptor counts
	 * (NUM_MBUFS_PER_PORT); refuse to continue if the adjusted rings
	 * would need more mbufs than the pool can supply.
	 */
	if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
			RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	/* re-read dev_info to pick up the driver's default rxconf, then
	 * enable drop-on-no-descriptor for every rx queue */
	rte_eth_dev_info_get(port, &dev_info);
	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1;
	for (q = 0; q < rxRings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					rxconf,
					mbuf_pool);
		if (retval < 0) {
			printf("initialise rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < txRings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					NULL);
		if (retval < 0) {
			printf("initialise tx queue %d failed\n", q);
			return retval;
		}
	}

	retval  = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/*
	 * Set a MAC (template 52:54:00:12:<port>:<pool>) for each pool.
	 * There is no default MAC for the pools in i40e.
	 * Remove this after i40e fixes this issue.
	 */
	for (q = 0; q < num_pools; q++) {
		struct ether_addr mac;
		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}
3346bb97df5SIntel 
3356bb97df5SIntel /* Check num_pools parameter and set it if OK*/
3366bb97df5SIntel static int
3376bb97df5SIntel vmdq_parse_num_pools(const char *q_arg)
3386bb97df5SIntel {
3396bb97df5SIntel 	char *end = NULL;
3406bb97df5SIntel 	int n;
3416bb97df5SIntel 
3426bb97df5SIntel 	/* parse number string */
3436bb97df5SIntel 	n = strtol(q_arg, &end, 10);
3446bb97df5SIntel 	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
3456bb97df5SIntel 		return -1;
3466bb97df5SIntel 
3472a13a5a0SHuawei Xie 	if (num_pools > num_vlans) {
3482a13a5a0SHuawei Xie 		printf("num_pools %d > num_vlans %d\n", num_pools, num_vlans);
3492a13a5a0SHuawei Xie 		return -1;
3502a13a5a0SHuawei Xie 	}
3512a13a5a0SHuawei Xie 
3526bb97df5SIntel 	num_pools = n;
3536bb97df5SIntel 
3546bb97df5SIntel 	return 0;
3556bb97df5SIntel }
3566bb97df5SIntel 
3576bb97df5SIntel 
/*
 * Parse the hexadecimal port mask from the command line.
 * Returns the mask on success, or -1 for an unparsable or all-zero mask.
 */
static int
parse_portmask(const char *portmask)
{
	char *endptr = NULL;
	unsigned long mask;

	mask = strtoul(portmask, &endptr, 16);

	/* reject empty input and trailing garbage */
	if (portmask[0] == '\0' || endptr == NULL || *endptr != '\0')
		return -1;

	/* an empty mask selects no ports and is therefore invalid */
	if (mask == 0)
		return -1;

	return mask;
}
3746bb97df5SIntel 
3756bb97df5SIntel /* Display usage */
/* Display usage (fixed: the option text had a stray unmatched ']') */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	"  --nb-pools NP: number of pools\n",
	       prgname);
}
3836bb97df5SIntel 
3846bb97df5SIntel /*  Parse the argument (num_pools) given in the command line of the application */
3856bb97df5SIntel static int
3866bb97df5SIntel vmdq_parse_args(int argc, char **argv)
3876bb97df5SIntel {
3886bb97df5SIntel 	int opt;
3896bb97df5SIntel 	int option_index;
3906bb97df5SIntel 	unsigned i;
3916bb97df5SIntel 	const char *prgname = argv[0];
3926bb97df5SIntel 	static struct option long_option[] = {
3936bb97df5SIntel 		{"nb-pools", required_argument, NULL, 0},
3946bb97df5SIntel 		{NULL, 0, 0, 0}
3956bb97df5SIntel 	};
3966bb97df5SIntel 
3976bb97df5SIntel 	/* Parse command line */
398b30eb1d2SHuawei Xie 	while ((opt = getopt_long(argc, argv, "p:", long_option,
399b30eb1d2SHuawei Xie 		&option_index)) != EOF) {
4006bb97df5SIntel 		switch (opt) {
4016bb97df5SIntel 		/* portmask */
4026bb97df5SIntel 		case 'p':
4036bb97df5SIntel 			enabled_port_mask = parse_portmask(optarg);
4046bb97df5SIntel 			if (enabled_port_mask == 0) {
4056bb97df5SIntel 				printf("invalid portmask\n");
4066bb97df5SIntel 				vmdq_usage(prgname);
4076bb97df5SIntel 				return -1;
4086bb97df5SIntel 			}
4096bb97df5SIntel 			break;
4106bb97df5SIntel 		case 0:
4116bb97df5SIntel 			if (vmdq_parse_num_pools(optarg) == -1) {
4126bb97df5SIntel 				printf("invalid number of pools\n");
4136bb97df5SIntel 				vmdq_usage(prgname);
4146bb97df5SIntel 				return -1;
4156bb97df5SIntel 			}
4166bb97df5SIntel 			break;
4176bb97df5SIntel 
4186bb97df5SIntel 		default:
4196bb97df5SIntel 			vmdq_usage(prgname);
4206bb97df5SIntel 			return -1;
4216bb97df5SIntel 		}
4226bb97df5SIntel 	}
4236bb97df5SIntel 
4246bb97df5SIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
4256bb97df5SIntel 		if (enabled_port_mask & (1 << i))
4266bb97df5SIntel 			ports[num_ports++] = (uint8_t)i;
4276bb97df5SIntel 	}
4286bb97df5SIntel 
4296bb97df5SIntel 	if (num_ports < 2 || num_ports % 2) {
4306bb97df5SIntel 		printf("Current enabled port number is %u,"
4316bb97df5SIntel 			"but it should be even and at least 2\n", num_ports);
4326bb97df5SIntel 		return -1;
4336bb97df5SIntel 	}
4346bb97df5SIntel 
4356bb97df5SIntel 	return 0;
4366bb97df5SIntel }
4376bb97df5SIntel 
/*
 * Rewrite the Ethernet header of mbuf m for forwarding out of dst_port:
 * destination MAC becomes 02:00:00:00:00:<dst_port>, source MAC becomes
 * the port's own address.
 */
static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	/*
	 * Single 8-byte store into the 6-byte destination MAC: the two
	 * extra bytes spill into s_addr, which is overwritten immediately
	 * below, so the statement order here is load-bearing.
	 * NOTE(review): the byte layout of the constant assumes a
	 * little-endian CPU that tolerates unaligned 64-bit stores —
	 * confirm for new target architectures.
	 */
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
}
4536bb97df5SIntel 
4546bb97df5SIntel /* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q;

	/*
	 * NOTE(review): printf() is not async-signal-safe; acceptable in a
	 * sample application, but not a pattern for production handlers.
	 * num_queues/num_pools gives the queues-per-pool stride used to
	 * group the per-queue counters under a pool heading.
	 */
	for (q = 0; q < num_queues; q++) {
		if (q % (num_queues/num_pools) == 0)
			printf("\nPool %u: ", q/(num_queues/num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}
4666bb97df5SIntel 
4676bb97df5SIntel /*
4686bb97df5SIntel  * Main thread that does the work, reading from INPUT_PORT
4696bb97df5SIntel  * and writing to OUTPUT_PORT
4706bb97df5SIntel  */
static int
lcore_main(__attribute__((__unused__)) void *dummy)
{
	const uint16_t lcore_id = (uint16_t)rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t core_id = 0;
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

	/* map this lcore id to its 0-based position in lcore_ids[] */
	for (i = 0; i < num_cores; i++)
		if (lcore_ids[i] == lcore_id) {
			core_id = i;
			break;
		}

	/*
	 * Partition the vmdq queues as evenly as possible across the cores:
	 * the first `remainder` cores each take one extra queue.
	 */
	if (remainder != 0) {
		if (core_id < remainder) {
			startQueue = (uint16_t)(core_id *
					(num_vmdq_queues / num_cores + 1));
			endQueue = (uint16_t)(startQueue +
					(num_vmdq_queues / num_cores) + 1);
		} else {
			startQueue = (uint16_t)(core_id *
					(num_vmdq_queues / num_cores) +
					remainder);
			endQueue = (uint16_t)(startQueue +
					(num_vmdq_queues / num_cores));
		}
	} else {
		startQueue = (uint16_t)(core_id *
				(num_vmdq_queues / num_cores));
		endQueue = (uint16_t)(startQueue +
				(num_vmdq_queues / num_cores));
	}

	/* vmdq queue idx doesn't always start from zero.*/
	startQueue += vmdq_queue_base;
	endQueue   += vmdq_queue_base;
	printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id,
		(unsigned)lcore_id, startQueue, endQueue - 1);

	/* with more cores than queues, surplus cores get an empty range */
	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	/* forwarding loop: never returns */
	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);

		for (p = 0; p < num_ports; p++) {
			const uint8_t sport = ports[p];
			/* 0 <-> 1, 2 <-> 3 etc */
			const uint8_t dport = ports[p ^ 1];
			if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(sport,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				/* each queue index is owned by one lcore, but
				 * sighup_handler() reads the counter without
				 * synchronisation: values are best-effort */
				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dport);

				const uint16_t txCount = rte_eth_tx_burst(dport,
					vmdq_queue_base + core_id,
					buf,
					rxCount);

				/* free whatever the TX queue refused */
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}
5546bb97df5SIntel 
5556bb97df5SIntel /*
5566bb97df5SIntel  * Update the global var NUM_PORTS and array PORTS according to system ports number
5576bb97df5SIntel  * and return valid ports number
5586bb97df5SIntel  */
5596bb97df5SIntel static unsigned check_ports_num(unsigned nb_ports)
5606bb97df5SIntel {
5616bb97df5SIntel 	unsigned valid_num_ports = num_ports;
5626bb97df5SIntel 	unsigned portid;
5636bb97df5SIntel 
5646bb97df5SIntel 	if (num_ports > nb_ports) {
5656bb97df5SIntel 		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
5666bb97df5SIntel 			num_ports, nb_ports);
5676bb97df5SIntel 		num_ports = nb_ports;
5686bb97df5SIntel 	}
5696bb97df5SIntel 
5706bb97df5SIntel 	for (portid = 0; portid < num_ports; portid++) {
5716bb97df5SIntel 		if (ports[portid] >= nb_ports) {
5726bb97df5SIntel 			printf("\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
5736bb97df5SIntel 				ports[portid], (nb_ports - 1));
5746bb97df5SIntel 			ports[portid] = INVALID_PORT_ID;
5756bb97df5SIntel 			valid_num_ports--;
5766bb97df5SIntel 		}
5776bb97df5SIntel 	}
5786bb97df5SIntel 	return valid_num_ports;
5796bb97df5SIntel }
5806bb97df5SIntel 
5816bb97df5SIntel /* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id, core_id = 0;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint8_t portid;

	/* dump per-queue statistics when the process receives SIGHUP */
	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	/* record the enabled lcore ids for lcore_main()'s core_id lookup */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	/*
	 * NOTE(review): rte_lcore_count() cannot exceed RTE_MAX_LCORE, so
	 * this guard appears to be dead code — confirm original intent.
	 */
	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	/* forwarding is pairwise, so an even number (>= 2) is required */
	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n");
	}

	/* one shared mbuf pool sized for all ports' rx/tx rings */
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every lcore; it loops forever on success */
	rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}
651