/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
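/*
 * VMDQ forwarding sample: each enabled port is configured in VMDQ-only
 * RX mode so that MAC/VLAN filtering steers packets into per-pool queue
 * sets. Each lcore polls its share of the VMDQ queues, rewrites the MAC
 * addresses and forwards every packet to the paired port
 * (0 <-> 1, 2 <-> 3, ...). SIGHUP prints per-queue statistics.
 */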

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

#define MAX_QUEUES 128
/*
 * For 10 GbE, 128 queues require roughly
 * 128*512 (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
 */
#define NUM_MBUFS_PER_PORT (128*512)
#define MBUF_CACHE_SIZE 64
#define MBUF_DATA_SIZE (2048 + RTE_PKTMBUF_HEADROOM)

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;

/* number of pools (if user does not specify any, 8 by default) */
static uint32_t num_queues = 8;
static uint32_t num_pools = 8;
/* empty vmdq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports; /**< The number of ports specified in command line */

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34, 35, 36, 37, 38, 39,
	40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55,
	56, 57, 58, 59, 60, 61, 62, 63,
};
const uint16_t num_vlans = RTE_DIM(vlan_tags);
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

#define MAX_QUEUE_NUM_10G 128
#define MAX_QUEUE_NUM_1G 8
#define MAX_POOL_MAP_NUM_10G 64
#define MAX_POOL_MAP_NUM_1G 32
#define MAX_POOL_NUM_10G 64
#define MAX_POOL_NUM_1G 8
/*
 * Builds up the correct configuration for vmdq based on the vlan tags array
 * given above, and determines the queue number and pool map number according
 * to valid pool number
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
{
	struct rte_eth_vmdq_rx_conf conf;
	unsigned i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
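	/*
	 * Map one VLAN tag per pool: with nb_pool_maps == num_pools, entry i
	 * steers vlan_tags[i] into pool (i % num_pools), i.e. pool i, so
	 * received traffic is classified into pools by its VLAN ID.
	 */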
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << (i % num_pools));
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_conf port_conf;
	uint16_t rxRings, txRings;
	const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	const uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow to process part of VMDQ pools specified by num_pools in
	 * command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %u > max_nb_pools %u\n",
			num_pools, max_nb_pools);
		return -1;
	}
	retval = get_eth_conf(&port_conf, max_nb_pools);
	if (retval < 0)
		return retval;

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * There is an assumption here that all ports have the same
	 * configuration!
	 */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_pools * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
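	/*
	 * Illustrative numbers only (the real values come from the PMD):
	 * a 10 GbE port reporting max_rx_queues = 128, vmdq_queue_num = 128
	 * and max_vmdq_pools = 64 yields 0 PF queues and 2 queues per pool;
	 * with --nb-pools 8 this application then polls 8 * 2 = 16 VMDQ
	 * queues starting at vmdq_queue_base.
	 */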
	printf("pf queue num: %u, configured vmdq pool num: %u,"
		" each vmdq pool has %u queues\n",
		num_pf_queues, num_pools, queues_per_pool);
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (port >= rte_eth_dev_count())
		return -1;

	/*
	 * Though in this example we only receive packets from the first queue
	 * of each pool and send packets through the first rte_lcore_count()
	 * tx queues of vmdq queues, all queues including pf queues are setup.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising part of rx/tx
	 * queues.
	 */
	rxRings = (uint16_t)dev_info.max_rx_queues;
	txRings = (uint16_t)dev_info.max_tx_queues;
	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
	if (retval != 0)
		return retval;

	rte_eth_dev_info_get(port, &dev_info);
	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1;
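	/*
	 * rx_drop_en tells the NIC to drop packets destined for a queue with
	 * no free descriptors rather than hold them; otherwise one unpolled
	 * pool could back up the shared receive buffer for every pool.
	 */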
	for (q = 0; q < rxRings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					rxconf,
					mbuf_pool);
		if (retval < 0) {
			printf("initialise rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < txRings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					NULL);
		if (retval < 0) {
			printf("initialise tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/*
	 * Set mac for each pool.
	 * There is no default mac for the pools in i40e.
	 * Remove this after i40e fixes this issue.
	 */
	for (q = 0; q < num_pools; q++) {
		struct ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}

/* Check num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	/* validate the parsed value, not the stale global default */
	if (n < 1 || n > num_vlans) {
		printf("num_pools %d out of range [1, %d]\n", n, num_vlans);
		return -1;
	}

	num_pools = n;

	return 0;
}


static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string; zero signals an invalid mask */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	       "  --nb-pools NP: number of pools\n",
	       prgname);
}

/* Parse the arguments (portmask, num_pools) given on the command line */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
			&option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (vmdq_parse_num_pools(optarg) == -1) {
				printf("invalid number of pools\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}
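/*
 * Write the destination MAC 02:00:00:00:00:<dst_port> with a single 8-byte
 * store: the value is little-endian, so byte 0 is 0x02 and byte 5 is the
 * port number. The two bytes that spill past d_addr land in s_addr, which
 * is immediately overwritten by ether_addr_copy() below.
 */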
static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
}

/* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q;

	for (q = 0; q < num_queues; q++) {
		if (q % (num_queues/num_pools) == 0)
			printf("\nPool %u: ", q/(num_queues/num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}

/*
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT
 */
static int
lcore_main(__attribute__((__unused__)) void *dummy)
{
	const uint16_t lcore_id = (uint16_t)rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t core_id = 0;
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

	for (i = 0; i < num_cores; i++)
		if (lcore_ids[i] == lcore_id) {
			core_id = i;
			break;
		}
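	/*
	 * Split num_vmdq_queues as evenly as possible across the cores:
	 * the first 'remainder' cores take one extra queue each, so e.g.
	 * 10 queues over 4 cores gives ranges of 3, 3, 2 and 2 queues.
	 */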

	if (remainder != 0) {
		if (core_id < remainder) {
			startQueue = (uint16_t)(core_id *
					(num_vmdq_queues / num_cores + 1));
			endQueue = (uint16_t)(startQueue +
					(num_vmdq_queues / num_cores) + 1);
		} else {
			startQueue = (uint16_t)(core_id *
					(num_vmdq_queues / num_cores) +
					remainder);
			endQueue = (uint16_t)(startQueue +
					(num_vmdq_queues / num_cores));
		}
	} else {
		startQueue = (uint16_t)(core_id *
				(num_vmdq_queues / num_cores));
		endQueue = (uint16_t)(startQueue +
				(num_vmdq_queues / num_cores));
	}

	/* vmdq queue idx doesn't always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue += vmdq_queue_base;
	printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id,
		(unsigned)lcore_id, startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", lcore_id);
		return 0;
	}
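	/*
	 * Forwarding loop: poll every owned VMDQ queue on each port, count
	 * packets per queue, rewrite the MAC addresses and transmit on the
	 * paired port from a per-core tx queue; whatever the tx ring cannot
	 * absorb is freed rather than retried.
	 */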
	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);

		for (p = 0; p < num_ports; p++) {
			const uint8_t sport = ports[p];
			/* 0 <-> 1, 2 <-> 3 etc */
			const uint8_t dport = ports[p ^ 1];

			if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(sport,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dport);

				const uint16_t txCount = rte_eth_tx_burst(dport,
					vmdq_queue_base + core_id,
					buf,
					rxCount);

				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global var NUM_PORTS and array PORTS according to system ports
 * number and return valid ports number
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			printf("\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id, core_id = 0;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint8_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE,
			"Error: number of valid ports must be even and at least 2\n");
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, MBUF_DATA_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every lcore */
	rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
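	/*
	 * CALL_MASTER makes the master lcore run lcore_main() inline; since
	 * lcore_main() loops forever for any core that owns queues, the wait
	 * below is only reached if the master core had nothing to poll.
	 */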
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}