16bb97df5SIntel /*- 26bb97df5SIntel * BSD LICENSE 36bb97df5SIntel * 4e9d48c00SBruce Richardson * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 56bb97df5SIntel * All rights reserved. 66bb97df5SIntel * 76bb97df5SIntel * Redistribution and use in source and binary forms, with or without 86bb97df5SIntel * modification, are permitted provided that the following conditions 96bb97df5SIntel * are met: 106bb97df5SIntel * 116bb97df5SIntel * * Redistributions of source code must retain the above copyright 126bb97df5SIntel * notice, this list of conditions and the following disclaimer. 136bb97df5SIntel * * Redistributions in binary form must reproduce the above copyright 146bb97df5SIntel * notice, this list of conditions and the following disclaimer in 156bb97df5SIntel * the documentation and/or other materials provided with the 166bb97df5SIntel * distribution. 176bb97df5SIntel * * Neither the name of Intel Corporation nor the names of its 186bb97df5SIntel * contributors may be used to endorse or promote products derived 196bb97df5SIntel * from this software without specific prior written permission. 206bb97df5SIntel * 216bb97df5SIntel * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 226bb97df5SIntel * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 236bb97df5SIntel * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 246bb97df5SIntel * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 256bb97df5SIntel * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 266bb97df5SIntel * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 276bb97df5SIntel * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 286bb97df5SIntel * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 296bb97df5SIntel * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 306bb97df5SIntel * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 316bb97df5SIntel * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 326bb97df5SIntel */ 336bb97df5SIntel 346bb97df5SIntel #include <stdint.h> 356bb97df5SIntel #include <sys/queue.h> 366bb97df5SIntel #include <stdlib.h> 376bb97df5SIntel #include <string.h> 386bb97df5SIntel #include <stdio.h> 396bb97df5SIntel #include <assert.h> 406bb97df5SIntel #include <errno.h> 416bb97df5SIntel #include <signal.h> 426bb97df5SIntel #include <stdarg.h> 436bb97df5SIntel #include <inttypes.h> 446bb97df5SIntel #include <getopt.h> 456bb97df5SIntel 466bb97df5SIntel #include <rte_common.h> 476bb97df5SIntel #include <rte_log.h> 486bb97df5SIntel #include <rte_memory.h> 496bb97df5SIntel #include <rte_memcpy.h> 506bb97df5SIntel #include <rte_memzone.h> 516bb97df5SIntel #include <rte_tailq.h> 526bb97df5SIntel #include <rte_eal.h> 536bb97df5SIntel #include <rte_per_lcore.h> 546bb97df5SIntel #include <rte_launch.h> 556bb97df5SIntel #include <rte_atomic.h> 566bb97df5SIntel #include <rte_cycles.h> 576bb97df5SIntel #include <rte_prefetch.h> 586bb97df5SIntel #include <rte_lcore.h> 596bb97df5SIntel #include <rte_per_lcore.h> 606bb97df5SIntel #include <rte_branch_prediction.h> 616bb97df5SIntel #include <rte_interrupts.h> 626bb97df5SIntel #include <rte_pci.h> 636bb97df5SIntel #include <rte_random.h> 646bb97df5SIntel #include <rte_debug.h> 656bb97df5SIntel #include <rte_ether.h> 666bb97df5SIntel #include <rte_ethdev.h> 676bb97df5SIntel #include <rte_ring.h> 686bb97df5SIntel #include <rte_log.h> 696bb97df5SIntel #include <rte_mempool.h> 706bb97df5SIntel #include <rte_mbuf.h> 716bb97df5SIntel #include <rte_memcpy.h> 726bb97df5SIntel 736bb97df5SIntel #include "main.h" 746bb97df5SIntel 756bb97df5SIntel #define MAX_QUEUES 128 766bb97df5SIntel /* 776bb97df5SIntel * For 10 GbE, 128 queues require roughly 786bb97df5SIntel * 128*512 (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port. 796bb97df5SIntel */ 806bb97df5SIntel #define NUM_MBUFS_PER_PORT (128*512) 816bb97df5SIntel #define MBUF_CACHE_SIZE 64 826bb97df5SIntel #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) 836bb97df5SIntel 846bb97df5SIntel #define MAX_PKT_BURST 32 856bb97df5SIntel 866bb97df5SIntel /* 876bb97df5SIntel * Configurable number of RX/TX ring descriptors 886bb97df5SIntel */ 896bb97df5SIntel #define RTE_TEST_RX_DESC_DEFAULT 128 906bb97df5SIntel #define RTE_TEST_TX_DESC_DEFAULT 512 916bb97df5SIntel 926bb97df5SIntel #define INVALID_PORT_ID 0xFF 936bb97df5SIntel 946bb97df5SIntel /* mask of enabled ports */ 956bb97df5SIntel static uint32_t enabled_port_mask = 0; 966bb97df5SIntel 976bb97df5SIntel /* number of pools (if user does not specify any, 8 by default */ 986bb97df5SIntel static uint32_t num_queues = 8; 996bb97df5SIntel static uint32_t num_pools = 8; 1006bb97df5SIntel 1016bb97df5SIntel /* empty vmdq configuration structure. Filled in programatically */ 1026bb97df5SIntel static const struct rte_eth_conf vmdq_conf_default = { 1036bb97df5SIntel .rxmode = { 1046bb97df5SIntel .mq_mode = ETH_MQ_RX_VMDQ_ONLY, 1056bb97df5SIntel .split_hdr_size = 0, 1066bb97df5SIntel .header_split = 0, /**< Header Split disabled */ 1076bb97df5SIntel .hw_ip_checksum = 0, /**< IP checksum offload disabled */ 1086bb97df5SIntel .hw_vlan_filter = 0, /**< VLAN filtering disabled */ 1096bb97df5SIntel .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ 1106bb97df5SIntel }, 1116bb97df5SIntel 1126bb97df5SIntel .txmode = { 1136bb97df5SIntel .mq_mode = ETH_MQ_TX_NONE, 1146bb97df5SIntel }, 1156bb97df5SIntel .rx_adv_conf = { 1166bb97df5SIntel /* 1176bb97df5SIntel * should be overridden separately in code with 1186bb97df5SIntel * appropriate values 1196bb97df5SIntel */ 1206bb97df5SIntel .vmdq_rx_conf = { 1216bb97df5SIntel .nb_queue_pools = ETH_8_POOLS, 1226bb97df5SIntel .enable_default_pool = 0, 1236bb97df5SIntel .default_pool = 0, 1246bb97df5SIntel .nb_pool_maps = 0, 1256bb97df5SIntel .pool_map = {{0, 0},}, 1266bb97df5SIntel }, 1276bb97df5SIntel }, 1286bb97df5SIntel }; 1296bb97df5SIntel 1306bb97df5SIntel static unsigned lcore_ids[RTE_MAX_LCORE]; 1316bb97df5SIntel static uint8_t ports[RTE_MAX_ETHPORTS]; 1326bb97df5SIntel static unsigned num_ports = 0; /**< The number of ports specified in command line */ 1336bb97df5SIntel 1346bb97df5SIntel /* array used for printing out statistics */ 1356bb97df5SIntel volatile unsigned long rxPackets[ MAX_QUEUES ] = {0}; 1366bb97df5SIntel 1376bb97df5SIntel const uint16_t vlan_tags[] = { 1386bb97df5SIntel 0, 1, 2, 3, 4, 5, 6, 7, 1396bb97df5SIntel 8, 9, 10, 11, 12, 13, 14, 15, 1406bb97df5SIntel 16, 17, 18, 19, 20, 21, 22, 23, 1416bb97df5SIntel 24, 25, 26, 27, 28, 29, 30, 31, 1426bb97df5SIntel 32, 33, 34, 35, 36, 37, 38, 39, 1436bb97df5SIntel 40, 41, 42, 43, 44, 45, 46, 47, 1446bb97df5SIntel 48, 49, 50, 51, 52, 53, 54, 55, 1456bb97df5SIntel 56, 57, 58, 59, 60, 61, 62, 63, 1466bb97df5SIntel }; 1476bb97df5SIntel 1486bb97df5SIntel /* ethernet addresses of ports */ 1496bb97df5SIntel static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS]; 1506bb97df5SIntel 1516bb97df5SIntel #define MAX_QUEUE_NUM_10G 128 1526bb97df5SIntel #define MAX_QUEUE_NUM_1G 8 1536bb97df5SIntel #define MAX_POOL_MAP_NUM_10G 64 1546bb97df5SIntel #define MAX_POOL_MAP_NUM_1G 32 1556bb97df5SIntel #define MAX_POOL_NUM_10G 64 1566bb97df5SIntel #define MAX_POOL_NUM_1G 8 1576bb97df5SIntel /* Builds up the correct configuration for vmdq based on the vlan tags array 1586bb97df5SIntel * given above, and determine the queue number and pool map number according to valid pool number */ 1596bb97df5SIntel static inline int 1606bb97df5SIntel get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools) 1616bb97df5SIntel { 1626bb97df5SIntel struct rte_eth_vmdq_rx_conf conf; 1636bb97df5SIntel unsigned i; 1646bb97df5SIntel 1656bb97df5SIntel conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools; 1666bb97df5SIntel conf.enable_default_pool = 0; 1676bb97df5SIntel conf.default_pool = 0; /* set explicit value, even if not used */ 1686bb97df5SIntel switch (num_pools) { 1696bb97df5SIntel /* For 10G NIC like 82599, 128 is valid for queue number */ 1706bb97df5SIntel case MAX_POOL_NUM_10G: 1716bb97df5SIntel num_queues = MAX_QUEUE_NUM_10G; 1726bb97df5SIntel conf.nb_pool_maps = MAX_POOL_MAP_NUM_10G; 1736bb97df5SIntel break; 1746bb97df5SIntel /* For 1G NIC like i350, 82580 and 82576, 8 is valid for queue number */ 1756bb97df5SIntel case MAX_POOL_NUM_1G: 1766bb97df5SIntel num_queues = MAX_QUEUE_NUM_1G; 1776bb97df5SIntel conf.nb_pool_maps = MAX_POOL_MAP_NUM_1G; 1786bb97df5SIntel break; 1796bb97df5SIntel default: 1806bb97df5SIntel return -1; 1816bb97df5SIntel } 1826bb97df5SIntel 1836bb97df5SIntel for (i = 0; i < conf.nb_pool_maps; i++){ 1846bb97df5SIntel conf.pool_map[i].vlan_id = vlan_tags[ i ]; 1856bb97df5SIntel conf.pool_map[i].pools = (1UL << (i % num_pools)); 1866bb97df5SIntel } 1876bb97df5SIntel 1886bb97df5SIntel (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf))); 1896bb97df5SIntel (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf, 1906bb97df5SIntel sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf))); 1916bb97df5SIntel return 0; 1926bb97df5SIntel } 1936bb97df5SIntel 1946bb97df5SIntel /* 1956bb97df5SIntel * Validate the pool number accrording to the max pool number gotten form dev_info 1966bb97df5SIntel * If the pool number is invalid, give the error message and return -1 1976bb97df5SIntel */ 1986bb97df5SIntel static inline int 1996bb97df5SIntel validate_num_pools(uint32_t max_nb_pools) 2006bb97df5SIntel { 2016bb97df5SIntel if (num_pools > max_nb_pools) { 2026bb97df5SIntel printf("invalid number of pools\n"); 2036bb97df5SIntel return -1; 2046bb97df5SIntel } 2056bb97df5SIntel 2066bb97df5SIntel switch (max_nb_pools) { 2076bb97df5SIntel /* For 10G NIC like 82599, 64 is valid for pool number */ 2086bb97df5SIntel case MAX_POOL_NUM_10G: 2096bb97df5SIntel if (num_pools != MAX_POOL_NUM_10G) { 2106bb97df5SIntel printf("invalid number of pools\n"); 2116bb97df5SIntel return -1; 2126bb97df5SIntel } 2136bb97df5SIntel break; 2146bb97df5SIntel /* For 1G NIC like i350, 82580 and 82576, 8 is valid for pool number */ 2156bb97df5SIntel case MAX_POOL_NUM_1G: 2166bb97df5SIntel if (num_pools != MAX_POOL_NUM_1G) { 2176bb97df5SIntel printf("invalid number of pools\n"); 2186bb97df5SIntel return -1; 2196bb97df5SIntel } 2206bb97df5SIntel break; 2216bb97df5SIntel default: 2226bb97df5SIntel return -1; 2236bb97df5SIntel } 2246bb97df5SIntel 2256bb97df5SIntel return 0; 2266bb97df5SIntel } 2276bb97df5SIntel 2286bb97df5SIntel /* 2296bb97df5SIntel * Initialises a given port using global settings and with the rx buffers 2306bb97df5SIntel * coming from the mbuf_pool passed as parameter 2316bb97df5SIntel */ 2326bb97df5SIntel static inline int 2336bb97df5SIntel port_init(uint8_t port, struct rte_mempool *mbuf_pool) 2346bb97df5SIntel { 2356bb97df5SIntel struct rte_eth_dev_info dev_info; 236*81f7ecd9SPablo de Lara struct rte_eth_rxconf *rxconf; 2376bb97df5SIntel struct rte_eth_conf port_conf; 2386bb97df5SIntel uint16_t rxRings, txRings = (uint16_t)rte_lcore_count(); 2396bb97df5SIntel const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT, txRingSize = RTE_TEST_TX_DESC_DEFAULT; 2406bb97df5SIntel int retval; 2416bb97df5SIntel uint16_t q; 2426bb97df5SIntel uint32_t max_nb_pools; 2436bb97df5SIntel 2446bb97df5SIntel /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */ 2456bb97df5SIntel rte_eth_dev_info_get (port, &dev_info); 2466bb97df5SIntel max_nb_pools = (uint32_t)dev_info.max_vmdq_pools; 2476bb97df5SIntel retval = validate_num_pools(max_nb_pools); 2486bb97df5SIntel if (retval < 0) 2496bb97df5SIntel return retval; 2506bb97df5SIntel 2516bb97df5SIntel retval = get_eth_conf(&port_conf, num_pools); 2526bb97df5SIntel if (retval < 0) 2536bb97df5SIntel return retval; 2546bb97df5SIntel 2556bb97df5SIntel if (port >= rte_eth_dev_count()) return -1; 2566bb97df5SIntel 2576bb97df5SIntel rxRings = (uint16_t)num_queues, 2586bb97df5SIntel retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf); 2596bb97df5SIntel if (retval != 0) 2606bb97df5SIntel return retval; 2616bb97df5SIntel 262*81f7ecd9SPablo de Lara rte_eth_dev_info_get(port, &dev_info); 263*81f7ecd9SPablo de Lara rxconf = &dev_info.default_rxconf; 264*81f7ecd9SPablo de Lara rxconf->rx_drop_en = 1; 2656bb97df5SIntel for (q = 0; q < rxRings; q ++) { 2666bb97df5SIntel retval = rte_eth_rx_queue_setup(port, q, rxRingSize, 267*81f7ecd9SPablo de Lara rte_eth_dev_socket_id(port), 268*81f7ecd9SPablo de Lara rxconf, 2696bb97df5SIntel mbuf_pool); 2706bb97df5SIntel if (retval < 0) 2716bb97df5SIntel return retval; 2726bb97df5SIntel } 2736bb97df5SIntel 2746bb97df5SIntel for (q = 0; q < txRings; q ++) { 2756bb97df5SIntel retval = rte_eth_tx_queue_setup(port, q, txRingSize, 276*81f7ecd9SPablo de Lara rte_eth_dev_socket_id(port), 277*81f7ecd9SPablo de Lara NULL); 2786bb97df5SIntel if (retval < 0) 2796bb97df5SIntel return retval; 2806bb97df5SIntel } 2816bb97df5SIntel 2826bb97df5SIntel retval = rte_eth_dev_start(port); 2836bb97df5SIntel if (retval < 0) 2846bb97df5SIntel return retval; 2856bb97df5SIntel 2866bb97df5SIntel rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]); 2876bb97df5SIntel printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8 2886bb97df5SIntel " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n", 2896bb97df5SIntel (unsigned)port, 2906bb97df5SIntel vmdq_ports_eth_addr[port].addr_bytes[0], 2916bb97df5SIntel vmdq_ports_eth_addr[port].addr_bytes[1], 2926bb97df5SIntel vmdq_ports_eth_addr[port].addr_bytes[2], 2936bb97df5SIntel vmdq_ports_eth_addr[port].addr_bytes[3], 2946bb97df5SIntel vmdq_ports_eth_addr[port].addr_bytes[4], 2956bb97df5SIntel vmdq_ports_eth_addr[port].addr_bytes[5]); 2966bb97df5SIntel 2976bb97df5SIntel return 0; 2986bb97df5SIntel } 2996bb97df5SIntel 3006bb97df5SIntel /* Check num_pools parameter and set it if OK*/ 3016bb97df5SIntel static int 3026bb97df5SIntel vmdq_parse_num_pools(const char *q_arg) 3036bb97df5SIntel { 3046bb97df5SIntel char *end = NULL; 3056bb97df5SIntel int n; 3066bb97df5SIntel 3076bb97df5SIntel /* parse number string */ 3086bb97df5SIntel n = strtol(q_arg, &end, 10); 3096bb97df5SIntel if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 3106bb97df5SIntel return -1; 3116bb97df5SIntel 3126bb97df5SIntel num_pools = n; 3136bb97df5SIntel 3146bb97df5SIntel return 0; 3156bb97df5SIntel } 3166bb97df5SIntel 3176bb97df5SIntel 3186bb97df5SIntel static int 3196bb97df5SIntel parse_portmask(const char *portmask) 3206bb97df5SIntel { 3216bb97df5SIntel char *end = NULL; 3226bb97df5SIntel unsigned long pm; 3236bb97df5SIntel 3246bb97df5SIntel /* parse hexadecimal string */ 3256bb97df5SIntel pm = strtoul(portmask, &end, 16); 3266bb97df5SIntel if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) 3276bb97df5SIntel return -1; 3286bb97df5SIntel 3296bb97df5SIntel if (pm == 0) 3306bb97df5SIntel return -1; 3316bb97df5SIntel 3326bb97df5SIntel return pm; 3336bb97df5SIntel } 3346bb97df5SIntel 3356bb97df5SIntel /* Display usage */ 3366bb97df5SIntel static void 3376bb97df5SIntel vmdq_usage(const char *prgname) 3386bb97df5SIntel { 3396bb97df5SIntel printf("%s [EAL options] -- -p PORTMASK]\n" 3406bb97df5SIntel " --nb-pools NP: number of pools\n", 3416bb97df5SIntel prgname); 3426bb97df5SIntel } 3436bb97df5SIntel 3446bb97df5SIntel /* Parse the argument (num_pools) given in the command line of the application */ 3456bb97df5SIntel static int 3466bb97df5SIntel vmdq_parse_args(int argc, char **argv) 3476bb97df5SIntel { 3486bb97df5SIntel int opt; 3496bb97df5SIntel int option_index; 3506bb97df5SIntel unsigned i; 3516bb97df5SIntel const char *prgname = argv[0]; 3526bb97df5SIntel static struct option long_option[] = { 3536bb97df5SIntel {"nb-pools", required_argument, NULL, 0}, 3546bb97df5SIntel {NULL, 0, 0, 0} 3556bb97df5SIntel }; 3566bb97df5SIntel 3576bb97df5SIntel /* Parse command line */ 3586bb97df5SIntel while ((opt = getopt_long(argc, argv, "p:",long_option,&option_index)) != EOF) { 3596bb97df5SIntel switch (opt) { 3606bb97df5SIntel /* portmask */ 3616bb97df5SIntel case 'p': 3626bb97df5SIntel enabled_port_mask = parse_portmask(optarg); 3636bb97df5SIntel if (enabled_port_mask == 0) { 3646bb97df5SIntel printf("invalid portmask\n"); 3656bb97df5SIntel vmdq_usage(prgname); 3666bb97df5SIntel return -1; 3676bb97df5SIntel } 3686bb97df5SIntel break; 3696bb97df5SIntel case 0: 3706bb97df5SIntel if (vmdq_parse_num_pools(optarg) == -1){ 3716bb97df5SIntel printf("invalid number of pools\n"); 3726bb97df5SIntel vmdq_usage(prgname); 3736bb97df5SIntel return -1; 3746bb97df5SIntel } 3756bb97df5SIntel break; 3766bb97df5SIntel 3776bb97df5SIntel default: 3786bb97df5SIntel vmdq_usage(prgname); 3796bb97df5SIntel return -1; 3806bb97df5SIntel } 3816bb97df5SIntel } 3826bb97df5SIntel 3836bb97df5SIntel for(i = 0; i < RTE_MAX_ETHPORTS; i++) { 3846bb97df5SIntel if (enabled_port_mask & (1 << i)) 3856bb97df5SIntel ports[num_ports++] = (uint8_t)i; 3866bb97df5SIntel } 3876bb97df5SIntel 3886bb97df5SIntel if (num_ports < 2 || num_ports % 2) { 3896bb97df5SIntel printf("Current enabled port number is %u," 3906bb97df5SIntel "but it should be even and at least 2\n",num_ports); 3916bb97df5SIntel return -1; 3926bb97df5SIntel } 3936bb97df5SIntel 3946bb97df5SIntel return 0; 3956bb97df5SIntel } 3966bb97df5SIntel 3976bb97df5SIntel static void 3986bb97df5SIntel update_mac_address(struct rte_mbuf *m, unsigned dst_port) 3996bb97df5SIntel { 4006bb97df5SIntel struct ether_hdr *eth; 4016bb97df5SIntel void *tmp; 4026bb97df5SIntel 4036bb97df5SIntel eth = rte_pktmbuf_mtod(m, struct ether_hdr *); 4046bb97df5SIntel 4056bb97df5SIntel /* 02:00:00:00:00:xx */ 4066bb97df5SIntel tmp = ð->d_addr.addr_bytes[0]; 4076bb97df5SIntel *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40); 4086bb97df5SIntel 4096bb97df5SIntel /* src addr */ 4106bb97df5SIntel ether_addr_copy(&vmdq_ports_eth_addr[dst_port], ð->s_addr); 4116bb97df5SIntel } 4126bb97df5SIntel 4136bb97df5SIntel #ifndef RTE_EXEC_ENV_BAREMETAL 4146bb97df5SIntel /* When we receive a HUP signal, print out our stats */ 4156bb97df5SIntel static void 4166bb97df5SIntel sighup_handler(int signum) 4176bb97df5SIntel { 4186bb97df5SIntel unsigned q; 4196bb97df5SIntel for (q = 0; q < num_queues; q ++) { 4206bb97df5SIntel if (q % (num_queues/num_pools) == 0) 4216bb97df5SIntel printf("\nPool %u: ", q/(num_queues/num_pools)); 4226bb97df5SIntel printf("%lu ", rxPackets[ q ]); 4236bb97df5SIntel } 4246bb97df5SIntel printf("\nFinished handling signal %d\n", signum); 4256bb97df5SIntel } 4266bb97df5SIntel #endif 4276bb97df5SIntel 4286bb97df5SIntel /* 4296bb97df5SIntel * Main thread that does the work, reading from INPUT_PORT 4306bb97df5SIntel * and writing to OUTPUT_PORT 4316bb97df5SIntel */ 43213c4ebd6SBruce Richardson static int 4336bb97df5SIntel lcore_main(__attribute__((__unused__)) void* dummy) 4346bb97df5SIntel { 4356bb97df5SIntel const uint16_t lcore_id = (uint16_t)rte_lcore_id(); 4366bb97df5SIntel const uint16_t num_cores = (uint16_t)rte_lcore_count(); 4376bb97df5SIntel uint16_t core_id = 0; 4386bb97df5SIntel uint16_t startQueue, endQueue; 4396bb97df5SIntel uint16_t q, i, p; 4406bb97df5SIntel const uint16_t remainder = (uint16_t)(num_queues % num_cores); 4416bb97df5SIntel 4426bb97df5SIntel for (i = 0; i < num_cores; i ++) 4436bb97df5SIntel if (lcore_ids[i] == lcore_id) { 4446bb97df5SIntel core_id = i; 4456bb97df5SIntel break; 4466bb97df5SIntel } 4476bb97df5SIntel 4486bb97df5SIntel if (remainder != 0) { 4496bb97df5SIntel if (core_id < remainder) { 4506bb97df5SIntel startQueue = (uint16_t)(core_id * (num_queues/num_cores + 1)); 4516bb97df5SIntel endQueue = (uint16_t)(startQueue + (num_queues/num_cores) + 1); 4526bb97df5SIntel } else { 4536bb97df5SIntel startQueue = (uint16_t)(core_id * (num_queues/num_cores) + remainder); 4546bb97df5SIntel endQueue = (uint16_t)(startQueue + (num_queues/num_cores)); 4556bb97df5SIntel } 4566bb97df5SIntel } else { 4576bb97df5SIntel startQueue = (uint16_t)(core_id * (num_queues/num_cores)); 4586bb97df5SIntel endQueue = (uint16_t)(startQueue + (num_queues/num_cores)); 4596bb97df5SIntel } 4606bb97df5SIntel 4616bb97df5SIntel printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id, 4626bb97df5SIntel (unsigned)lcore_id, startQueue, endQueue - 1); 4636bb97df5SIntel 46413c4ebd6SBruce Richardson if (startQueue == endQueue) { 46513c4ebd6SBruce Richardson printf("lcore %u has nothing to do\n", lcore_id); 46613c4ebd6SBruce Richardson return (0); 46713c4ebd6SBruce Richardson } 46813c4ebd6SBruce Richardson 4696bb97df5SIntel for (;;) { 4706bb97df5SIntel struct rte_mbuf *buf[MAX_PKT_BURST]; 4716bb97df5SIntel const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]); 4726bb97df5SIntel 4736bb97df5SIntel for (p = 0; p < num_ports; p++) { 4746bb97df5SIntel const uint8_t sport = ports[p]; 4756bb97df5SIntel const uint8_t dport = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */ 4766bb97df5SIntel 4776bb97df5SIntel if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID)) 4786bb97df5SIntel continue; 4796bb97df5SIntel 4806bb97df5SIntel for (q = startQueue; q < endQueue; q++) { 4816bb97df5SIntel const uint16_t rxCount = rte_eth_rx_burst(sport, 4826bb97df5SIntel q, buf, buf_size); 4836bb97df5SIntel 4846bb97df5SIntel if (unlikely(rxCount == 0)) 4856bb97df5SIntel continue; 4866bb97df5SIntel 4876bb97df5SIntel rxPackets[q] += rxCount; 4886bb97df5SIntel 4896bb97df5SIntel for (i = 0; i < rxCount; i++) 4906bb97df5SIntel update_mac_address(buf[i], dport); 4916bb97df5SIntel 4926bb97df5SIntel const uint16_t txCount = rte_eth_tx_burst(dport, 493c2bebe5fSOuyang Changchun core_id, buf, rxCount); 4946bb97df5SIntel 4956bb97df5SIntel if (txCount != rxCount) { 4966bb97df5SIntel for (i = txCount; i < rxCount; i++) 4976bb97df5SIntel rte_pktmbuf_free(buf[i]); 4986bb97df5SIntel } 4996bb97df5SIntel } 5006bb97df5SIntel } 5016bb97df5SIntel } 5026bb97df5SIntel } 5036bb97df5SIntel 5046bb97df5SIntel /* 5056bb97df5SIntel * Update the global var NUM_PORTS and array PORTS according to system ports number 5066bb97df5SIntel * and return valid ports number 5076bb97df5SIntel */ 5086bb97df5SIntel static unsigned check_ports_num(unsigned nb_ports) 5096bb97df5SIntel { 5106bb97df5SIntel unsigned valid_num_ports = num_ports; 5116bb97df5SIntel unsigned portid; 5126bb97df5SIntel 5136bb97df5SIntel if (num_ports > nb_ports) { 5146bb97df5SIntel printf("\nSpecified port number(%u) exceeds total system port number(%u)\n", 5156bb97df5SIntel num_ports, nb_ports); 5166bb97df5SIntel num_ports = nb_ports; 5176bb97df5SIntel } 5186bb97df5SIntel 5196bb97df5SIntel for (portid = 0; portid < num_ports; portid ++) { 5206bb97df5SIntel if (ports[portid] >= nb_ports) { 5216bb97df5SIntel printf("\nSpecified port ID(%u) exceeds max system port ID(%u)\n", 5226bb97df5SIntel ports[portid], (nb_ports - 1)); 5236bb97df5SIntel ports[portid] = INVALID_PORT_ID; 5246bb97df5SIntel valid_num_ports --; 5256bb97df5SIntel } 5266bb97df5SIntel } 5276bb97df5SIntel return valid_num_ports; 5286bb97df5SIntel } 5296bb97df5SIntel 5306bb97df5SIntel /* Main function, does initialisation and calls the per-lcore functions */ 5316bb97df5SIntel int 5326bb97df5SIntel MAIN(int argc, char *argv[]) 5336bb97df5SIntel { 5346bb97df5SIntel struct rte_mempool *mbuf_pool; 5356bb97df5SIntel unsigned lcore_id, core_id = 0; 5366bb97df5SIntel int ret; 5376bb97df5SIntel unsigned nb_ports, valid_num_ports; 5386bb97df5SIntel uint8_t portid; 5396bb97df5SIntel 5406bb97df5SIntel #ifndef RTE_EXEC_ENV_BAREMETAL 5416bb97df5SIntel signal(SIGHUP, sighup_handler); 5426bb97df5SIntel #endif 5436bb97df5SIntel 5446bb97df5SIntel /* init EAL */ 5456bb97df5SIntel ret = rte_eal_init(argc, argv); 5466bb97df5SIntel if (ret < 0) 5476bb97df5SIntel rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); 5486bb97df5SIntel argc -= ret; 5496bb97df5SIntel argv += ret; 5506bb97df5SIntel 5516bb97df5SIntel /* parse app arguments */ 5526bb97df5SIntel ret = vmdq_parse_args(argc, argv); 5536bb97df5SIntel if (ret < 0) 5546bb97df5SIntel rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n"); 5556bb97df5SIntel 5566bb97df5SIntel for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++) 5576bb97df5SIntel if (rte_lcore_is_enabled(lcore_id)) 5586bb97df5SIntel lcore_ids[core_id ++] = lcore_id; 5596bb97df5SIntel 5606bb97df5SIntel if (rte_lcore_count() > RTE_MAX_LCORE) 5616bb97df5SIntel rte_exit(EXIT_FAILURE,"Not enough cores\n"); 5626bb97df5SIntel 5636bb97df5SIntel nb_ports = rte_eth_dev_count(); 5646bb97df5SIntel if (nb_ports > RTE_MAX_ETHPORTS) 5656bb97df5SIntel nb_ports = RTE_MAX_ETHPORTS; 5666bb97df5SIntel 5676bb97df5SIntel /* 5686bb97df5SIntel * Update the global var NUM_PORTS and global array PORTS 5696bb97df5SIntel * and get value of var VALID_NUM_PORTS according to system ports number 5706bb97df5SIntel */ 5716bb97df5SIntel valid_num_ports = check_ports_num(nb_ports); 5726bb97df5SIntel 5736bb97df5SIntel if (valid_num_ports < 2 || valid_num_ports % 2) { 5746bb97df5SIntel printf("Current valid ports number is %u\n", valid_num_ports); 5756bb97df5SIntel rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n"); 5766bb97df5SIntel } 5776bb97df5SIntel 5786bb97df5SIntel mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS_PER_PORT * nb_ports, 5796bb97df5SIntel MBUF_SIZE, MBUF_CACHE_SIZE, 5806bb97df5SIntel sizeof(struct rte_pktmbuf_pool_private), 5816bb97df5SIntel rte_pktmbuf_pool_init, NULL, 5826bb97df5SIntel rte_pktmbuf_init, NULL, 5836bb97df5SIntel rte_socket_id(), 0); 5846bb97df5SIntel if (mbuf_pool == NULL) 5856bb97df5SIntel rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); 5866bb97df5SIntel 5876bb97df5SIntel /* initialize all ports */ 5886bb97df5SIntel for (portid = 0; portid < nb_ports; portid++) { 5896bb97df5SIntel /* skip ports that are not enabled */ 5906bb97df5SIntel if ((enabled_port_mask & (1 << portid)) == 0) { 5916bb97df5SIntel printf("\nSkipping disabled port %d\n", portid); 5926bb97df5SIntel continue; 5936bb97df5SIntel } 5946bb97df5SIntel if (port_init(portid, mbuf_pool) != 0) 5956bb97df5SIntel rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n"); 5966bb97df5SIntel } 5976bb97df5SIntel 5986bb97df5SIntel /* call lcore_main() on every lcore */ 5996bb97df5SIntel rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER); 6006bb97df5SIntel RTE_LCORE_FOREACH_SLAVE(lcore_id) { 6016bb97df5SIntel if (rte_eal_wait_lcore(lcore_id) < 0) 6026bb97df5SIntel return -1; 6036bb97df5SIntel } 6046bb97df5SIntel 6056bb97df5SIntel return 0; 6066bb97df5SIntel } 607