/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>

#include "main.h"

#ifndef MAX_QUEUES
#define MAX_QUEUES 128
#endif

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

/*
 * Calculate the number of buffers needed per port.
 */
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES * RTE_TEST_RX_DESC_DEFAULT) + \
			(num_switching_cores * MAX_PKT_BURST) + \
			(num_switching_cores * RTE_TEST_TX_DESC_DEFAULT) + \
			(num_switching_cores * MBUF_CACHE_SIZE))

#define MBUF_CACHE_SIZE	128
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE
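
/*
 * Worked example (illustrative, not part of the original code): with the
 * defaults used below -- MAX_QUEUES = 128, RTE_TEST_RX_DESC_DEFAULT = 1024,
 * MAX_PKT_BURST = 32, RTE_TEST_TX_DESC_DEFAULT = 512, MBUF_CACHE_SIZE = 128 --
 * and num_switching_cores = 2, the pool holds
 *
 *   128 * 1024 + 2 * 32 + 2 * 512 + 2 * 128 = 132416 mbufs per port,
 *
 * so the sizing is dominated by the RX descriptor rings of the VMDQ queues.
 */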

/*
 * No frame data buffers allocated from the host are required for the
 * zero copy implementation: the guest allocates the frame data buffers
 * and vhost uses them directly.
 */
#define VIRTIO_DESCRIPTOR_LEN_ZCP	RTE_MBUF_DEFAULT_DATAROOM
#define MBUF_DATA_SIZE_ZCP		RTE_MBUF_DEFAULT_BUF_SIZE
#define MBUF_CACHE_SIZE_ZCP 0

#define MAX_PKT_BURST 32	/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4	/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING	0
#define DEVICE_RX		1
#define DEVICE_SAFE_REMOVE	2

/* Config_core_flag status definitions. */
#define REQUEST_DEV_REMOVAL	1
#define ACK_DEV_REMOVAL		0

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

/*
 * These two macros need refining for the legacy and DPDK based front ends:
 * max vring avail descriptors/entries from the guest minus MAX_PKT_BURST,
 * then adjusted to a power of 2.
 */
/*
 * For the legacy front end, 128 descriptors:
 * half for virtio headers, the other half for mbufs.
 */
#define RTE_TEST_RX_DESC_DEFAULT_ZCP 32	/* legacy: 32, DPDK virt FE: 128. */
#define RTE_TEST_TX_DESC_DEFAULT_ZCP 64	/* legacy: 64, DPDK virt FE: 64. */

/* Get first 4 bytes in mbuf headroom. */
#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
		+ sizeof(struct rte_mbuf)))

/* true if x is a power of 2 */
#define POWEROF2(x) ((((x)-1) & (x)) == 0)

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 10

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL

/* Number of descriptors per cacheline. */
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
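
/*
 * Worked example (illustrative, assuming a 64-byte cache line): struct
 * vring_desc is 16 bytes (8-byte addr, 4-byte len, 2-byte flags, 2-byte
 * next), so DESC_PER_CACHELINE evaluates to 64 / 16 = 4 and the data path
 * can process descriptors in cache-line-sized groups of four.
 */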

#define MBUF_EXT_MEM(mb)	(rte_mbuf_from_indirect(mb) != (mb))

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* Number of switching cores enabled */
static uint32_t num_switching_cores = 0;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

/*
 * Enable zero copy: packet buffers are DMAed directly to/from the hw
 * descriptors. Disabled by default.
 */
static uint32_t zero_copy;
static int mergeable;

/* Do vlan strip on host, enabled by default */
static uint32_t vlan_strip = 1;

/* number of descriptors to apply */
static uint32_t num_rx_descriptor = RTE_TEST_RX_DESC_DEFAULT_ZCP;
static uint32_t num_tx_descriptor = RTE_TEST_TX_DESC_DEFAULT_ZCP;

/* max ring descriptors; ixgbe, i40e and e1000 all support 4096. */
#define MAX_RING_DESC 4096

struct vpool {
	struct rte_mempool *pool;
	struct rte_ring *ring;
	uint32_t buf_size;
} vpool_array[MAX_QUEUES+MAX_QUEUES];

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* The type of host physical address translated from guest physical address. */
typedef enum {
	PHYS_ADDR_CONTINUOUS = 0,
	PHYS_ADDR_CROSS_SUBREG = 1,
	PHYS_ADDR_INVALID = 2,
	PHYS_ADDR_LAST
} hpa_type;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";

/* Empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * This is necessary for 1G NICs such as the I350; it fixes
		 * a bug where IPv4 forwarding in the guest could not
		 * forward packets from one virtio dev to another virtio dev.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
	},

	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;

static const uint16_t external_pkt_default_vlan_tag = 2000;
const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* heads for the main used and free linked lists for the data path. */
static struct virtio_net_data_ll *ll_root_used = NULL;
static struct virtio_net_data_ll *ll_root_free = NULL;
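
/*
 * Illustrative mapping (a sketch, not from the original code): the guest
 * with device_fh == 3 is assigned vlan_tags[3] == 1003 and VMDQ pool 3,
 * so frames arriving on the physical port with VLAN tag 1003 are steered
 * by the NIC directly to that guest's RX queue(s).
 */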

/*
 * Array of data core structures containing information on
 * individual core linked lists.
 */
static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

/* TX queue for each virtio device for zero copy. */
struct mbuf_table tx_queue_zcp[MAX_QUEUES];

/* Vlan header struct used to insert vlan tags on TX. */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};

/* Header lengths. */
#define VLAN_HLEN	4
#define VLAN_ETH_HLEN	18

/* Per-device statistics struct */
struct device_statistics {
	uint64_t tx_total;
	rte_atomic64_t rx_total_atomic;
	uint64_t rx_total;
	uint64_t tx;
	rte_atomic64_t rx_atomic;
	uint64_t rx;
} __rte_cache_aligned;
struct device_statistics dev_statistics[MAX_DEVICES];

/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}
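
/*
 * Sketch of the result (illustrative): with num_devices == 8, the loop in
 * get_eth_conf() above fills in
 *
 *   conf.pool_map[0] = { .vlan_id = 1000, .pools = 0x01 };
 *   ...
 *   conf.pool_map[7] = { .vlan_id = 1007, .pools = 0x80 };
 *
 * i.e. each VLAN ID is bound to exactly one VMDQ pool bit.
 */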

/*
 * Validate the device number according to the max pool number taken from
 * dev_info. If the device number is invalid, give an error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
		return -1;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	rte_eth_dev_info_get(port, &dev_info);

	if (dev_info.max_rx_queues > MAX_QUEUES) {
		rte_exit(EXIT_FAILURE,
			"please define MAX_QUEUES no less than %u in %s\n",
			dev_info.max_rx_queues, __FILE__);
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Enable vlan offload */
	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

	/*
	 * Zero copy defers queue RX/TX start to the time when the guest
	 * finishes its startup and packet buffers from that guest are
	 * available.
	 */
	if (zero_copy) {
		rxconf->rx_deferred_start = 1;
		rxconf->rx_drop_en = 0;
		txconf->tx_deferred_start = 1;
	}

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	if (zero_copy) {
		rx_ring_size = num_rx_descriptor;
		tx_ring_size = num_tx_descriptor;
		tx_rings = dev_info.max_tx_queues;
	} else {
		rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
		tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
		tx_rings = (uint16_t)rte_lcore_count();
	}

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;
	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);
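
	/*
	 * Worked example (hypothetical dev_info values): if the NIC reported
	 * max_rx_queues = 130, vmdq_queue_num = 128 and max_vmdq_pools = 64,
	 * then num_pf_queues = 130 - 128 = 2, queues_per_pool = 128 / 64 = 2,
	 * and with num_devices = 64 we get num_vmdq_queues = 64 * 2 = 128.
	 * Pool N then owns the queues starting at
	 * vmdq_queue_base + N * queues_per_pool.
	 */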

	if (port >= rte_eth_dev_count())
		return -1;

	if (enable_tx_csum == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);

	if (enable_tso == 0) {
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
	}

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						vpool_array[q].pool);
		if (retval < 0)
			return retval;
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n");
		return retval;
	}

	if (promiscuous)
		rte_eth_promiscuous_enable(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}

/*
 * Set character device basename.
 */
static int
us_vhost_parse_basename(const char *q_arg)
{
	/*
	 * strnlen() is capped at MAX_BASENAME_SZ and can never exceed it,
	 * so compare with >= to reject names that would be truncated.
	 */
	if (strnlen(q_arg, MAX_BASENAME_SZ) >= MAX_BASENAME_SZ)
		return -1;
	else
		snprintf((char *)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);

	return 0;
}

/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}

/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
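
/*
 * Usage sketch (illustrative): parse_portmask("0x3") returns 3, enabling
 * ports 0 and 1; parse_portmask("0") and parse_portmask("zz") both return
 * -1, so the caller can reject the option. parse_num_opt() behaves the
 * same way for decimal values capped at max_valid_value.
 */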

/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--dev-basename <name>\n"
	"		--nb-devices ND\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on RX. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Takes effect only if RX retries are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on RX. Takes effect only if RX retries are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--dev-basename: The basename to be used for the character device.\n"
	"		--zero-copy [0|1]: disable(default)/enable rx/tx "
		"zero copy\n"
	"		--rx-desc-num [0-N]: the number of descriptors on rx, "
		"used only when zero copy is enabled.\n"
	"		--tx-desc-num [0-N]: the number of descriptors on tx, "
		"used only when zero copy is enabled.\n"
	"		--tx-csum [0|1]: disable/enable TX checksum offload.\n"
	"		--tso [0|1]: disable/enable TCP segmentation offload.\n",
	       prgname);
}

/*
 * Parse the arguments given on the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"vlan-strip", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"dev-basename", required_argument, NULL, 0},
		{"zero-copy", required_argument, NULL, 0},
		{"rx-desc-num", required_argument, NULL, 0},
		{"tx-desc-num", required_argument, NULL, 0},
		{"tx-csum", required_argument, NULL, 0},
		{"tso", required_argument, NULL, 0},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			/* parse_portmask() signals errors with -1, which must
			 * be caught before storing into the unsigned mask. */
			ret = parse_portmask(optarg);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			enabled_port_mask = ret;
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;
			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);

			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					vm2vm_mode = (vm2vm_type)ret;
				}
			}

			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_retry = ret;
				}
			}

			/* Enable/disable TX checksum offload. */
			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tx_csum = ret;
			}

			/* Enable/disable TSO offload. */
			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					enable_tso = ret;
			}

			/* Specify the retry delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_delay_time = ret;
				}
			}

			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					burst_rx_retry_num = ret;
				}
			}

			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					mergeable = !!ret;
					if (ret) {
						vmdq_conf_default.rxmode.jumbo_frame = 1;
						vmdq_conf_default.rxmode.max_rx_pkt_len
							= JUMBO_FRAME_MAX_SIZE;
					}
				}
			}

			/* Enable/disable RX VLAN strip on host. */
			if (!strncmp(long_option[option_index].name,
					"vlan-strip", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for VLAN strip [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					vlan_strip = !!ret;
					vmdq_conf_default.rxmode.hw_vlan_strip =
						vlan_strip;
				}
			}

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					enable_stats = ret;
				}
			}

			/* Set character device basename. */
			if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_basename(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
					us_vhost_usage(prgname);
					return -1;
				}
			}

			/* Enable/disable rx/tx zero copy. */
			if (!strncmp(long_option[option_index].name,
				"zero-copy", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument"
						" for zero-copy [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				} else
					zero_copy = ret;
			}

			/* Specify the descriptor number on RX. */
			if (!strncmp(long_option[option_index].name,
				"rx-desc-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, MAX_RING_DESC);
				if ((ret == -1) || (!POWEROF2(ret))) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-desc-num [0-N], "
						"power of 2 required.\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					num_rx_descriptor = ret;
				}
			}

			/* Specify the descriptor number on TX. */
			if (!strncmp(long_option[option_index].name,
				"tx-desc-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, MAX_RING_DESC);
				if ((ret == -1) || (!POWEROF2(ret))) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for tx-desc-num [0-N], "
						"power of 2 required.\n");
					us_vhost_usage(prgname);
					return -1;
				} else {
					num_tx_descriptor = ret;
				}
			}

			break;

		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports,
			MAX_SUP_PORTS);
		return -1;
	}

	if ((zero_copy == 1) && (vm2vm_mode == VM2VM_SOFTWARE)) {
		RTE_LOG(INFO, VHOST_PORT,
			"Vhost zero copy doesn't support software vm2vm, "
			"please specify 'vm2vm 2' to use hardware vm2vm.\n");
		return -1;
	}

	if ((zero_copy == 1) && (vmdq_conf_default.rxmode.jumbo_frame == 1)) {
		RTE_LOG(INFO, VHOST_PORT,
			"Vhost zero copy doesn't support jumbo frames, "
			"please specify '--mergeable 0' to disable the "
			"mergeable feature.\n");
		return -1;
	}

	return 0;
}
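
/*
 * Example invocation (illustrative; the EAL core/memory arguments depend
 * on the deployment):
 *
 *   ./build/vhost-switch -c 0x3 -n 4 -- -p 0x1 --mergeable 1 \
 *       --vm2vm 1 --stats 2 --dev-basename vhost-net
 *
 * Note that --mergeable 1 also turns on jumbo frames, which is why the
 * checks above reject it in combination with --zero-copy 1.
 */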

/*
 * Update the global var NUM_PORTS and array PORTS according to system ports
 * number and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

/*
 * Macro to print out packet contents. Wrapped in a debug define so that the
 * data path is not affected when debug is disabled.
 */
#ifdef DEBUG
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
	else \
		snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
	\
	LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

/*
 * Function to convert guest physical addresses to vhost physical addresses.
 * This is used to convert virtio buffer addresses.
 */
static inline uint64_t __attribute__((always_inline))
gpa_to_hpa(struct vhost_dev *vdev, uint64_t guest_pa,
	uint32_t buf_len, hpa_type *addr_type)
{
	struct virtio_memory_regions_hpa *region;
	uint32_t regionidx;
	uint64_t vhost_pa = 0;

	*addr_type = PHYS_ADDR_INVALID;

	for (regionidx = 0; regionidx < vdev->nregions_hpa; regionidx++) {
		region = &vdev->regions_hpa[regionidx];
		if ((guest_pa >= region->guest_phys_address) &&
			(guest_pa <= region->guest_phys_address_end)) {
			vhost_pa = region->host_phys_addr_offset + guest_pa;
			if (likely((guest_pa + buf_len - 1)
				<= region->guest_phys_address_end))
				*addr_type = PHYS_ADDR_CONTINUOUS;
			else
				*addr_type = PHYS_ADDR_CROSS_SUBREG;
			break;
		}
	}

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| HPA %p\n",
		vdev->dev->device_fh, (void *)(uintptr_t)guest_pa,
		(void *)(uintptr_t)vhost_pa);

	return vhost_pa;
}
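
/*
 * Usage sketch (illustrative, not from the original code): the zero copy
 * RX path translates every guest buffer address before handing it to the
 * NIC, e.g.
 *
 *   hpa_type addr_type;
 *   uint64_t hpa = gpa_to_hpa(vdev, guest_buf_addr, buf_len, &addr_type);
 *   if (addr_type != PHYS_ADDR_CONTINUOUS)
 *       ; // buffer spans host regions (or is invalid): do not DMA it as one piece
 */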

/*
 * Compares a packet destination MAC address to a device MAC address.
 * Only the low 48 bits of the 64-bit loads (MAC_ADDR_CMP) take part
 * in the comparison.
 */
static inline int __attribute__((always_inline))
ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
{
	return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
}

/*
 * This function learns the MAC address of the device and registers this
 * along with a vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct virtio_net_data_ll *dev_ll;
	struct virtio_net *dev = vdev->dev;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dev_ll = ll_root_used;

	while (dev_ll != NULL) {
		if (ether_addr_cmp(&(pkt_hdr->s_addr), &dev_ll->vdev->mac_address)) {
			RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
			return -1;
		}
		dev_ll = dev_ll->next;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[dev->device_fh];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
		dev->device_fh,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)dev->device_fh + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
			dev->device_fh);

	/* Enable stripping of the vlan tag as we handle routing. */
	if (vlan_strip)
		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
			(uint16_t)vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}
Ensures that nothing is adding buffers to the RX 1007d19533e8SHuawei Xie * queue before disabling RX on the device. 1008d19533e8SHuawei Xie */ 1009d19533e8SHuawei Xie static inline void 1010e571e6b4SHuawei Xie unlink_vmdq(struct vhost_dev *vdev) 1011d19533e8SHuawei Xie { 1012d19533e8SHuawei Xie unsigned i = 0; 1013d19533e8SHuawei Xie unsigned rx_count; 1014d19533e8SHuawei Xie struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 1015d19533e8SHuawei Xie 1016e571e6b4SHuawei Xie if (vdev->ready == DEVICE_RX) { 1017d19533e8SHuawei Xie /*clear MAC and VLAN settings*/ 1018e571e6b4SHuawei Xie rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address); 1019d19533e8SHuawei Xie for (i = 0; i < 6; i++) 1020e571e6b4SHuawei Xie vdev->mac_address.addr_bytes[i] = 0; 1021d19533e8SHuawei Xie 1022e571e6b4SHuawei Xie vdev->vlan_tag = 0; 1023d19533e8SHuawei Xie 1024d19533e8SHuawei Xie /*Clear out the receive buffers*/ 1025d19533e8SHuawei Xie rx_count = rte_eth_rx_burst(ports[0], 1026e571e6b4SHuawei Xie (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST); 1027d19533e8SHuawei Xie 1028d19533e8SHuawei Xie while (rx_count) { 1029d19533e8SHuawei Xie for (i = 0; i < rx_count; i++) 1030d19533e8SHuawei Xie rte_pktmbuf_free(pkts_burst[i]); 1031d19533e8SHuawei Xie 1032d19533e8SHuawei Xie rx_count = rte_eth_rx_burst(ports[0], 1033e571e6b4SHuawei Xie (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST); 1034d19533e8SHuawei Xie } 1035d19533e8SHuawei Xie 1036e571e6b4SHuawei Xie vdev->ready = DEVICE_MAC_LEARNING; 1037d19533e8SHuawei Xie } 1038d19533e8SHuawei Xie } 1039d19533e8SHuawei Xie 1040d19533e8SHuawei Xie /* 1041d19533e8SHuawei Xie * Check if the packet destination MAC address is for a local device. If so then put 1042d19533e8SHuawei Xie * the packet on that devices RX queue. If not then return. 1043d19533e8SHuawei Xie */ 1044a981294bSHuawei Xie static inline int __attribute__((always_inline)) 1045e571e6b4SHuawei Xie virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m) 1046d19533e8SHuawei Xie { 1047d19533e8SHuawei Xie struct virtio_net_data_ll *dev_ll; 1048d19533e8SHuawei Xie struct ether_hdr *pkt_hdr; 1049d19533e8SHuawei Xie uint64_t ret = 0; 1050e571e6b4SHuawei Xie struct virtio_net *dev = vdev->dev; 1051e571e6b4SHuawei Xie struct virtio_net *tdev; /* destination virito device */ 1052d19533e8SHuawei Xie 1053d19533e8SHuawei Xie pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); 1054d19533e8SHuawei Xie 1055d19533e8SHuawei Xie /*get the used devices list*/ 1056d19533e8SHuawei Xie dev_ll = ll_root_used; 1057d19533e8SHuawei Xie 1058d19533e8SHuawei Xie while (dev_ll != NULL) { 1059e571e6b4SHuawei Xie if ((dev_ll->vdev->ready == DEVICE_RX) && ether_addr_cmp(&(pkt_hdr->d_addr), 1060e571e6b4SHuawei Xie &dev_ll->vdev->mac_address)) { 1061d19533e8SHuawei Xie 1062d19533e8SHuawei Xie /* Drop the packet if the TX packet is destined for the TX device. */ 1063e571e6b4SHuawei Xie if (dev_ll->vdev->dev->device_fh == dev->device_fh) { 1064d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. 
				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
					dev->device_fh);
				return 0;
			}
			tdev = dev_ll->vdev->dev;

			LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", tdev->device_fh);

			if (unlikely(dev_ll->vdev->remove)) {
				/* drop the packet if the device is marked for removal */
				LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", tdev->device_fh);
			} else {
				/* send the packet to the local virtio device */
				ret = rte_vhost_enqueue_burst(tdev, VIRTIO_RXQ, &m, 1);
				if (enable_stats) {
					rte_atomic64_add(
						&dev_statistics[tdev->device_fh].rx_total_atomic,
						1);
					rte_atomic64_add(
						&dev_statistics[tdev->device_fh].rx_atomic,
						ret);
					dev_statistics[dev->device_fh].tx_total++;
					dev_statistics[dev->device_fh].tx += ret;
				}
			}

			return 0;
		}
		dev_ll = dev_ll->next;
	}

	return -1;
}

/*
 * Check if the destination MAC of a packet is one local VM,
 * and get its vlan tag, and offset if it is.
 */
static inline int __attribute__((always_inline))
find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct virtio_net_data_ll *dev_ll = ll_root_used;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	while (dev_ll != NULL) {
		if ((dev_ll->vdev->ready == DEVICE_RX)
			&& ether_addr_cmp(&(pkt_hdr->d_addr),
				&dev_ll->vdev->mac_address)) {
			/*
			 * Drop the packet if the TX packet is
			 * destined for the TX device.
			 */
			if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
				LOG_DEBUG(VHOST_DATA,
					"(%"PRIu64") TX: Source and destination"
					" MAC addresses are the same. Dropping "
					"packet.\n",
					dev_ll->vdev->dev->device_fh);
				return -1;
			}

			/*
			 * HW vlan strip reduces the packet length by the
			 * size of the vlan tag, so restore the length by
			 * adding the tag size back.
			 */
1131e44fb8a4SOuyang Changchun */ 113272ec8d77SOuyang Changchun *offset = VLAN_HLEN; 113372ec8d77SOuyang Changchun *vlan_tag = 1134d19533e8SHuawei Xie (uint16_t) 1135e571e6b4SHuawei Xie vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh]; 1136d19533e8SHuawei Xie 1137d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1138d19533e8SHuawei Xie "(%"PRIu64") TX: pkt to local VM device id:" 1139d19533e8SHuawei Xie "(%"PRIu64") vlan tag: %d.\n", 1140e571e6b4SHuawei Xie dev->device_fh, dev_ll->vdev->dev->device_fh, 1141e6eff1baSOuyang Changchun (int)*vlan_tag); 1142d19533e8SHuawei Xie 1143d19533e8SHuawei Xie break; 1144d19533e8SHuawei Xie } 1145d19533e8SHuawei Xie dev_ll = dev_ll->next; 1146d19533e8SHuawei Xie } 114772ec8d77SOuyang Changchun return 0; 114872ec8d77SOuyang Changchun } 114972ec8d77SOuyang Changchun 1150*9fd72e3cSJijiang Liu static uint16_t 1151*9fd72e3cSJijiang Liu get_psd_sum(void *l3_hdr, uint64_t ol_flags) 1152*9fd72e3cSJijiang Liu { 1153*9fd72e3cSJijiang Liu if (ol_flags & PKT_TX_IPV4) 1154*9fd72e3cSJijiang Liu return rte_ipv4_phdr_cksum(l3_hdr, ol_flags); 1155*9fd72e3cSJijiang Liu else /* assume ethertype == ETHER_TYPE_IPv6 */ 1156*9fd72e3cSJijiang Liu return rte_ipv6_phdr_cksum(l3_hdr, ol_flags); 1157*9fd72e3cSJijiang Liu } 1158*9fd72e3cSJijiang Liu 1159*9fd72e3cSJijiang Liu static void virtio_tx_offload(struct rte_mbuf *m) 1160*9fd72e3cSJijiang Liu { 1161*9fd72e3cSJijiang Liu void *l3_hdr; 1162*9fd72e3cSJijiang Liu struct ipv4_hdr *ipv4_hdr = NULL; 1163*9fd72e3cSJijiang Liu struct tcp_hdr *tcp_hdr = NULL; 1164*9fd72e3cSJijiang Liu struct udp_hdr *udp_hdr = NULL; 1165*9fd72e3cSJijiang Liu struct sctp_hdr *sctp_hdr = NULL; 1166*9fd72e3cSJijiang Liu struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); 1167*9fd72e3cSJijiang Liu 1168*9fd72e3cSJijiang Liu l3_hdr = (char *)eth_hdr + m->l2_len; 1169*9fd72e3cSJijiang Liu 1170*9fd72e3cSJijiang Liu if (m->tso_segsz != 0) { 1171*9fd72e3cSJijiang Liu ipv4_hdr = (struct ipv4_hdr *)l3_hdr; 1172*9fd72e3cSJijiang Liu tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len); 1173*9fd72e3cSJijiang Liu m->ol_flags |= PKT_TX_IP_CKSUM; 1174*9fd72e3cSJijiang Liu ipv4_hdr->hdr_checksum = 0; 1175*9fd72e3cSJijiang Liu tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags); 1176*9fd72e3cSJijiang Liu return; 1177*9fd72e3cSJijiang Liu } 1178*9fd72e3cSJijiang Liu 1179*9fd72e3cSJijiang Liu if (m->ol_flags & PKT_TX_L4_MASK) { 1180*9fd72e3cSJijiang Liu switch (m->ol_flags & PKT_TX_L4_MASK) { 1181*9fd72e3cSJijiang Liu case PKT_TX_TCP_CKSUM: 1182*9fd72e3cSJijiang Liu tcp_hdr = (struct tcp_hdr *) 1183*9fd72e3cSJijiang Liu ((char *)l3_hdr + m->l3_len); 1184*9fd72e3cSJijiang Liu tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags); 1185*9fd72e3cSJijiang Liu break; 1186*9fd72e3cSJijiang Liu case PKT_TX_UDP_CKSUM: 1187*9fd72e3cSJijiang Liu udp_hdr = (struct udp_hdr *) 1188*9fd72e3cSJijiang Liu ((char *)l3_hdr + m->l3_len); 1189*9fd72e3cSJijiang Liu udp_hdr->dgram_cksum = get_psd_sum(l3_hdr, m->ol_flags); 1190*9fd72e3cSJijiang Liu break; 1191*9fd72e3cSJijiang Liu case PKT_TX_SCTP_CKSUM: 1192*9fd72e3cSJijiang Liu sctp_hdr = (struct sctp_hdr *) 1193*9fd72e3cSJijiang Liu ((char *)l3_hdr + m->l3_len); 1194*9fd72e3cSJijiang Liu sctp_hdr->cksum = 0; 1195*9fd72e3cSJijiang Liu break; 1196*9fd72e3cSJijiang Liu default: 1197*9fd72e3cSJijiang Liu break; 1198*9fd72e3cSJijiang Liu } 1199*9fd72e3cSJijiang Liu } 1200*9fd72e3cSJijiang Liu } 1201*9fd72e3cSJijiang Liu 120272ec8d77SOuyang Changchun /* 120372ec8d77SOuyang Changchun * This function routes the TX packet to the correct 
interface. This may be a local device 120472ec8d77SOuyang Changchun * or the physical port. 120572ec8d77SOuyang Changchun */ 120672ec8d77SOuyang Changchun static inline void __attribute__((always_inline)) 120772ec8d77SOuyang Changchun virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag) 120872ec8d77SOuyang Changchun { 120972ec8d77SOuyang Changchun struct mbuf_table *tx_q; 121072ec8d77SOuyang Changchun struct rte_mbuf **m_table; 121172ec8d77SOuyang Changchun unsigned len, ret, offset = 0; 121272ec8d77SOuyang Changchun const uint16_t lcore_id = rte_lcore_id(); 121372ec8d77SOuyang Changchun struct virtio_net *dev = vdev->dev; 12148b9bb988SOuyang Changchun struct ether_hdr *nh; 121572ec8d77SOuyang Changchun 121672ec8d77SOuyang Changchun /*check if destination is local VM*/ 121772ec8d77SOuyang Changchun if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) { 121872ec8d77SOuyang Changchun rte_pktmbuf_free(m); 121972ec8d77SOuyang Changchun return; 122072ec8d77SOuyang Changchun } 122172ec8d77SOuyang Changchun 1222c2ab5162SOuyang Changchun if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) { 1223c2ab5162SOuyang Changchun if (unlikely(find_local_dest(dev, m, &offset, &vlan_tag) != 0)) { 122472ec8d77SOuyang Changchun rte_pktmbuf_free(m); 122572ec8d77SOuyang Changchun return; 122672ec8d77SOuyang Changchun } 1227d19533e8SHuawei Xie } 1228d19533e8SHuawei Xie 1229d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh); 1230d19533e8SHuawei Xie 1231d19533e8SHuawei Xie /*Add packet to the port tx queue*/ 1232d19533e8SHuawei Xie tx_q = &lcore_tx_queue[lcore_id]; 1233d19533e8SHuawei Xie len = tx_q->len; 1234d19533e8SHuawei Xie 12358b9bb988SOuyang Changchun nh = rte_pktmbuf_mtod(m, struct ether_hdr *); 12368b9bb988SOuyang Changchun if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) { 12378b9bb988SOuyang Changchun /* Guest has inserted the vlan tag. */ 12388b9bb988SOuyang Changchun struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1); 12398b9bb988SOuyang Changchun uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag); 12408b9bb988SOuyang Changchun if ((vm2vm_mode == VM2VM_HARDWARE) && 12418b9bb988SOuyang Changchun (vh->vlan_tci != vlan_tag_be)) 12428b9bb988SOuyang Changchun vh->vlan_tci = vlan_tag_be; 12438b9bb988SOuyang Changchun } else { 1244*9fd72e3cSJijiang Liu m->ol_flags |= PKT_TX_VLAN_PKT; 1245e44fb8a4SOuyang Changchun 1246c2ab5162SOuyang Changchun /* 1247c2ab5162SOuyang Changchun * Find the right seg to adjust the data len when offset is 1248c2ab5162SOuyang Changchun * bigger than tail room size. 
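 * As a sketch of that adjustment: the loop below follows seg->next
 * until it finds a segment with at least 'offset' bytes of tailroom,
 * grows only that segment's data_len, and grows pkt_len once for the
 * whole chain.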
1249c2ab5162SOuyang Changchun */ 1250c2ab5162SOuyang Changchun if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) { 1251c2ab5162SOuyang Changchun if (likely(offset <= rte_pktmbuf_tailroom(m))) 12524d50b6acSHuawei Xie m->data_len += offset; 1253c2ab5162SOuyang Changchun else { 1254c2ab5162SOuyang Changchun struct rte_mbuf *seg = m; 1255c2ab5162SOuyang Changchun 1256c2ab5162SOuyang Changchun while ((seg->next != NULL) && 1257c2ab5162SOuyang Changchun (offset > rte_pktmbuf_tailroom(seg))) 1258c2ab5162SOuyang Changchun seg = seg->next; 1259c2ab5162SOuyang Changchun 1260c2ab5162SOuyang Changchun seg->data_len += offset; 1261c2ab5162SOuyang Changchun } 1262e44fb8a4SOuyang Changchun m->pkt_len += offset; 1263c2ab5162SOuyang Changchun } 1264e44fb8a4SOuyang Changchun 12654d50b6acSHuawei Xie m->vlan_tci = vlan_tag; 12668b9bb988SOuyang Changchun } 1267d19533e8SHuawei Xie 1268*9fd72e3cSJijiang Liu if ((m->ol_flags & PKT_TX_L4_MASK) || (m->ol_flags & PKT_TX_TCP_SEG)) 1269*9fd72e3cSJijiang Liu virtio_tx_offload(m); 1270*9fd72e3cSJijiang Liu 12714d50b6acSHuawei Xie tx_q->m_table[len] = m; 1272d19533e8SHuawei Xie len++; 1273d19533e8SHuawei Xie if (enable_stats) { 1274d19533e8SHuawei Xie dev_statistics[dev->device_fh].tx_total++; 1275d19533e8SHuawei Xie dev_statistics[dev->device_fh].tx++; 1276d19533e8SHuawei Xie } 1277d19533e8SHuawei Xie 1278d19533e8SHuawei Xie if (unlikely(len == MAX_PKT_BURST)) { 1279d19533e8SHuawei Xie m_table = (struct rte_mbuf **)tx_q->m_table; 1280d19533e8SHuawei Xie ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len); 1281d19533e8SHuawei Xie /* Free any buffers not handled by TX and update the port stats. */ 1282d19533e8SHuawei Xie if (unlikely(ret < len)) { 1283d19533e8SHuawei Xie do { 1284d19533e8SHuawei Xie rte_pktmbuf_free(m_table[ret]); 1285d19533e8SHuawei Xie } while (++ret < len); 1286d19533e8SHuawei Xie } 1287d19533e8SHuawei Xie 1288d19533e8SHuawei Xie len = 0; 1289d19533e8SHuawei Xie } 1290d19533e8SHuawei Xie 1291d19533e8SHuawei Xie tx_q->len = len; 1292d19533e8SHuawei Xie return; 1293d19533e8SHuawei Xie } 1294d19533e8SHuawei Xie /* 1295d19533e8SHuawei Xie * This function is called by each data core. It handles all RX/TX registered with the 1296d19533e8SHuawei Xie * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared 1297d19533e8SHuawei Xie * with all devices in the main linked list. 
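 * Roughly, each loop iteration: drain this lcore's TX queue to the
 * physical port once BURST_TX_DRAIN_US has elapsed, acknowledge any
 * pending device-removal request, then for every device poll its VMDq
 * RX queue into the guest ring and route packets drained from the
 * guest TX ring through virtio_tx_route().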
1298d19533e8SHuawei Xie */ 1299d19533e8SHuawei Xie static int 1300d19533e8SHuawei Xie switch_worker(__attribute__((unused)) void *arg) 1301d19533e8SHuawei Xie { 1302d19533e8SHuawei Xie struct rte_mempool *mbuf_pool = arg; 1303d19533e8SHuawei Xie struct virtio_net *dev = NULL; 1304e571e6b4SHuawei Xie struct vhost_dev *vdev = NULL; 1305d19533e8SHuawei Xie struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 1306d19533e8SHuawei Xie struct virtio_net_data_ll *dev_ll; 1307d19533e8SHuawei Xie struct mbuf_table *tx_q; 1308d19533e8SHuawei Xie volatile struct lcore_ll_info *lcore_ll; 1309d19533e8SHuawei Xie const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; 1310d19533e8SHuawei Xie uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0; 1311d19533e8SHuawei Xie unsigned ret, i; 1312d19533e8SHuawei Xie const uint16_t lcore_id = rte_lcore_id(); 1313d19533e8SHuawei Xie const uint16_t num_cores = (uint16_t)rte_lcore_count(); 1314d19533e8SHuawei Xie uint16_t rx_count = 0; 1315be800696SHuawei Xie uint16_t tx_count; 1316be800696SHuawei Xie uint32_t retry = 0; 1317d19533e8SHuawei Xie 1318d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id); 1319d19533e8SHuawei Xie lcore_ll = lcore_info[lcore_id].lcore_ll; 1320d19533e8SHuawei Xie prev_tsc = 0; 1321d19533e8SHuawei Xie 1322d19533e8SHuawei Xie tx_q = &lcore_tx_queue[lcore_id]; 1323d19533e8SHuawei Xie for (i = 0; i < num_cores; i++) { 1324d19533e8SHuawei Xie if (lcore_ids[i] == lcore_id) { 1325d19533e8SHuawei Xie tx_q->txq_id = i; 1326d19533e8SHuawei Xie break; 1327d19533e8SHuawei Xie } 1328d19533e8SHuawei Xie } 1329d19533e8SHuawei Xie 1330d19533e8SHuawei Xie while(1) { 1331d19533e8SHuawei Xie cur_tsc = rte_rdtsc(); 1332d19533e8SHuawei Xie /* 1333d19533e8SHuawei Xie * TX burst queue drain 1334d19533e8SHuawei Xie */ 1335d19533e8SHuawei Xie diff_tsc = cur_tsc - prev_tsc; 1336d19533e8SHuawei Xie if (unlikely(diff_tsc > drain_tsc)) { 1337d19533e8SHuawei Xie 1338d19533e8SHuawei Xie if (tx_q->len) { 1339d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len); 1340d19533e8SHuawei Xie 1341d19533e8SHuawei Xie /*Tx any packets in the queue*/ 1342d19533e8SHuawei Xie ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, 1343d19533e8SHuawei Xie (struct rte_mbuf **)tx_q->m_table, 1344d19533e8SHuawei Xie (uint16_t)tx_q->len); 1345d19533e8SHuawei Xie if (unlikely(ret < tx_q->len)) { 1346d19533e8SHuawei Xie do { 1347d19533e8SHuawei Xie rte_pktmbuf_free(tx_q->m_table[ret]); 1348d19533e8SHuawei Xie } while (++ret < tx_q->len); 1349d19533e8SHuawei Xie } 1350d19533e8SHuawei Xie 1351d19533e8SHuawei Xie tx_q->len = 0; 1352d19533e8SHuawei Xie } 1353d19533e8SHuawei Xie 1354d19533e8SHuawei Xie prev_tsc = cur_tsc; 1355d19533e8SHuawei Xie 1356d19533e8SHuawei Xie } 1357d19533e8SHuawei Xie 1358d19533e8SHuawei Xie rte_prefetch0(lcore_ll->ll_root_used); 1359d19533e8SHuawei Xie /* 1360d19533e8SHuawei Xie * Inform the configuration core that we have exited the linked list and that no devices are 1361d19533e8SHuawei Xie * in use if requested.
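 * The handshake is intentionally simple: the configuration core sets
 * REQUEST_DEV_REMOVAL and waits; once this core writes ACK_DEV_REMOVAL
 * below, it is guaranteed to hold no pointer into the linked list from
 * the previous iteration.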
1362d19533e8SHuawei Xie */ 1363d19533e8SHuawei Xie if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL) 1364d19533e8SHuawei Xie lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL; 1365d19533e8SHuawei Xie 1366d19533e8SHuawei Xie /* 1367d19533e8SHuawei Xie * Process devices 1368d19533e8SHuawei Xie */ 1369d19533e8SHuawei Xie dev_ll = lcore_ll->ll_root_used; 1370d19533e8SHuawei Xie 1371d19533e8SHuawei Xie while (dev_ll != NULL) { 1372d19533e8SHuawei Xie /*get virtio device ID*/ 1373e571e6b4SHuawei Xie vdev = dev_ll->vdev; 1374e571e6b4SHuawei Xie dev = vdev->dev; 1375d19533e8SHuawei Xie 1376364dddcdSHuawei Xie if (unlikely(vdev->remove)) { 1377d19533e8SHuawei Xie dev_ll = dev_ll->next; 1378e571e6b4SHuawei Xie unlink_vmdq(vdev); 1379e571e6b4SHuawei Xie vdev->ready = DEVICE_SAFE_REMOVE; 1380d19533e8SHuawei Xie continue; 1381d19533e8SHuawei Xie } 1382e571e6b4SHuawei Xie if (likely(vdev->ready == DEVICE_RX)) { 1383d19533e8SHuawei Xie /*Handle guest RX*/ 1384d19533e8SHuawei Xie rx_count = rte_eth_rx_burst(ports[0], 1385e571e6b4SHuawei Xie vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST); 1386d19533e8SHuawei Xie 1387d19533e8SHuawei Xie if (rx_count) { 1388be800696SHuawei Xie /* 1389be800696SHuawei Xie * If retry is enabled and the queue is full, we wait and retry to avoid packet loss. 1390be800696SHuawei Xie * Here MAX_PKT_BURST must be less than the virtio queue size 1391be800696SHuawei Xie */ 1392be800696SHuawei Xie if (enable_retry && unlikely(rx_count > rte_vring_available_entries(dev, VIRTIO_RXQ))) { 1393be800696SHuawei Xie for (retry = 0; retry < burst_rx_retry_num; retry++) { 1394be800696SHuawei Xie rte_delay_us(burst_rx_delay_time); 1395be800696SHuawei Xie if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ)) 1396be800696SHuawei Xie break; 1397be800696SHuawei Xie } 1398be800696SHuawei Xie } 1399be800696SHuawei Xie ret_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_burst, rx_count); 1400d19533e8SHuawei Xie if (enable_stats) { 1401d19533e8SHuawei Xie rte_atomic64_add( 1402e571e6b4SHuawei Xie &dev_statistics[dev_ll->vdev->dev->device_fh].rx_total_atomic, 1403d19533e8SHuawei Xie rx_count); 1404d19533e8SHuawei Xie rte_atomic64_add( 1405e571e6b4SHuawei Xie &dev_statistics[dev_ll->vdev->dev->device_fh].rx_atomic, ret_count); 1406d19533e8SHuawei Xie } 1407d19533e8SHuawei Xie while (likely(rx_count)) { 1408d19533e8SHuawei Xie rx_count--; 1409d19533e8SHuawei Xie rte_pktmbuf_free(pkts_burst[rx_count]); 1410d19533e8SHuawei Xie } 1411d19533e8SHuawei Xie 1412d19533e8SHuawei Xie } 1413d19533e8SHuawei Xie } 1414d19533e8SHuawei Xie 1415364dddcdSHuawei Xie if (likely(!vdev->remove)) { 1416d19533e8SHuawei Xie /* Handle guest TX*/ 1417be800696SHuawei Xie tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts_burst, MAX_PKT_BURST); 1418be800696SHuawei Xie /* If this is the first received packet we need to learn the MAC and setup VMDQ */ 1419be800696SHuawei Xie if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) { 1420be800696SHuawei Xie if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) { 142151ec3ae2SHuawei Xie while (tx_count) 142251ec3ae2SHuawei Xie rte_pktmbuf_free(pkts_burst[--tx_count]); 1423be800696SHuawei Xie } 1424be800696SHuawei Xie } 1425be800696SHuawei Xie while (tx_count) 14264d50b6acSHuawei Xie virtio_tx_route(vdev, pkts_burst[--tx_count], (uint16_t)dev->device_fh); 1427d19533e8SHuawei Xie } 1428d19533e8SHuawei Xie 1429d19533e8SHuawei Xie /*move to the next device in the list*/ 1430d19533e8SHuawei Xie dev_ll = dev_ll->next; 1431d19533e8SHuawei Xie }
1432d19533e8SHuawei Xie } 1433d19533e8SHuawei Xie 1434d19533e8SHuawei Xie return 0; 1435d19533e8SHuawei Xie } 1436d19533e8SHuawei Xie 1437d19533e8SHuawei Xie /* 1438d19533e8SHuawei Xie * This function gets the number of available ring entries for zero copy RX. 1439d19533e8SHuawei Xie * Only one thread will call this function for a particular virtio device, 1440d19533e8SHuawei Xie * so it is designed as a non-thread-safe function. 1441d19533e8SHuawei Xie */ 1442d19533e8SHuawei Xie static inline uint32_t __attribute__((always_inline)) 1443d19533e8SHuawei Xie get_available_ring_num_zcp(struct virtio_net *dev) 1444d19533e8SHuawei Xie { 1445d19533e8SHuawei Xie struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ]; 1446d19533e8SHuawei Xie uint16_t avail_idx; 1447d19533e8SHuawei Xie 1448d19533e8SHuawei Xie avail_idx = *((volatile uint16_t *)&vq->avail->idx); 1449d19533e8SHuawei Xie return (uint32_t)(avail_idx - vq->last_used_idx_res); 1450d19533e8SHuawei Xie } 1451d19533e8SHuawei Xie 1452d19533e8SHuawei Xie /* 1453d19533e8SHuawei Xie * This function gets available ring indexes for zero copy RX; 1454d19533e8SHuawei Xie * it will retry 'burst_rx_retry_num' times until it gets enough ring indexes. 1455d19533e8SHuawei Xie * Only one thread will call this function for a particular virtio device, 1456d19533e8SHuawei Xie * so it is designed as a non-thread-safe function. 1457d19533e8SHuawei Xie */ 1458d19533e8SHuawei Xie static inline uint32_t __attribute__((always_inline)) 1459d19533e8SHuawei Xie get_available_ring_index_zcp(struct virtio_net *dev, 1460d19533e8SHuawei Xie uint16_t *res_base_idx, uint32_t count) 1461d19533e8SHuawei Xie { 1462d19533e8SHuawei Xie struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ]; 1463d19533e8SHuawei Xie uint16_t avail_idx; 1464d19533e8SHuawei Xie uint32_t retry = 0; 1465d19533e8SHuawei Xie uint16_t free_entries; 1466d19533e8SHuawei Xie 1467d19533e8SHuawei Xie *res_base_idx = vq->last_used_idx_res; 1468d19533e8SHuawei Xie avail_idx = *((volatile uint16_t *)&vq->avail->idx); 1469d19533e8SHuawei Xie free_entries = (avail_idx - *res_base_idx); 1470d19533e8SHuawei Xie 1471d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, "(%"PRIu64") in get_available_ring_index_zcp: " 1472d19533e8SHuawei Xie "avail idx: %d, " 1473d19533e8SHuawei Xie "res base idx:%d, free entries:%d\n", 1474d19533e8SHuawei Xie dev->device_fh, avail_idx, *res_base_idx, 1475d19533e8SHuawei Xie free_entries); 1476d19533e8SHuawei Xie 1477d19533e8SHuawei Xie /* 1478d19533e8SHuawei Xie * If retry is enabled and the queue is full then we wait 1479d19533e8SHuawei Xie * and retry to avoid packet loss.
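 * Each retry sleeps burst_rx_delay_time microseconds and re-reads the
 * avail index; e.g. four retries of 15us each bound the added latency
 * to roughly 60us before count is clipped to free_entries.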
1480d19533e8SHuawei Xie */ 1481d19533e8SHuawei Xie if (enable_retry && unlikely(count > free_entries)) { 1482d19533e8SHuawei Xie for (retry = 0; retry < burst_rx_retry_num; retry++) { 1483d19533e8SHuawei Xie rte_delay_us(burst_rx_delay_time); 1484d19533e8SHuawei Xie avail_idx = *((volatile uint16_t *)&vq->avail->idx); 1485d19533e8SHuawei Xie free_entries = (avail_idx - *res_base_idx); 1486d19533e8SHuawei Xie if (count <= free_entries) 1487d19533e8SHuawei Xie break; 1488d19533e8SHuawei Xie } 1489d19533e8SHuawei Xie } 1490d19533e8SHuawei Xie 1491d19533e8SHuawei Xie /*check that we have enough buffers*/ 1492d19533e8SHuawei Xie if (unlikely(count > free_entries)) 1493d19533e8SHuawei Xie count = free_entries; 1494d19533e8SHuawei Xie 1495d19533e8SHuawei Xie if (unlikely(count == 0)) { 1496d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1497d19533e8SHuawei Xie "(%"PRIu64") Fail in get_available_ring_index_zcp: " 1498d19533e8SHuawei Xie "avail idx: %d, res base idx:%d, free entries:%d\n", 1499d19533e8SHuawei Xie dev->device_fh, avail_idx, 1500d19533e8SHuawei Xie *res_base_idx, free_entries); 1501d19533e8SHuawei Xie return 0; 1502d19533e8SHuawei Xie } 1503d19533e8SHuawei Xie 1504d19533e8SHuawei Xie vq->last_used_idx_res = *res_base_idx + count; 1505d19533e8SHuawei Xie 1506d19533e8SHuawei Xie return count; 1507d19533e8SHuawei Xie } 1508d19533e8SHuawei Xie 1509d19533e8SHuawei Xie /* 1510d19533e8SHuawei Xie * This function puts a descriptor back to the used list. 1511d19533e8SHuawei Xie */ 1512d19533e8SHuawei Xie static inline void __attribute__((always_inline)) 1513d19533e8SHuawei Xie put_desc_to_used_list_zcp(struct vhost_virtqueue *vq, uint16_t desc_idx) 1514d19533e8SHuawei Xie { 1515d19533e8SHuawei Xie uint16_t res_cur_idx = vq->last_used_idx; 1516d19533e8SHuawei Xie vq->used->ring[res_cur_idx & (vq->size - 1)].id = (uint32_t)desc_idx; 1517d19533e8SHuawei Xie vq->used->ring[res_cur_idx & (vq->size - 1)].len = 0; 1518d19533e8SHuawei Xie rte_compiler_barrier(); 1519d19533e8SHuawei Xie *(volatile uint16_t *)&vq->used->idx += 1; 1520d19533e8SHuawei Xie vq->last_used_idx += 1; 1521d19533e8SHuawei Xie 1522d19533e8SHuawei Xie /* Kick the guest if necessary. */ 1523d19533e8SHuawei Xie if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) 15249702b2b5SYuanhan Liu eventfd_write(vq->callfd, (eventfd_t)1); 1525d19533e8SHuawei Xie } 1526d19533e8SHuawei Xie 1527d19533e8SHuawei Xie /* 1528d19533e8SHuawei Xie * This function gets an available descriptor from the virtio vring and an unattached mbuf 1529d19533e8SHuawei Xie * from vpool->ring, and then attaches them together. It needs to adjust the offset 1530d19533e8SHuawei Xie * for buff_addr and phys_addr according to the PMD implementation, otherwise the 1531d19533e8SHuawei Xie * frame data may be put at the wrong location in the mbuf.
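 * As a sketch of that adjustment: for a guest buffer mapped at host
 * virtual address V, the code below sets buf_addr = V -
 * RTE_PKTMBUF_HEADROOM and data_off = RTE_PKTMBUF_HEADROOM, so a PMD
 * that writes at buf_addr + data_off lands the frame exactly at the
 * guest buffer.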
1532d19533e8SHuawei Xie */ 1533d19533e8SHuawei Xie static inline void __attribute__((always_inline)) 1534d19533e8SHuawei Xie attach_rxmbuf_zcp(struct virtio_net *dev) 1535d19533e8SHuawei Xie { 1536d19533e8SHuawei Xie uint16_t res_base_idx, desc_idx; 1537d19533e8SHuawei Xie uint64_t buff_addr, phys_addr; 1538d19533e8SHuawei Xie struct vhost_virtqueue *vq; 1539d19533e8SHuawei Xie struct vring_desc *desc; 15401befe9caSPablo de Lara void *obj = NULL; 15411befe9caSPablo de Lara struct rte_mbuf *mbuf; 1542d19533e8SHuawei Xie struct vpool *vpool; 1543d19533e8SHuawei Xie hpa_type addr_type; 1544e571e6b4SHuawei Xie struct vhost_dev *vdev = (struct vhost_dev *)dev->priv; 1545d19533e8SHuawei Xie 1546e571e6b4SHuawei Xie vpool = &vpool_array[vdev->vmdq_rx_q]; 1547d19533e8SHuawei Xie vq = dev->virtqueue[VIRTIO_RXQ]; 1548d19533e8SHuawei Xie 1549d19533e8SHuawei Xie do { 1550e571e6b4SHuawei Xie if (unlikely(get_available_ring_index_zcp(vdev->dev, &res_base_idx, 1551d19533e8SHuawei Xie 1) != 1)) 1552d19533e8SHuawei Xie return; 1553d19533e8SHuawei Xie desc_idx = vq->avail->ring[(res_base_idx) & (vq->size - 1)]; 1554d19533e8SHuawei Xie 1555d19533e8SHuawei Xie desc = &vq->desc[desc_idx]; 1556d19533e8SHuawei Xie if (desc->flags & VRING_DESC_F_NEXT) { 1557d19533e8SHuawei Xie desc = &vq->desc[desc->next]; 1558d19533e8SHuawei Xie buff_addr = gpa_to_vva(dev, desc->addr); 1559e571e6b4SHuawei Xie phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len, 1560d19533e8SHuawei Xie &addr_type); 1561d19533e8SHuawei Xie } else { 1562d19533e8SHuawei Xie buff_addr = gpa_to_vva(dev, 1563d19533e8SHuawei Xie desc->addr + vq->vhost_hlen); 1564e571e6b4SHuawei Xie phys_addr = gpa_to_hpa(vdev, 1565d19533e8SHuawei Xie desc->addr + vq->vhost_hlen, 1566d19533e8SHuawei Xie desc->len, &addr_type); 1567d19533e8SHuawei Xie } 1568d19533e8SHuawei Xie 1569d19533e8SHuawei Xie if (unlikely(addr_type == PHYS_ADDR_INVALID)) { 1570d19533e8SHuawei Xie RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Invalid frame buffer" 1571d19533e8SHuawei Xie " address found when attaching RX frame buffer" 1572d19533e8SHuawei Xie " address!\n", dev->device_fh); 1573d19533e8SHuawei Xie put_desc_to_used_list_zcp(vq, desc_idx); 1574d19533e8SHuawei Xie continue; 1575d19533e8SHuawei Xie } 1576d19533e8SHuawei Xie 1577d19533e8SHuawei Xie /* 1578d19533e8SHuawei Xie * Check if the frame buffer address from guest crosses 1579d19533e8SHuawei Xie * sub-region or not. 
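 * A buffer that straddles two host memory regions is not guaranteed
 * to be physically contiguous, so it cannot be used as a single DMA
 * target; such descriptors are returned straight to the used ring and
 * the next one is tried.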
1580d19533e8SHuawei Xie */ 1581d19533e8SHuawei Xie if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) { 1582d19533e8SHuawei Xie RTE_LOG(ERR, VHOST_DATA, 1583d19533e8SHuawei Xie "(%"PRIu64") Frame buffer address cross " 1584d19533e8SHuawei Xie "sub-region found when attaching RX frame " 1585d19533e8SHuawei Xie "buffer address!\n", 1586d19533e8SHuawei Xie dev->device_fh); 1587d19533e8SHuawei Xie put_desc_to_used_list_zcp(vq, desc_idx); 1588d19533e8SHuawei Xie continue; 1589d19533e8SHuawei Xie } 1590d19533e8SHuawei Xie } while (unlikely(phys_addr == 0)); 1591d19533e8SHuawei Xie 15921befe9caSPablo de Lara rte_ring_sc_dequeue(vpool->ring, &obj); 15931befe9caSPablo de Lara mbuf = obj; 1594d19533e8SHuawei Xie if (unlikely(mbuf == NULL)) { 1595d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1596d19533e8SHuawei Xie "(%"PRIu64") in attach_rxmbuf_zcp: " 1597d19533e8SHuawei Xie "ring_sc_dequeue fail.\n", 1598d19533e8SHuawei Xie dev->device_fh); 1599d19533e8SHuawei Xie put_desc_to_used_list_zcp(vq, desc_idx); 1600d19533e8SHuawei Xie return; 1601d19533e8SHuawei Xie } 1602d19533e8SHuawei Xie 1603d19533e8SHuawei Xie if (unlikely(vpool->buf_size > desc->len)) { 1604d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1605d19533e8SHuawei Xie "(%"PRIu64") in attach_rxmbuf_zcp: frame buffer " 1606d19533e8SHuawei Xie "length(%d) of descriptor idx: %d less than room " 1607d19533e8SHuawei Xie "size required: %d\n", 1608d19533e8SHuawei Xie dev->device_fh, desc->len, desc_idx, vpool->buf_size); 1609d19533e8SHuawei Xie put_desc_to_used_list_zcp(vq, desc_idx); 16101befe9caSPablo de Lara rte_ring_sp_enqueue(vpool->ring, obj); 1611d19533e8SHuawei Xie return; 1612d19533e8SHuawei Xie } 1613d19533e8SHuawei Xie 1614d19533e8SHuawei Xie mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM); 1615d19533e8SHuawei Xie mbuf->data_off = RTE_PKTMBUF_HEADROOM; 1616d19533e8SHuawei Xie mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM; 1617d19533e8SHuawei Xie mbuf->data_len = desc->len; 1618d19533e8SHuawei Xie MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx; 1619d19533e8SHuawei Xie 1620d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1621d19533e8SHuawei Xie "(%"PRIu64") in attach_rxmbuf_zcp: res base idx:%d, " 1622d19533e8SHuawei Xie "descriptor idx:%d\n", 1623d19533e8SHuawei Xie dev->device_fh, res_base_idx, desc_idx); 1624d19533e8SHuawei Xie 1625d19533e8SHuawei Xie __rte_mbuf_raw_free(mbuf); 1626d19533e8SHuawei Xie 1627d19533e8SHuawei Xie return; 1628d19533e8SHuawei Xie } 1629d19533e8SHuawei Xie 1630d19533e8SHuawei Xie /* 1631d19533e8SHuawei Xie * Detach an attached packet mbuf - 1632d19533e8SHuawei Xie * - restore original mbuf address and length values. 1633d19533e8SHuawei Xie * - reset pktmbuf data and data_len to their default values. 1634d19533e8SHuawei Xie * All other fields of the given packet mbuf will be left intact. 1635d19533e8SHuawei Xie * 1636d19533e8SHuawei Xie * @param m 1637d19533e8SHuawei Xie * The attached packet mbuf.
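 *
 * This is a rough analogue of rte_pktmbuf_detach(): buf_len is
 * restored to the mempool element size minus the mbuf header, and
 * data_off is reset to the default headroom (capped by buf_len).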
1638d19533e8SHuawei Xie */ 1639d19533e8SHuawei Xie static inline void pktmbuf_detach_zcp(struct rte_mbuf *m) 1640d19533e8SHuawei Xie { 1641d19533e8SHuawei Xie const struct rte_mempool *mp = m->pool; 1642355e6735SOlivier Matz void *buf = rte_mbuf_to_baddr(m); 1643d19533e8SHuawei Xie uint32_t buf_ofs; 1644d19533e8SHuawei Xie uint32_t buf_len = mp->elt_size - sizeof(*m); 1645d19533e8SHuawei Xie m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(*m); 1646d19533e8SHuawei Xie 1647d19533e8SHuawei Xie m->buf_addr = buf; 1648d19533e8SHuawei Xie m->buf_len = (uint16_t)buf_len; 1649d19533e8SHuawei Xie 1650d19533e8SHuawei Xie buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ? 1651d19533e8SHuawei Xie RTE_PKTMBUF_HEADROOM : m->buf_len; 1652d19533e8SHuawei Xie m->data_off = buf_ofs; 1653d19533e8SHuawei Xie 1654d19533e8SHuawei Xie m->data_len = 0; 1655d19533e8SHuawei Xie } 1656d19533e8SHuawei Xie 1657d19533e8SHuawei Xie /* 1658d19533e8SHuawei Xie * This function is called after packets have been transmitted. It fetches each mbuf 1659d19533e8SHuawei Xie * from vpool->pool, detaches it and puts it into vpool->ring. It also updates the 1660d19533e8SHuawei Xie * used index and kicks the guest if necessary. 1661d19533e8SHuawei Xie */ 1662d19533e8SHuawei Xie static inline uint32_t __attribute__((always_inline)) 1663d19533e8SHuawei Xie txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool) 1664d19533e8SHuawei Xie { 1665d19533e8SHuawei Xie struct rte_mbuf *mbuf; 1666d19533e8SHuawei Xie struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ]; 1667d19533e8SHuawei Xie uint32_t used_idx = vq->last_used_idx & (vq->size - 1); 1668d19533e8SHuawei Xie uint32_t index = 0; 1669d19533e8SHuawei Xie uint32_t mbuf_count = rte_mempool_count(vpool->pool); 1670d19533e8SHuawei Xie 1671d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1672d19533e8SHuawei Xie "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool before " 1673d19533e8SHuawei Xie "clean is: %d\n", 1674d19533e8SHuawei Xie dev->device_fh, mbuf_count); 1675d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1676d19533e8SHuawei Xie "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring before " 1677d19533e8SHuawei Xie "clean is : %d\n", 1678d19533e8SHuawei Xie dev->device_fh, rte_ring_count(vpool->ring)); 1679d19533e8SHuawei Xie 1680d19533e8SHuawei Xie for (index = 0; index < mbuf_count; index++) { 1681d19533e8SHuawei Xie mbuf = __rte_mbuf_raw_alloc(vpool->pool); 1682e8b9ef87SSergio Gonzalez Monroy if (likely(MBUF_EXT_MEM(mbuf))) 1683d19533e8SHuawei Xie pktmbuf_detach_zcp(mbuf); 1684d19533e8SHuawei Xie rte_ring_sp_enqueue(vpool->ring, mbuf); 1685d19533e8SHuawei Xie 1686d19533e8SHuawei Xie /* Update used index buffer information.
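 * vq->size is a power of two, so 'idx & (vq->size - 1)' acts as a
 * cheap modulo; e.g. in a 256-entry ring a free-running index of 257
 * maps to slot 1. Each reclaimed descriptor is reported with len 0
 * because the guest only needs its id back to recycle the buffer.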
*/ 1687d19533e8SHuawei Xie vq->used->ring[used_idx].id = MBUF_HEADROOM_UINT32(mbuf); 1688d19533e8SHuawei Xie vq->used->ring[used_idx].len = 0; 1689d19533e8SHuawei Xie 1690d19533e8SHuawei Xie used_idx = (used_idx + 1) & (vq->size - 1); 1691d19533e8SHuawei Xie } 1692d19533e8SHuawei Xie 1693d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1694d19533e8SHuawei Xie "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool after " 1695d19533e8SHuawei Xie "clean is: %d\n", 1696d19533e8SHuawei Xie dev->device_fh, rte_mempool_count(vpool->pool)); 1697d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1698d19533e8SHuawei Xie "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring after " 1699d19533e8SHuawei Xie "clean is : %d\n", 1700d19533e8SHuawei Xie dev->device_fh, rte_ring_count(vpool->ring)); 1701d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1702d19533e8SHuawei Xie "(%"PRIu64") in txmbuf_clean_zcp: before updated " 1703d19533e8SHuawei Xie "vq->last_used_idx:%d\n", 1704d19533e8SHuawei Xie dev->device_fh, vq->last_used_idx); 1705d19533e8SHuawei Xie 1706d19533e8SHuawei Xie vq->last_used_idx += mbuf_count; 1707d19533e8SHuawei Xie 1708d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1709d19533e8SHuawei Xie "(%"PRIu64") in txmbuf_clean_zcp: after updated " 1710d19533e8SHuawei Xie "vq->last_used_idx:%d\n", 1711d19533e8SHuawei Xie dev->device_fh, vq->last_used_idx); 1712d19533e8SHuawei Xie 1713d19533e8SHuawei Xie rte_compiler_barrier(); 1714d19533e8SHuawei Xie 1715d19533e8SHuawei Xie *(volatile uint16_t *)&vq->used->idx += mbuf_count; 1716d19533e8SHuawei Xie 1717d19533e8SHuawei Xie /* Kick guest if required. */ 1718d19533e8SHuawei Xie if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) 17199702b2b5SYuanhan Liu eventfd_write(vq->callfd, (eventfd_t)1); 1720d19533e8SHuawei Xie 1721d19533e8SHuawei Xie return 0; 1722d19533e8SHuawei Xie } 1723d19533e8SHuawei Xie 1724d19533e8SHuawei Xie /* 1725d19533e8SHuawei Xie * This function is called when a virtio device is destroyed. 1726d19533e8SHuawei Xie * It fetches each mbuf from vpool->pool, detaches it, and puts it into vpool->ring.
1727d19533e8SHuawei Xie */ 1728d19533e8SHuawei Xie static void mbuf_destroy_zcp(struct vpool *vpool) 1729d19533e8SHuawei Xie { 1730d19533e8SHuawei Xie struct rte_mbuf *mbuf = NULL; 1731d19533e8SHuawei Xie uint32_t index, mbuf_count = rte_mempool_count(vpool->pool); 1732d19533e8SHuawei Xie 1733d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG, 1734d19533e8SHuawei Xie "in mbuf_destroy_zcp: mbuf count in mempool before " 1735d19533e8SHuawei Xie "mbuf_destroy_zcp is: %d\n", 1736d19533e8SHuawei Xie mbuf_count); 1737d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG, 1738d19533e8SHuawei Xie "in mbuf_destroy_zcp: mbuf count in ring before " 1739d19533e8SHuawei Xie "mbuf_destroy_zcp is : %d\n", 1740d19533e8SHuawei Xie rte_ring_count(vpool->ring)); 1741d19533e8SHuawei Xie 1742d19533e8SHuawei Xie for (index = 0; index < mbuf_count; index++) { 1743d19533e8SHuawei Xie mbuf = __rte_mbuf_raw_alloc(vpool->pool); 1744d19533e8SHuawei Xie if (likely(mbuf != NULL)) { 1745e8b9ef87SSergio Gonzalez Monroy if (likely(MBUF_EXT_MEM(mbuf))) 1746d19533e8SHuawei Xie pktmbuf_detach_zcp(mbuf); 1747d19533e8SHuawei Xie rte_ring_sp_enqueue(vpool->ring, (void *)mbuf); 1748d19533e8SHuawei Xie } 1749d19533e8SHuawei Xie } 1750d19533e8SHuawei Xie 1751d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG, 1752d19533e8SHuawei Xie "in mbuf_destroy_zcp: mbuf count in mempool after " 1753d19533e8SHuawei Xie "mbuf_destroy_zcp is: %d\n", 1754d19533e8SHuawei Xie rte_mempool_count(vpool->pool)); 1755d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG, 1756d19533e8SHuawei Xie "in mbuf_destroy_zcp: mbuf count in ring after " 1757d19533e8SHuawei Xie "mbuf_destroy_zcp is : %d\n", 1758d19533e8SHuawei Xie rte_ring_count(vpool->ring)); 1759d19533e8SHuawei Xie } 1760d19533e8SHuawei Xie 1761d19533e8SHuawei Xie /* 1762d19533e8SHuawei Xie * This function enqueues zero-copy RX packets to the guest: it updates the used ring entries and index, and writes a virtio header for each buffer. 1763d19533e8SHuawei Xie */ 1764d19533e8SHuawei Xie static inline uint32_t __attribute__((always_inline)) 1765d19533e8SHuawei Xie virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts, 1766d19533e8SHuawei Xie uint32_t count) 1767d19533e8SHuawei Xie { 1768d19533e8SHuawei Xie struct vhost_virtqueue *vq; 1769d19533e8SHuawei Xie struct vring_desc *desc; 1770d19533e8SHuawei Xie struct rte_mbuf *buff; 1771d19533e8SHuawei Xie /* The virtio_hdr is initialised to 0. */ 1772d19533e8SHuawei Xie struct virtio_net_hdr_mrg_rxbuf virtio_hdr 1773d19533e8SHuawei Xie = {{0, 0, 0, 0, 0, 0}, 0}; 1774d19533e8SHuawei Xie uint64_t buff_hdr_addr = 0; 1775d19533e8SHuawei Xie uint32_t head[MAX_PKT_BURST], packet_len = 0; 1776d19533e8SHuawei Xie uint32_t head_idx, packet_success = 0; 1777d19533e8SHuawei Xie uint16_t res_cur_idx; 1778d19533e8SHuawei Xie 1779d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh); 1780d19533e8SHuawei Xie 1781d19533e8SHuawei Xie if (count == 0) 1782d19533e8SHuawei Xie return 0; 1783d19533e8SHuawei Xie 1784d19533e8SHuawei Xie vq = dev->virtqueue[VIRTIO_RXQ]; 1785d19533e8SHuawei Xie count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count; 1786d19533e8SHuawei Xie 1787d19533e8SHuawei Xie res_cur_idx = vq->last_used_idx; 1788d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", 1789d19533e8SHuawei Xie dev->device_fh, res_cur_idx, res_cur_idx + count); 1790d19533e8SHuawei Xie 1791d19533e8SHuawei Xie /* Retrieve all of the head indexes first to avoid caching issues.
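 * Unlike the copy path, the head indexes here come from the per-mbuf
 * stash (MBUF_HEADROOM_UINT32) written by attach_rxmbuf_zcp(), since
 * those descriptors were already reserved off the avail ring.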
*/ 1792d19533e8SHuawei Xie for (head_idx = 0; head_idx < count; head_idx++) 1793d19533e8SHuawei Xie head[head_idx] = MBUF_HEADROOM_UINT32(pkts[head_idx]); 1794d19533e8SHuawei Xie 1795d19533e8SHuawei Xie /*Prefetch descriptor index. */ 1796d19533e8SHuawei Xie rte_prefetch0(&vq->desc[head[packet_success]]); 1797d19533e8SHuawei Xie 1798d19533e8SHuawei Xie while (packet_success != count) { 1799d19533e8SHuawei Xie /* Get descriptor from available ring */ 1800d19533e8SHuawei Xie desc = &vq->desc[head[packet_success]]; 1801d19533e8SHuawei Xie 1802d19533e8SHuawei Xie buff = pkts[packet_success]; 1803d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1804d19533e8SHuawei Xie "(%"PRIu64") in dev_rx_zcp: update the used idx for " 1805d19533e8SHuawei Xie "pkt[%d] descriptor idx: %d\n", 1806d19533e8SHuawei Xie dev->device_fh, packet_success, 1807d19533e8SHuawei Xie MBUF_HEADROOM_UINT32(buff)); 1808d19533e8SHuawei Xie 1809d19533e8SHuawei Xie PRINT_PACKET(dev, 1810d19533e8SHuawei Xie (uintptr_t)(((uint64_t)(uintptr_t)buff->buf_addr) 1811d19533e8SHuawei Xie + RTE_PKTMBUF_HEADROOM), 1812d19533e8SHuawei Xie rte_pktmbuf_data_len(buff), 0); 1813d19533e8SHuawei Xie 1814d19533e8SHuawei Xie /* Buffer address translation for virtio header. */ 1815d19533e8SHuawei Xie buff_hdr_addr = gpa_to_vva(dev, desc->addr); 1816d19533e8SHuawei Xie packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen; 1817d19533e8SHuawei Xie 1818d19533e8SHuawei Xie /* 1819d19533e8SHuawei Xie * If the descriptors are chained the header and data are 1820d19533e8SHuawei Xie * placed in separate buffers. 1821d19533e8SHuawei Xie */ 1822d19533e8SHuawei Xie if (desc->flags & VRING_DESC_F_NEXT) { 1823d19533e8SHuawei Xie desc->len = vq->vhost_hlen; 1824d19533e8SHuawei Xie desc = &vq->desc[desc->next]; 1825d19533e8SHuawei Xie desc->len = rte_pktmbuf_data_len(buff); 1826d19533e8SHuawei Xie } else { 1827d19533e8SHuawei Xie desc->len = packet_len; 1828d19533e8SHuawei Xie } 1829d19533e8SHuawei Xie 1830d19533e8SHuawei Xie /* Update used ring with desc information */ 1831d19533e8SHuawei Xie vq->used->ring[res_cur_idx & (vq->size - 1)].id 1832d19533e8SHuawei Xie = head[packet_success]; 1833d19533e8SHuawei Xie vq->used->ring[res_cur_idx & (vq->size - 1)].len 1834d19533e8SHuawei Xie = packet_len; 1835d19533e8SHuawei Xie res_cur_idx++; 1836d19533e8SHuawei Xie packet_success++; 1837d19533e8SHuawei Xie 1838d19533e8SHuawei Xie /* A header is required per buffer. */ 1839d19533e8SHuawei Xie rte_memcpy((void *)(uintptr_t)buff_hdr_addr, 1840d19533e8SHuawei Xie (const void *)&virtio_hdr, vq->vhost_hlen); 1841d19533e8SHuawei Xie 1842d19533e8SHuawei Xie PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1); 1843d19533e8SHuawei Xie 1844d19533e8SHuawei Xie if (likely(packet_success < count)) { 1845d19533e8SHuawei Xie /* Prefetch descriptor index. 
*/ 1846d19533e8SHuawei Xie rte_prefetch0(&vq->desc[head[packet_success]]); 1847d19533e8SHuawei Xie } 1848d19533e8SHuawei Xie } 1849d19533e8SHuawei Xie 1850d19533e8SHuawei Xie rte_compiler_barrier(); 1851d19533e8SHuawei Xie 1852d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1853d19533e8SHuawei Xie "(%"PRIu64") in dev_rx_zcp: before update used idx: " 1854d19533e8SHuawei Xie "vq.last_used_idx: %d, vq->used->idx: %d\n", 1855d19533e8SHuawei Xie dev->device_fh, vq->last_used_idx, vq->used->idx); 1856d19533e8SHuawei Xie 1857d19533e8SHuawei Xie *(volatile uint16_t *)&vq->used->idx += count; 1858d19533e8SHuawei Xie vq->last_used_idx += count; 1859d19533e8SHuawei Xie 1860d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1861d19533e8SHuawei Xie "(%"PRIu64") in dev_rx_zcp: after update used idx: " 1862d19533e8SHuawei Xie "vq.last_used_idx: %d, vq->used->idx: %d\n", 1863d19533e8SHuawei Xie dev->device_fh, vq->last_used_idx, vq->used->idx); 1864d19533e8SHuawei Xie 1865d19533e8SHuawei Xie /* Kick the guest if necessary. */ 1866d19533e8SHuawei Xie if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) 18679702b2b5SYuanhan Liu eventfd_write(vq->callfd, (eventfd_t)1); 1868d19533e8SHuawei Xie 1869d19533e8SHuawei Xie return count; 1870d19533e8SHuawei Xie } 1871d19533e8SHuawei Xie 1872d19533e8SHuawei Xie /* 1873d19533e8SHuawei Xie * This function routes the TX packet to the correct interface. 1874d19533e8SHuawei Xie * This may be a local device or the physical port. 1875d19533e8SHuawei Xie */ 1876d19533e8SHuawei Xie static inline void __attribute__((always_inline)) 1877d19533e8SHuawei Xie virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m, 1878d19533e8SHuawei Xie uint32_t desc_idx, uint8_t need_copy) 1879d19533e8SHuawei Xie { 1880d19533e8SHuawei Xie struct mbuf_table *tx_q; 1881d19533e8SHuawei Xie struct rte_mbuf **m_table; 18821befe9caSPablo de Lara void *obj = NULL; 18831befe9caSPablo de Lara struct rte_mbuf *mbuf; 1884d19533e8SHuawei Xie unsigned len, ret, offset = 0; 1885d19533e8SHuawei Xie struct vpool *vpool; 1886d19533e8SHuawei Xie uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh]; 1887e571e6b4SHuawei Xie uint16_t vmdq_rx_q = ((struct vhost_dev *)dev->priv)->vmdq_rx_q; 1888d19533e8SHuawei Xie 1889d19533e8SHuawei Xie /*Add packet to the port tx queue*/ 1890e571e6b4SHuawei Xie tx_q = &tx_queue_zcp[vmdq_rx_q]; 1891d19533e8SHuawei Xie len = tx_q->len; 1892d19533e8SHuawei Xie 1893d19533e8SHuawei Xie /* Allocate an mbuf and populate the structure. 
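 * The mbuf is dequeued from the TX half of the vpool array (index
 * MAX_QUEUES + vmdq_rx_q) rather than allocated from a mempool, so
 * txmbuf_clean_zcp() can later detach and recycle it through the same
 * ring.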
*/ 1894e571e6b4SHuawei Xie vpool = &vpool_array[MAX_QUEUES + vmdq_rx_q]; 18951befe9caSPablo de Lara rte_ring_sc_dequeue(vpool->ring, &obj); 18961befe9caSPablo de Lara mbuf = obj; 1897d19533e8SHuawei Xie if (unlikely(mbuf == NULL)) { 1898d19533e8SHuawei Xie struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ]; 1899d19533e8SHuawei Xie RTE_LOG(ERR, VHOST_DATA, 1900d19533e8SHuawei Xie "(%"PRIu64") Failed to allocate memory for mbuf.\n", 1901d19533e8SHuawei Xie dev->device_fh); 1902d19533e8SHuawei Xie put_desc_to_used_list_zcp(vq, desc_idx); 1903d19533e8SHuawei Xie return; 1904d19533e8SHuawei Xie } 1905d19533e8SHuawei Xie 1906d19533e8SHuawei Xie if (vm2vm_mode == VM2VM_HARDWARE) { 1907d19533e8SHuawei Xie /* Avoid using a VLAN tag from any VM for an external packet, such as 1908d19533e8SHuawei Xie * vlan_tags[dev->device_fh]; otherwise it conflicts with pool 1909d19533e8SHuawei Xie * selection: the MAC address identifies it as an external packet 1910d19533e8SHuawei Xie * which should go to the network, while the VLAN tag identifies it as 1911d19533e8SHuawei Xie * a VM2VM packet that should be forwarded to another VM. Hardware 1912d19533e8SHuawei Xie * cannot resolve such an ambiguous situation, so the packet would be lost. 1913d19533e8SHuawei Xie */ 1914d19533e8SHuawei Xie vlan_tag = external_pkt_default_vlan_tag; 191572ec8d77SOuyang Changchun if (find_local_dest(dev, m, &offset, &vlan_tag) != 0) { 191672ec8d77SOuyang Changchun MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx; 1917d19533e8SHuawei Xie __rte_mbuf_raw_free(mbuf); 1918d19533e8SHuawei Xie return; 1919d19533e8SHuawei Xie } 1920d19533e8SHuawei Xie } 1921d19533e8SHuawei Xie 1922d19533e8SHuawei Xie mbuf->nb_segs = m->nb_segs; 1923d19533e8SHuawei Xie mbuf->next = m->next; 1924d19533e8SHuawei Xie mbuf->data_len = m->data_len + offset; 1925d19533e8SHuawei Xie mbuf->pkt_len = mbuf->data_len; 1926d19533e8SHuawei Xie if (unlikely(need_copy)) { 1927d19533e8SHuawei Xie /* Copy the packet contents to the mbuf. */ 1928d19533e8SHuawei Xie rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), 1929d19533e8SHuawei Xie rte_pktmbuf_mtod(m, void *), 1930d19533e8SHuawei Xie m->data_len); 1931d19533e8SHuawei Xie } else { 1932d19533e8SHuawei Xie mbuf->data_off = m->data_off; 1933d19533e8SHuawei Xie mbuf->buf_physaddr = m->buf_physaddr; 1934d19533e8SHuawei Xie mbuf->buf_addr = m->buf_addr; 1935d19533e8SHuawei Xie } 1936*9fd72e3cSJijiang Liu mbuf->ol_flags |= PKT_TX_VLAN_PKT; 1937d19533e8SHuawei Xie mbuf->vlan_tci = vlan_tag; 1938d19533e8SHuawei Xie mbuf->l2_len = sizeof(struct ether_hdr); 1939d19533e8SHuawei Xie mbuf->l3_len = sizeof(struct ipv4_hdr); 1940d19533e8SHuawei Xie MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx; 1941d19533e8SHuawei Xie 1942d19533e8SHuawei Xie tx_q->m_table[len] = mbuf; 1943d19533e8SHuawei Xie len++; 1944d19533e8SHuawei Xie 1945d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 1946d19533e8SHuawei Xie "(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n", 1947d19533e8SHuawei Xie dev->device_fh, 1948d19533e8SHuawei Xie mbuf->nb_segs, 1949d19533e8SHuawei Xie (mbuf->next == NULL) ?
"null" : "non-null"); 1950d19533e8SHuawei Xie 1951d19533e8SHuawei Xie if (enable_stats) { 1952d19533e8SHuawei Xie dev_statistics[dev->device_fh].tx_total++; 1953d19533e8SHuawei Xie dev_statistics[dev->device_fh].tx++; 1954d19533e8SHuawei Xie } 1955d19533e8SHuawei Xie 1956d19533e8SHuawei Xie if (unlikely(len == MAX_PKT_BURST)) { 1957d19533e8SHuawei Xie m_table = (struct rte_mbuf **)tx_q->m_table; 1958d19533e8SHuawei Xie ret = rte_eth_tx_burst(ports[0], 1959d19533e8SHuawei Xie (uint16_t)tx_q->txq_id, m_table, (uint16_t) len); 1960d19533e8SHuawei Xie 1961d19533e8SHuawei Xie /* 1962d19533e8SHuawei Xie * Free any buffers not handled by TX and update 1963d19533e8SHuawei Xie * the port stats. 1964d19533e8SHuawei Xie */ 1965d19533e8SHuawei Xie if (unlikely(ret < len)) { 1966d19533e8SHuawei Xie do { 1967d19533e8SHuawei Xie rte_pktmbuf_free(m_table[ret]); 1968d19533e8SHuawei Xie } while (++ret < len); 1969d19533e8SHuawei Xie } 1970d19533e8SHuawei Xie 1971d19533e8SHuawei Xie len = 0; 1972d19533e8SHuawei Xie txmbuf_clean_zcp(dev, vpool); 1973d19533e8SHuawei Xie } 1974d19533e8SHuawei Xie 1975d19533e8SHuawei Xie tx_q->len = len; 1976d19533e8SHuawei Xie 1977d19533e8SHuawei Xie return; 1978d19533e8SHuawei Xie } 1979d19533e8SHuawei Xie 1980d19533e8SHuawei Xie /* 1981d19533e8SHuawei Xie * This function TX all available packets in virtio TX queue for one 1982d19533e8SHuawei Xie * virtio-net device. If it is first packet, it learns MAC address and 1983d19533e8SHuawei Xie * setup VMDQ. 1984d19533e8SHuawei Xie */ 1985d19533e8SHuawei Xie static inline void __attribute__((always_inline)) 1986d19533e8SHuawei Xie virtio_dev_tx_zcp(struct virtio_net *dev) 1987d19533e8SHuawei Xie { 1988d19533e8SHuawei Xie struct rte_mbuf m; 1989d19533e8SHuawei Xie struct vhost_virtqueue *vq; 1990d19533e8SHuawei Xie struct vring_desc *desc; 1991d19533e8SHuawei Xie uint64_t buff_addr = 0, phys_addr; 1992d19533e8SHuawei Xie uint32_t head[MAX_PKT_BURST]; 1993d19533e8SHuawei Xie uint32_t i; 1994d19533e8SHuawei Xie uint16_t free_entries, packet_success = 0; 1995d19533e8SHuawei Xie uint16_t avail_idx; 1996d19533e8SHuawei Xie uint8_t need_copy = 0; 1997d19533e8SHuawei Xie hpa_type addr_type; 1998e571e6b4SHuawei Xie struct vhost_dev *vdev = (struct vhost_dev *)dev->priv; 1999d19533e8SHuawei Xie 2000d19533e8SHuawei Xie vq = dev->virtqueue[VIRTIO_TXQ]; 2001d19533e8SHuawei Xie avail_idx = *((volatile uint16_t *)&vq->avail->idx); 2002d19533e8SHuawei Xie 2003d19533e8SHuawei Xie /* If there are no available buffers then return. */ 2004d19533e8SHuawei Xie if (vq->last_used_idx_res == avail_idx) 2005d19533e8SHuawei Xie return; 2006d19533e8SHuawei Xie 2007d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh); 2008d19533e8SHuawei Xie 2009d19533e8SHuawei Xie /* Prefetch available ring to retrieve head indexes. */ 2010d19533e8SHuawei Xie rte_prefetch0(&vq->avail->ring[vq->last_used_idx_res & (vq->size - 1)]); 2011d19533e8SHuawei Xie 2012d19533e8SHuawei Xie /* Get the number of free entries in the ring */ 2013d19533e8SHuawei Xie free_entries = (avail_idx - vq->last_used_idx_res); 2014d19533e8SHuawei Xie 2015d19533e8SHuawei Xie /* Limit to MAX_PKT_BURST. */ 2016d19533e8SHuawei Xie free_entries 2017d19533e8SHuawei Xie = (free_entries > MAX_PKT_BURST) ? 
MAX_PKT_BURST : free_entries; 2018d19533e8SHuawei Xie 2019d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", 2020d19533e8SHuawei Xie dev->device_fh, free_entries); 2021d19533e8SHuawei Xie 2022d19533e8SHuawei Xie /* Retrieve all of the head indexes first to avoid caching issues. */ 2023d19533e8SHuawei Xie for (i = 0; i < free_entries; i++) 2024d19533e8SHuawei Xie head[i] 2025d19533e8SHuawei Xie = vq->avail->ring[(vq->last_used_idx_res + i) 2026d19533e8SHuawei Xie & (vq->size - 1)]; 2027d19533e8SHuawei Xie 2028d19533e8SHuawei Xie vq->last_used_idx_res += free_entries; 2029d19533e8SHuawei Xie 2030d19533e8SHuawei Xie /* Prefetch descriptor index. */ 2031d19533e8SHuawei Xie rte_prefetch0(&vq->desc[head[packet_success]]); 2032d19533e8SHuawei Xie rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]); 2033d19533e8SHuawei Xie 2034d19533e8SHuawei Xie while (packet_success < free_entries) { 2035d19533e8SHuawei Xie desc = &vq->desc[head[packet_success]]; 2036d19533e8SHuawei Xie 2037d19533e8SHuawei Xie /* Discard first buffer as it is the virtio header */ 2038d19533e8SHuawei Xie desc = &vq->desc[desc->next]; 2039d19533e8SHuawei Xie 2040d19533e8SHuawei Xie /* Buffer address translation. */ 2041d19533e8SHuawei Xie buff_addr = gpa_to_vva(dev, desc->addr); 20426630bc42SOuyang Changchun /* Need check extra VLAN_HLEN size for inserting VLAN tag */ 20436630bc42SOuyang Changchun phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len + VLAN_HLEN, 20446630bc42SOuyang Changchun &addr_type); 2045d19533e8SHuawei Xie 2046d19533e8SHuawei Xie if (likely(packet_success < (free_entries - 1))) 2047d19533e8SHuawei Xie /* Prefetch descriptor index. */ 2048d19533e8SHuawei Xie rte_prefetch0(&vq->desc[head[packet_success + 1]]); 2049d19533e8SHuawei Xie 2050d19533e8SHuawei Xie if (unlikely(addr_type == PHYS_ADDR_INVALID)) { 2051d19533e8SHuawei Xie RTE_LOG(ERR, VHOST_DATA, 2052d19533e8SHuawei Xie "(%"PRIu64") Invalid frame buffer address found" 2053d19533e8SHuawei Xie "when TX packets!\n", 2054d19533e8SHuawei Xie dev->device_fh); 2055d19533e8SHuawei Xie packet_success++; 2056d19533e8SHuawei Xie continue; 2057d19533e8SHuawei Xie } 2058d19533e8SHuawei Xie 2059d19533e8SHuawei Xie /* Prefetch buffer address. */ 2060d19533e8SHuawei Xie rte_prefetch0((void *)(uintptr_t)buff_addr); 2061d19533e8SHuawei Xie 2062d19533e8SHuawei Xie /* 2063d19533e8SHuawei Xie * Setup dummy mbuf. This is copied to a real mbuf if 2064d19533e8SHuawei Xie * transmitted out the physical port. 2065d19533e8SHuawei Xie */ 2066d19533e8SHuawei Xie m.data_len = desc->len; 2067d19533e8SHuawei Xie m.nb_segs = 1; 2068d19533e8SHuawei Xie m.next = NULL; 2069d19533e8SHuawei Xie m.data_off = 0; 2070d19533e8SHuawei Xie m.buf_addr = (void *)(uintptr_t)buff_addr; 2071d19533e8SHuawei Xie m.buf_physaddr = phys_addr; 2072d19533e8SHuawei Xie 2073d19533e8SHuawei Xie /* 2074d19533e8SHuawei Xie * Check if the frame buffer address from guest crosses 2075d19533e8SHuawei Xie * sub-region or not. 
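 * If it does cross a boundary, the frame cannot be sent zero-copy:
 * need_copy is set below and virtio_tx_route_zcp() will memcpy the
 * contents into a freshly attached mbuf instead.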
2076d19533e8SHuawei Xie */ 2077d19533e8SHuawei Xie if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) { 2078d19533e8SHuawei Xie RTE_LOG(ERR, VHOST_DATA, 2079d19533e8SHuawei Xie "(%"PRIu64") Frame buffer address cross " 2080d19533e8SHuawei Xie "sub-region found when attaching TX frame " 2081d19533e8SHuawei Xie "buffer address!\n", 2082d19533e8SHuawei Xie dev->device_fh); 2083d19533e8SHuawei Xie need_copy = 1; 2084d19533e8SHuawei Xie } else 2085d19533e8SHuawei Xie need_copy = 0; 2086d19533e8SHuawei Xie 2087d19533e8SHuawei Xie PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0); 2088d19533e8SHuawei Xie 2089d19533e8SHuawei Xie /* 2090d19533e8SHuawei Xie * If this is the first received packet we need to learn 2091d19533e8SHuawei Xie * the MAC and setup VMDQ 2092d19533e8SHuawei Xie */ 2093e571e6b4SHuawei Xie if (unlikely(vdev->ready == DEVICE_MAC_LEARNING)) { 2094e571e6b4SHuawei Xie if (vdev->remove || (link_vmdq(vdev, &m) == -1)) { 2095d19533e8SHuawei Xie /* 2096d19533e8SHuawei Xie * Discard frame if device is scheduled for 2097d19533e8SHuawei Xie * removal or a duplicate MAC address is found. 2098d19533e8SHuawei Xie */ 2099d19533e8SHuawei Xie packet_success += free_entries; 2100d19533e8SHuawei Xie vq->last_used_idx += packet_success; 2101d19533e8SHuawei Xie break; 2102d19533e8SHuawei Xie } 2103d19533e8SHuawei Xie } 2104d19533e8SHuawei Xie 2105d19533e8SHuawei Xie virtio_tx_route_zcp(dev, &m, head[packet_success], need_copy); 2106d19533e8SHuawei Xie packet_success++; 2107d19533e8SHuawei Xie } 2108d19533e8SHuawei Xie } 2109d19533e8SHuawei Xie 2110d19533e8SHuawei Xie /* 2111d19533e8SHuawei Xie * This function is called by each data core. It handles all RX/TX registered 2112d19533e8SHuawei Xie * with the core. For TX the specific lcore linked list is used. For RX, MAC 2113d19533e8SHuawei Xie * addresses are compared with all devices in the main linked list. 2114d19533e8SHuawei Xie */ 2115d19533e8SHuawei Xie static int 2116d19533e8SHuawei Xie switch_worker_zcp(__attribute__((unused)) void *arg) 2117d19533e8SHuawei Xie { 2118d19533e8SHuawei Xie struct virtio_net *dev = NULL; 2119e571e6b4SHuawei Xie struct vhost_dev *vdev = NULL; 2120d19533e8SHuawei Xie struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 2121d19533e8SHuawei Xie struct virtio_net_data_ll *dev_ll; 2122d19533e8SHuawei Xie struct mbuf_table *tx_q; 2123d19533e8SHuawei Xie volatile struct lcore_ll_info *lcore_ll; 2124d19533e8SHuawei Xie const uint64_t drain_tsc 2125d19533e8SHuawei Xie = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S 2126d19533e8SHuawei Xie * BURST_TX_DRAIN_US; 2127d19533e8SHuawei Xie uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0; 2128d19533e8SHuawei Xie unsigned ret; 2129d19533e8SHuawei Xie const uint16_t lcore_id = rte_lcore_id(); 2130d19533e8SHuawei Xie uint16_t count_in_ring, rx_count = 0; 2131d19533e8SHuawei Xie 2132d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id); 2133d19533e8SHuawei Xie 2134d19533e8SHuawei Xie lcore_ll = lcore_info[lcore_id].lcore_ll; 2135d19533e8SHuawei Xie prev_tsc = 0; 2136d19533e8SHuawei Xie 2137d19533e8SHuawei Xie while (1) { 2138d19533e8SHuawei Xie cur_tsc = rte_rdtsc(); 2139d19533e8SHuawei Xie 2140d19533e8SHuawei Xie /* TX burst queue drain */ 2141d19533e8SHuawei Xie diff_tsc = cur_tsc - prev_tsc; 2142d19533e8SHuawei Xie if (unlikely(diff_tsc > drain_tsc)) { 2143d19533e8SHuawei Xie /* 2144d19533e8SHuawei Xie * Get mbuf from vpool.pool and detach mbuf and 2145d19533e8SHuawei Xie * put back into vpool.ring.
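 * This runs once drain_tsc elapses; as a worked example, assuming a
 * 2.4 GHz TSC and the 100us BURST_TX_DRAIN_US interval, drain_tsc =
 * (2400000000 + 1000000 - 1) / 1000000 * 100 = 240000 cycles.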
2146d19533e8SHuawei Xie */ 2147d19533e8SHuawei Xie dev_ll = lcore_ll->ll_root_used; 2148e571e6b4SHuawei Xie while ((dev_ll != NULL) && (dev_ll->vdev != NULL)) { 2149d19533e8SHuawei Xie /* Get virtio device ID */ 2150e571e6b4SHuawei Xie vdev = dev_ll->vdev; 2151e571e6b4SHuawei Xie dev = vdev->dev; 2152d19533e8SHuawei Xie 2153e571e6b4SHuawei Xie if (likely(!vdev->remove)) { 2154e571e6b4SHuawei Xie tx_q = &tx_queue_zcp[(uint16_t)vdev->vmdq_rx_q]; 2155d19533e8SHuawei Xie if (tx_q->len) { 2156d19533e8SHuawei Xie LOG_DEBUG(VHOST_DATA, 2157d19533e8SHuawei Xie "TX queue drained after timeout" 2158d19533e8SHuawei Xie " with burst size %u\n", 2159d19533e8SHuawei Xie tx_q->len); 2160d19533e8SHuawei Xie 2161d19533e8SHuawei Xie /* 2162d19533e8SHuawei Xie * Tx any packets in the queue 2163d19533e8SHuawei Xie */ 2164d19533e8SHuawei Xie ret = rte_eth_tx_burst( 2165d19533e8SHuawei Xie ports[0], 2166d19533e8SHuawei Xie (uint16_t)tx_q->txq_id, 2167d19533e8SHuawei Xie (struct rte_mbuf **) 2168d19533e8SHuawei Xie tx_q->m_table, 2169d19533e8SHuawei Xie (uint16_t)tx_q->len); 2170d19533e8SHuawei Xie if (unlikely(ret < tx_q->len)) { 2171d19533e8SHuawei Xie do { 2172d19533e8SHuawei Xie rte_pktmbuf_free( 2173d19533e8SHuawei Xie tx_q->m_table[ret]); 2174d19533e8SHuawei Xie } while (++ret < tx_q->len); 2175d19533e8SHuawei Xie } 2176d19533e8SHuawei Xie tx_q->len = 0; 2177d19533e8SHuawei Xie 2178d19533e8SHuawei Xie txmbuf_clean_zcp(dev, 2179e571e6b4SHuawei Xie &vpool_array[MAX_QUEUES+vdev->vmdq_rx_q]); 2180d19533e8SHuawei Xie } 2181d19533e8SHuawei Xie } 2182d19533e8SHuawei Xie dev_ll = dev_ll->next; 2183d19533e8SHuawei Xie } 2184d19533e8SHuawei Xie prev_tsc = cur_tsc; 2185d19533e8SHuawei Xie } 2186d19533e8SHuawei Xie 2187d19533e8SHuawei Xie rte_prefetch0(lcore_ll->ll_root_used); 2188d19533e8SHuawei Xie 2189d19533e8SHuawei Xie /* 2190d19533e8SHuawei Xie * Inform the configuration core that we have exited the linked 2191d19533e8SHuawei Xie * list and that no devices are in use if requested. 2192d19533e8SHuawei Xie */ 2193d19533e8SHuawei Xie if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL) 2194d19533e8SHuawei Xie lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL; 2195d19533e8SHuawei Xie 2196d19533e8SHuawei Xie /* Process devices */ 2197d19533e8SHuawei Xie dev_ll = lcore_ll->ll_root_used; 2198d19533e8SHuawei Xie 2199e571e6b4SHuawei Xie while ((dev_ll != NULL) && (dev_ll->vdev != NULL)) { 2200e571e6b4SHuawei Xie vdev = dev_ll->vdev; 2201e571e6b4SHuawei Xie dev = vdev->dev; 2202e571e6b4SHuawei Xie if (unlikely(vdev->remove)) { 2203d19533e8SHuawei Xie dev_ll = dev_ll->next; 2204e571e6b4SHuawei Xie unlink_vmdq(vdev); 2205e571e6b4SHuawei Xie vdev->ready = DEVICE_SAFE_REMOVE; 2206d19533e8SHuawei Xie continue; 2207d19533e8SHuawei Xie } 2208d19533e8SHuawei Xie 2209e571e6b4SHuawei Xie if (likely(vdev->ready == DEVICE_RX)) { 2210e571e6b4SHuawei Xie uint32_t index = vdev->vmdq_rx_q; 2211d19533e8SHuawei Xie uint16_t i; 2212d19533e8SHuawei Xie count_in_ring 2213d19533e8SHuawei Xie = rte_ring_count(vpool_array[index].ring); 2214d19533e8SHuawei Xie uint16_t free_entries 2215d19533e8SHuawei Xie = (uint16_t)get_available_ring_num_zcp(dev); 2216d19533e8SHuawei Xie 2217d19533e8SHuawei Xie /* 2218d19533e8SHuawei Xie * Attach all mbufs in vpool.ring and put back 2219d19533e8SHuawei Xie * into vpool.pool. 
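 * The attach count per poll is bounded by the RTE_MIN chain below:
 * never more than the guest's free avail entries, the mbufs waiting
 * in the ring, or MAX_PKT_BURST.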
2220d19533e8SHuawei Xie */ 2221d19533e8SHuawei Xie for (i = 0; 2222d19533e8SHuawei Xie i < RTE_MIN(free_entries, 2223d19533e8SHuawei Xie RTE_MIN(count_in_ring, MAX_PKT_BURST)); 2224d19533e8SHuawei Xie i++) 2225d19533e8SHuawei Xie attach_rxmbuf_zcp(dev); 2226d19533e8SHuawei Xie 2227d19533e8SHuawei Xie /* Handle guest RX */ 2228d19533e8SHuawei Xie rx_count = rte_eth_rx_burst(ports[0], 2229e571e6b4SHuawei Xie vdev->vmdq_rx_q, pkts_burst, 2230d19533e8SHuawei Xie MAX_PKT_BURST); 2231d19533e8SHuawei Xie 2232d19533e8SHuawei Xie if (rx_count) { 2233d19533e8SHuawei Xie ret_count = virtio_dev_rx_zcp(dev, 2234d19533e8SHuawei Xie pkts_burst, rx_count); 2235d19533e8SHuawei Xie if (enable_stats) { 2236d19533e8SHuawei Xie dev_statistics[dev->device_fh].rx_total 2237d19533e8SHuawei Xie += rx_count; 2238d19533e8SHuawei Xie dev_statistics[dev->device_fh].rx 2239d19533e8SHuawei Xie += ret_count; 2240d19533e8SHuawei Xie } 2241d19533e8SHuawei Xie while (likely(rx_count)) { 2242d19533e8SHuawei Xie rx_count--; 2243d19533e8SHuawei Xie pktmbuf_detach_zcp( 2244d19533e8SHuawei Xie pkts_burst[rx_count]); 2245d19533e8SHuawei Xie rte_ring_sp_enqueue( 2246d19533e8SHuawei Xie vpool_array[index].ring, 2247d19533e8SHuawei Xie (void *)pkts_burst[rx_count]); 2248d19533e8SHuawei Xie } 2249d19533e8SHuawei Xie } 2250d19533e8SHuawei Xie } 2251d19533e8SHuawei Xie 2252e571e6b4SHuawei Xie if (likely(!vdev->remove)) 2253d19533e8SHuawei Xie /* Handle guest TX */ 2254d19533e8SHuawei Xie virtio_dev_tx_zcp(dev); 2255d19533e8SHuawei Xie 2256d19533e8SHuawei Xie /* Move to the next device in the list */ 2257d19533e8SHuawei Xie dev_ll = dev_ll->next; 2258d19533e8SHuawei Xie } 2259d19533e8SHuawei Xie } 2260d19533e8SHuawei Xie 2261d19533e8SHuawei Xie return 0; 2262d19533e8SHuawei Xie } 2263d19533e8SHuawei Xie 2264d19533e8SHuawei Xie 2265d19533e8SHuawei Xie /* 2266d19533e8SHuawei Xie * Add an entry to a used linked list. A free entry must first be found 2267d19533e8SHuawei Xie * in the free linked list using get_data_ll_free_entry(). 2268d19533e8SHuawei Xie */ 2269d19533e8SHuawei Xie static void 2270d19533e8SHuawei Xie add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, 2271d19533e8SHuawei Xie struct virtio_net_data_ll *ll_dev) 2272d19533e8SHuawei Xie { 2273d19533e8SHuawei Xie struct virtio_net_data_ll *ll = *ll_root_addr; 2274d19533e8SHuawei Xie 2275d19533e8SHuawei Xie /* Set next as NULL and use a compiler barrier to avoid reordering. */ 2276d19533e8SHuawei Xie ll_dev->next = NULL; 2277d19533e8SHuawei Xie rte_compiler_barrier(); 2278d19533e8SHuawei Xie 2279d19533e8SHuawei Xie /* If ll == NULL then this is the first device. */ 2280d19533e8SHuawei Xie if (ll) { 2281d19533e8SHuawei Xie /* Increment to the tail of the linked list. */ 2282d19533e8SHuawei Xie while (ll->next != NULL) 2283d19533e8SHuawei Xie ll = ll->next; 2284d19533e8SHuawei Xie 2285d19533e8SHuawei Xie ll->next = ll_dev; 2286d19533e8SHuawei Xie } else { 2287d19533e8SHuawei Xie *ll_root_addr = ll_dev; 2288d19533e8SHuawei Xie } 2289d19533e8SHuawei Xie } 2290d19533e8SHuawei Xie 2291d19533e8SHuawei Xie /* 2292d19533e8SHuawei Xie * Remove an entry from a used linked list. The entry must then be added to 2293d19533e8SHuawei Xie * the free linked list using put_data_ll_free_entry().
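 * The caller must also supply the predecessor node (ll_dev_last)
 * because the list is singly linked and cannot be walked backwards.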
2294d19533e8SHuawei Xie */
2295d19533e8SHuawei Xie static void
2296d19533e8SHuawei Xie rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
2297d19533e8SHuawei Xie struct virtio_net_data_ll *ll_dev,
2298d19533e8SHuawei Xie struct virtio_net_data_ll *ll_dev_last)
2299d19533e8SHuawei Xie {
2300d19533e8SHuawei Xie struct virtio_net_data_ll *ll = *ll_root_addr;
2301d19533e8SHuawei Xie 
2302d19533e8SHuawei Xie if (unlikely((ll == NULL) || (ll_dev == NULL)))
2303d19533e8SHuawei Xie return;
2304d19533e8SHuawei Xie 
2305d19533e8SHuawei Xie if (ll_dev == ll)
2306d19533e8SHuawei Xie *ll_root_addr = ll_dev->next;
2307d19533e8SHuawei Xie else
2308d19533e8SHuawei Xie if (likely(ll_dev_last != NULL))
2309d19533e8SHuawei Xie ll_dev_last->next = ll_dev->next;
2310d19533e8SHuawei Xie else
2311d19533e8SHuawei Xie RTE_LOG(ERR, VHOST_CONFIG, "Remove entry from ll failed.\n");
2312d19533e8SHuawei Xie }
2313d19533e8SHuawei Xie 
2314d19533e8SHuawei Xie /*
2315d19533e8SHuawei Xie * Find and return an entry from the free linked list.
2316d19533e8SHuawei Xie */
2317d19533e8SHuawei Xie static struct virtio_net_data_ll *
2318d19533e8SHuawei Xie get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
2319d19533e8SHuawei Xie {
2320d19533e8SHuawei Xie struct virtio_net_data_ll *ll_free = *ll_root_addr;
2321d19533e8SHuawei Xie struct virtio_net_data_ll *ll_dev;
2322d19533e8SHuawei Xie 
2323d19533e8SHuawei Xie if (ll_free == NULL)
2324d19533e8SHuawei Xie return NULL;
2325d19533e8SHuawei Xie 
2326d19533e8SHuawei Xie ll_dev = ll_free;
2327d19533e8SHuawei Xie *ll_root_addr = ll_free->next;
2328d19533e8SHuawei Xie 
2329d19533e8SHuawei Xie return ll_dev;
2330d19533e8SHuawei Xie }
2331d19533e8SHuawei Xie 
2332d19533e8SHuawei Xie /*
2333d19533e8SHuawei Xie * Place an entry back onto the free linked list.
2334d19533e8SHuawei Xie */
2335d19533e8SHuawei Xie static void
2336d19533e8SHuawei Xie put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
2337d19533e8SHuawei Xie struct virtio_net_data_ll *ll_dev)
2338d19533e8SHuawei Xie {
2339d19533e8SHuawei Xie struct virtio_net_data_ll *ll_free = *ll_root_addr;
2340d19533e8SHuawei Xie 
2341d19533e8SHuawei Xie if (ll_dev == NULL)
2342d19533e8SHuawei Xie return;
2343d19533e8SHuawei Xie 
2344d19533e8SHuawei Xie ll_dev->next = ll_free;
2345d19533e8SHuawei Xie *ll_root_addr = ll_dev;
2346d19533e8SHuawei Xie }
2347d19533e8SHuawei Xie 
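/*
 * Editor's note: the four helpers above form a small free-list/used-list
 * protocol, exercised by new_device() and destroy_device() below. A minimal
 * usage sketch (illustrative only; "my_vdev" and "prev" are hypothetical
 * variables, not part of this file):
 *
 *	struct virtio_net_data_ll *entry;
 *
 *	entry = get_data_ll_free_entry(&ll_root_free);
 *	if (entry != NULL) {
 *		entry->vdev = my_vdev;
 *		add_data_ll_entry(&ll_root_used, entry);	// publish entry
 *	}
 *
 *	// Later, with "prev" pointing at the element preceding "entry" in
 *	// the used list (or NULL when "entry" is the head):
 *	rm_data_ll_entry(&ll_root_used, entry, prev);
 *	put_data_ll_free_entry(&ll_root_free, entry);	// recycle entry
 */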
2348d19533e8SHuawei Xie /*
2349d19533e8SHuawei Xie * Creates a linked list of a given size.
2350d19533e8SHuawei Xie */
2351d19533e8SHuawei Xie static struct virtio_net_data_ll *
2352d19533e8SHuawei Xie alloc_data_ll(uint32_t size)
2353d19533e8SHuawei Xie {
2354d19533e8SHuawei Xie struct virtio_net_data_ll *ll_new;
2355d19533e8SHuawei Xie uint32_t i;
2356d19533e8SHuawei Xie 
2357d19533e8SHuawei Xie /* Malloc and then chain the linked list. */
2358d19533e8SHuawei Xie ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
2359d19533e8SHuawei Xie if (ll_new == NULL) {
2360d19533e8SHuawei Xie RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
2361d19533e8SHuawei Xie return NULL;
2362d19533e8SHuawei Xie }
2363d19533e8SHuawei Xie 
2364d19533e8SHuawei Xie for (i = 0; i < size - 1; i++) {
2365e571e6b4SHuawei Xie ll_new[i].vdev = NULL;
2366d19533e8SHuawei Xie ll_new[i].next = &ll_new[i+1];
2367d19533e8SHuawei Xie }
2368d19533e8SHuawei Xie ll_new[i].next = NULL;
2369d19533e8SHuawei Xie 
2370693f715dSHuawei Xie return ll_new;
2371d19533e8SHuawei Xie }
2372d19533e8SHuawei Xie 
2373d19533e8SHuawei Xie /*
2374d19533e8SHuawei Xie * Create the main linked list along with each individual core's linked list. A used and a free list
2375d19533e8SHuawei Xie * are created to manage entries.
2376d19533e8SHuawei Xie */
2377d19533e8SHuawei Xie static int
2378d19533e8SHuawei Xie init_data_ll (void)
2379d19533e8SHuawei Xie {
2380d19533e8SHuawei Xie int lcore;
2381d19533e8SHuawei Xie 
2382d19533e8SHuawei Xie RTE_LCORE_FOREACH_SLAVE(lcore) {
2383d19533e8SHuawei Xie lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
2384d19533e8SHuawei Xie if (lcore_info[lcore].lcore_ll == NULL) {
2385d19533e8SHuawei Xie RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
2386d19533e8SHuawei Xie return -1;
2387d19533e8SHuawei Xie }
2388d19533e8SHuawei Xie 
2389d19533e8SHuawei Xie lcore_info[lcore].lcore_ll->device_num = 0;
2390d19533e8SHuawei Xie lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
2391d19533e8SHuawei Xie lcore_info[lcore].lcore_ll->ll_root_used = NULL;
2392d19533e8SHuawei Xie if (num_devices % num_switching_cores)
2393d19533e8SHuawei Xie lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
2394d19533e8SHuawei Xie else
2395d19533e8SHuawei Xie lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
2396d19533e8SHuawei Xie }
2397d19533e8SHuawei Xie 
2398d19533e8SHuawei Xie /* Allocate devices up to a maximum of MAX_DEVICES. */
2399d19533e8SHuawei Xie ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));
2400d19533e8SHuawei Xie 
2401d19533e8SHuawei Xie return 0;
2402d19533e8SHuawei Xie }
2403d19533e8SHuawei Xie 
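/*
 * Editor's note: device removal is a two-sided handshake between this
 * configuration path and the data cores (see the worker loop above). A
 * sketch of the sequence, reconstructed from the code below:
 *
 *	configuration core                   data core
 *	------------------                   ---------
 *	vdev->remove = 1;
 *	spin until ready ==            ....  sees vdev->remove, unlink_vmdq(),
 *	DEVICE_SAFE_REMOVE                   sets ready = DEVICE_SAFE_REMOVE
 *	unlink entry from lcore/main lists
 *	dev_removal_flag =
 *	REQUEST_DEV_REMOVAL (per lcore)
 *	spin until ACK_DEV_REMOVAL     ....  once outside the list walk, sets
 *	                                     dev_removal_flag = ACK_DEV_REMOVAL
 *	recycle both entries on the free lists
 */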
2404d19533e8SHuawei Xie /*
2405d19533e8SHuawei Xie * Remove a device from the specific data core linked list and from the main linked list. Synchronization
2406d19533e8SHuawei Xie * occurs through the use of the lcore dev_removal_flag. The device is made volatile here to avoid re-ordering
2407d19533e8SHuawei Xie * of dev->remove=1, which can cause an infinite loop in the rte_pause loop.
2408d19533e8SHuawei Xie */
2409d19533e8SHuawei Xie static void
2410d19533e8SHuawei Xie destroy_device (volatile struct virtio_net *dev)
2411d19533e8SHuawei Xie {
2412d19533e8SHuawei Xie struct virtio_net_data_ll *ll_lcore_dev_cur;
2413d19533e8SHuawei Xie struct virtio_net_data_ll *ll_main_dev_cur;
2414d19533e8SHuawei Xie struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
2415d19533e8SHuawei Xie struct virtio_net_data_ll *ll_main_dev_last = NULL;
2416e571e6b4SHuawei Xie struct vhost_dev *vdev;
2417d19533e8SHuawei Xie int lcore;
2418d19533e8SHuawei Xie 
2419d19533e8SHuawei Xie dev->flags &= ~VIRTIO_DEV_RUNNING;
2420d19533e8SHuawei Xie 
2421e571e6b4SHuawei Xie vdev = (struct vhost_dev *)dev->priv;
2422d19533e8SHuawei Xie /* Set the remove flag. */
2423e571e6b4SHuawei Xie vdev->remove = 1;
2424e571e6b4SHuawei Xie while (vdev->ready != DEVICE_SAFE_REMOVE) {
2425d19533e8SHuawei Xie rte_pause();
2426d19533e8SHuawei Xie }
2427d19533e8SHuawei Xie 
2428d19533e8SHuawei Xie /* Search for entry to be removed from lcore ll */
2429e571e6b4SHuawei Xie ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
2430d19533e8SHuawei Xie while (ll_lcore_dev_cur != NULL) {
2431e571e6b4SHuawei Xie if (ll_lcore_dev_cur->vdev == vdev) {
2432d19533e8SHuawei Xie break;
2433d19533e8SHuawei Xie } else {
2434d19533e8SHuawei Xie ll_lcore_dev_last = ll_lcore_dev_cur;
2435d19533e8SHuawei Xie ll_lcore_dev_cur = ll_lcore_dev_cur->next;
2436d19533e8SHuawei Xie }
2437d19533e8SHuawei Xie }
2438d19533e8SHuawei Xie 
2439d19533e8SHuawei Xie if (ll_lcore_dev_cur == NULL) {
2440d19533e8SHuawei Xie RTE_LOG(ERR, VHOST_CONFIG,
2441d19533e8SHuawei Xie "(%"PRIu64") Failed to find the device to be destroyed.\n",
2442d19533e8SHuawei Xie dev->device_fh);
2443d19533e8SHuawei Xie return;
2444d19533e8SHuawei Xie }
2445d19533e8SHuawei Xie 
2446d19533e8SHuawei Xie /* Search for entry to be removed from main ll */
2447d19533e8SHuawei Xie ll_main_dev_cur = ll_root_used;
2448d19533e8SHuawei Xie ll_main_dev_last = NULL;
2449d19533e8SHuawei Xie while (ll_main_dev_cur != NULL) {
2450e571e6b4SHuawei Xie if (ll_main_dev_cur->vdev == vdev) {
2451d19533e8SHuawei Xie break;
2452d19533e8SHuawei Xie } else {
2453d19533e8SHuawei Xie ll_main_dev_last = ll_main_dev_cur;
2454d19533e8SHuawei Xie ll_main_dev_cur = ll_main_dev_cur->next;
2455d19533e8SHuawei Xie }
2456d19533e8SHuawei Xie }
2457d19533e8SHuawei Xie 
2458d19533e8SHuawei Xie /* Remove entries from the lcore and main ll. */
2459e571e6b4SHuawei Xie rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
2460d19533e8SHuawei Xie rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
2461d19533e8SHuawei Xie 
2462d19533e8SHuawei Xie /* Set the dev_removal_flag on each lcore. */
2463d19533e8SHuawei Xie RTE_LCORE_FOREACH_SLAVE(lcore) {
2464d19533e8SHuawei Xie lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
2465d19533e8SHuawei Xie }
2466d19533e8SHuawei Xie 
2467d19533e8SHuawei Xie /*
2468d19533e8SHuawei Xie * Once each core has set its dev_removal_flag back to ACK_DEV_REMOVAL we can be sure that
2469d19533e8SHuawei Xie * it can no longer access the device removed from the linked lists and that the device
2470d19533e8SHuawei Xie * is no longer in use.
2471d19533e8SHuawei Xie */
2472d19533e8SHuawei Xie RTE_LCORE_FOREACH_SLAVE(lcore) {
2473d19533e8SHuawei Xie while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {
2474d19533e8SHuawei Xie rte_pause();
2475d19533e8SHuawei Xie }
2476d19533e8SHuawei Xie }
2477d19533e8SHuawei Xie 
2478d19533e8SHuawei Xie /* Add the entries back to the lcore and main free ll. */
2479e571e6b4SHuawei Xie put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
2480d19533e8SHuawei Xie put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);
2481d19533e8SHuawei Xie 
2482d19533e8SHuawei Xie /* Decrement the number of devices on the lcore. */
2483e571e6b4SHuawei Xie lcore_info[vdev->coreid].lcore_ll->device_num--;
2484d19533e8SHuawei Xie 
2485d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
2486d19533e8SHuawei Xie 
2487d19533e8SHuawei Xie if (zero_copy) {
2488e571e6b4SHuawei Xie struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
2489d19533e8SHuawei Xie 
2490d19533e8SHuawei Xie /* Stop the RX queue. */
2491e571e6b4SHuawei Xie if (rte_eth_dev_rx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
2492d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2493d19533e8SHuawei Xie "(%"PRIu64") In destroy_device: Failed to stop "
2494d19533e8SHuawei Xie "rx queue:%d\n",
2495d19533e8SHuawei Xie dev->device_fh,
2496e571e6b4SHuawei Xie vdev->vmdq_rx_q);
2497d19533e8SHuawei Xie }
2498d19533e8SHuawei Xie 
2499d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2500d19533e8SHuawei Xie "(%"PRIu64") in destroy_device: Start put mbuf in "
2501d19533e8SHuawei Xie "mempool back to ring for RX queue: %d\n",
2502e571e6b4SHuawei Xie dev->device_fh, vdev->vmdq_rx_q);
2503d19533e8SHuawei Xie 
2504d19533e8SHuawei Xie mbuf_destroy_zcp(vpool);
2505d19533e8SHuawei Xie 
2506d19533e8SHuawei Xie /* Stop the TX queue. */
2507e571e6b4SHuawei Xie if (rte_eth_dev_tx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
2508d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2509d19533e8SHuawei Xie "(%"PRIu64") In destroy_device: Failed to "
2510d19533e8SHuawei Xie "stop tx queue:%d\n",
2511e571e6b4SHuawei Xie dev->device_fh, vdev->vmdq_rx_q);
2512d19533e8SHuawei Xie }
2513d19533e8SHuawei Xie 
2514e571e6b4SHuawei Xie vpool = &vpool_array[vdev->vmdq_rx_q + MAX_QUEUES];
2515d19533e8SHuawei Xie 
2516d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2517d19533e8SHuawei Xie "(%"PRIu64") destroy_device: Start put mbuf in mempool "
2518d19533e8SHuawei Xie "back to ring for TX queue: %d, dev:(%"PRIu64")\n",
2519e571e6b4SHuawei Xie dev->device_fh, (vdev->vmdq_rx_q + MAX_QUEUES),
2520d19533e8SHuawei Xie dev->device_fh);
2521d19533e8SHuawei Xie 
2522d19533e8SHuawei Xie mbuf_destroy_zcp(vpool);
25239915bb1fSHuawei Xie rte_free(vdev->regions_hpa);
2524d19533e8SHuawei Xie }
2525e571e6b4SHuawei Xie rte_free(vdev);
2526d19533e8SHuawei Xie 
2527d19533e8SHuawei Xie }
2528d19533e8SHuawei Xie 
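/*
 * Editor's note: a worked example for check_hpa_regions() below, assuming a
 * 4 KiB page size (hypothetical numbers). Take a 16 KiB virtually contiguous
 * region whose four pages map to host physical pages P, P+1, Q, Q+1, where Q
 * is not adjacent to P+1. The loop detects exactly one break, between page 1
 * and page 2, so the function returns 1. new_device() sizes regions_hpa as
 * the guest region count plus the number of such breaks, and
 * fill_hpa_memory_regions() then splits this region into two physically
 * contiguous sub-regions of 8 KiB each.
 */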
2529d19533e8SHuawei Xie /*
25309915bb1fSHuawei Xie * Calculate the number of physically contiguous regions within one
25319915bb1fSHuawei Xie * particular region whose vhost virtual address is contiguous. The
25329915bb1fSHuawei Xie * particular region starts at vva_start, with a size of 'size' bytes.
25339915bb1fSHuawei Xie */
25349915bb1fSHuawei Xie static uint32_t
25359915bb1fSHuawei Xie check_hpa_regions(uint64_t vva_start, uint64_t size)
25369915bb1fSHuawei Xie {
25379915bb1fSHuawei Xie uint32_t i, nregions = 0, page_size = getpagesize();
25389915bb1fSHuawei Xie uint64_t cur_phys_addr = 0, next_phys_addr = 0;
25399915bb1fSHuawei Xie if (vva_start % page_size) {
25409915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG,
25419915bb1fSHuawei Xie "in check_continuous: vva start(%p) mod page_size(%d) "
25429915bb1fSHuawei Xie "has remainder\n",
25439915bb1fSHuawei Xie (void *)(uintptr_t)vva_start, page_size);
25449915bb1fSHuawei Xie return 0;
25459915bb1fSHuawei Xie }
25469915bb1fSHuawei Xie if (size % page_size) {
25479915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG,
25489915bb1fSHuawei Xie "in check_continuous: "
25499915bb1fSHuawei Xie "size((%"PRIu64")) mod page_size(%d) has remainder\n",
25509915bb1fSHuawei Xie size, page_size);
25519915bb1fSHuawei Xie return 0;
25529915bb1fSHuawei Xie }
25539915bb1fSHuawei Xie for (i = 0; i < size - page_size; i = i + page_size) {
25549915bb1fSHuawei Xie cur_phys_addr
25559915bb1fSHuawei Xie = rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));
25569915bb1fSHuawei Xie next_phys_addr = rte_mem_virt2phy(
25579915bb1fSHuawei Xie (void *)(uintptr_t)(vva_start + i + page_size));
25589915bb1fSHuawei Xie if ((cur_phys_addr + page_size) != next_phys_addr) {
25599915bb1fSHuawei Xie ++nregions;
25609915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG,
25619915bb1fSHuawei Xie "in check_continuous: hva addr:(%p) is not "
25629915bb1fSHuawei Xie "continuous with hva addr:(%p), diff:%d\n",
25639915bb1fSHuawei Xie (void *)(uintptr_t)(vva_start + (uint64_t)i),
25649915bb1fSHuawei Xie (void *)(uintptr_t)(vva_start + (uint64_t)i
25659915bb1fSHuawei Xie + page_size), page_size);
25669915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG,
25679915bb1fSHuawei Xie "in check_continuous: hpa addr:(%p) is not "
25689915bb1fSHuawei Xie "continuous with hpa addr:(%p), "
25699915bb1fSHuawei Xie "diff:(%"PRIu64")\n",
25709915bb1fSHuawei Xie (void *)(uintptr_t)cur_phys_addr,
25719915bb1fSHuawei Xie (void *)(uintptr_t)next_phys_addr,
25729915bb1fSHuawei Xie (next_phys_addr-cur_phys_addr));
25739915bb1fSHuawei Xie }
25749915bb1fSHuawei Xie }
25759915bb1fSHuawei Xie return nregions;
25769915bb1fSHuawei Xie }
25779915bb1fSHuawei Xie 
25789915bb1fSHuawei Xie /*
25799915bb1fSHuawei Xie * Divide each region whose vhost virtual address is contiguous into a few
25809915bb1fSHuawei Xie * sub-regions, making sure the physical addresses within each sub-region
25819915bb1fSHuawei Xie * are contiguous, and fill the offset (to GPA), size and other information
25829915bb1fSHuawei Xie * of each sub-region into regions_hpa.
25839915bb1fSHuawei Xie */ 25849915bb1fSHuawei Xie static uint32_t 25859915bb1fSHuawei Xie fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct virtio_memory *virtio_memory) 25869915bb1fSHuawei Xie { 25879915bb1fSHuawei Xie uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = getpagesize(); 25889915bb1fSHuawei Xie uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start; 25899915bb1fSHuawei Xie 25909915bb1fSHuawei Xie if (mem_region_hpa == NULL) 25919915bb1fSHuawei Xie return 0; 25929915bb1fSHuawei Xie 25939915bb1fSHuawei Xie for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) { 25949915bb1fSHuawei Xie vva_start = virtio_memory->regions[regionidx].guest_phys_address + 25959915bb1fSHuawei Xie virtio_memory->regions[regionidx].address_offset; 25969915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa].guest_phys_address 25979915bb1fSHuawei Xie = virtio_memory->regions[regionidx].guest_phys_address; 25989915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa].host_phys_addr_offset = 25999915bb1fSHuawei Xie rte_mem_virt2phy((void *)(uintptr_t)(vva_start)) - 26009915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa].guest_phys_address; 26019915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG, 26029915bb1fSHuawei Xie "in fill_hpa_regions: guest phys addr start[%d]:(%p)\n", 26039915bb1fSHuawei Xie regionidx_hpa, 26049915bb1fSHuawei Xie (void *)(uintptr_t) 26059915bb1fSHuawei Xie (mem_region_hpa[regionidx_hpa].guest_phys_address)); 26069915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG, 26079915bb1fSHuawei Xie "in fill_hpa_regions: host phys addr start[%d]:(%p)\n", 26089915bb1fSHuawei Xie regionidx_hpa, 26099915bb1fSHuawei Xie (void *)(uintptr_t) 26109915bb1fSHuawei Xie (mem_region_hpa[regionidx_hpa].host_phys_addr_offset)); 26119915bb1fSHuawei Xie for (i = 0, k = 0; 26129915bb1fSHuawei Xie i < virtio_memory->regions[regionidx].memory_size - 26139915bb1fSHuawei Xie page_size; 26149915bb1fSHuawei Xie i += page_size) { 26159915bb1fSHuawei Xie cur_phys_addr = rte_mem_virt2phy( 26169915bb1fSHuawei Xie (void *)(uintptr_t)(vva_start + i)); 26179915bb1fSHuawei Xie next_phys_addr = rte_mem_virt2phy( 26189915bb1fSHuawei Xie (void *)(uintptr_t)(vva_start + 26199915bb1fSHuawei Xie i + page_size)); 26209915bb1fSHuawei Xie if ((cur_phys_addr + page_size) != next_phys_addr) { 26219915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa].guest_phys_address_end = 26229915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa].guest_phys_address + 26239915bb1fSHuawei Xie k + page_size; 26249915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa].memory_size 26259915bb1fSHuawei Xie = k + page_size; 26269915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest " 26279915bb1fSHuawei Xie "phys addr end [%d]:(%p)\n", 26289915bb1fSHuawei Xie regionidx_hpa, 26299915bb1fSHuawei Xie (void *)(uintptr_t) 26309915bb1fSHuawei Xie (mem_region_hpa[regionidx_hpa].guest_phys_address_end)); 26319915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG, 26329915bb1fSHuawei Xie "in fill_hpa_regions: guest phys addr " 26339915bb1fSHuawei Xie "size [%d]:(%p)\n", 26349915bb1fSHuawei Xie regionidx_hpa, 26359915bb1fSHuawei Xie (void *)(uintptr_t) 26369915bb1fSHuawei Xie (mem_region_hpa[regionidx_hpa].memory_size)); 26379915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa + 1].guest_phys_address 26389915bb1fSHuawei Xie = mem_region_hpa[regionidx_hpa].guest_phys_address_end; 26399915bb1fSHuawei Xie ++regionidx_hpa; 26409915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa].host_phys_addr_offset = 26419915bb1fSHuawei Xie next_phys_addr - 26429915bb1fSHuawei Xie 
mem_region_hpa[regionidx_hpa].guest_phys_address;
26439915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
26449915bb1fSHuawei Xie " phys addr start[%d]:(%p)\n",
26459915bb1fSHuawei Xie regionidx_hpa,
26469915bb1fSHuawei Xie (void *)(uintptr_t)
26479915bb1fSHuawei Xie (mem_region_hpa[regionidx_hpa].guest_phys_address));
26489915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG,
26499915bb1fSHuawei Xie "in fill_hpa_regions: host phys addr "
26509915bb1fSHuawei Xie "start[%d]:(%p)\n",
26519915bb1fSHuawei Xie regionidx_hpa,
26529915bb1fSHuawei Xie (void *)(uintptr_t)
26539915bb1fSHuawei Xie (mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
26549915bb1fSHuawei Xie k = 0;
26559915bb1fSHuawei Xie } else {
26569915bb1fSHuawei Xie k += page_size;
26579915bb1fSHuawei Xie }
26589915bb1fSHuawei Xie }
26599915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa].guest_phys_address_end
26609915bb1fSHuawei Xie = mem_region_hpa[regionidx_hpa].guest_phys_address
26619915bb1fSHuawei Xie + k + page_size;
26629915bb1fSHuawei Xie mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
26639915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
26649915bb1fSHuawei Xie "[%d]:(%p)\n", regionidx_hpa,
26659915bb1fSHuawei Xie (void *)(uintptr_t)
26669915bb1fSHuawei Xie (mem_region_hpa[regionidx_hpa].guest_phys_address_end));
26679915bb1fSHuawei Xie LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
26689915bb1fSHuawei Xie "[%d]:(%p)\n", regionidx_hpa,
26699915bb1fSHuawei Xie (void *)(uintptr_t)
26709915bb1fSHuawei Xie (mem_region_hpa[regionidx_hpa].memory_size));
26719915bb1fSHuawei Xie ++regionidx_hpa;
26729915bb1fSHuawei Xie }
26739915bb1fSHuawei Xie return regionidx_hpa;
26749915bb1fSHuawei Xie }
26759915bb1fSHuawei Xie 
26769915bb1fSHuawei Xie /*
2677d19533e8SHuawei Xie * A new device is added to a data core. First the device is added to the main linked list
2678d19533e8SHuawei Xie * and then allocated to a specific data core.
2679d19533e8SHuawei Xie */ 2680d19533e8SHuawei Xie static int 2681d19533e8SHuawei Xie new_device (struct virtio_net *dev) 2682d19533e8SHuawei Xie { 2683d19533e8SHuawei Xie struct virtio_net_data_ll *ll_dev; 2684d19533e8SHuawei Xie int lcore, core_add = 0; 2685d19533e8SHuawei Xie uint32_t device_num_min = num_devices; 2686e571e6b4SHuawei Xie struct vhost_dev *vdev; 26879915bb1fSHuawei Xie uint32_t regionidx; 2688e571e6b4SHuawei Xie 2689fdf20fa7SSergio Gonzalez Monroy vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE); 2690e571e6b4SHuawei Xie if (vdev == NULL) { 2691e571e6b4SHuawei Xie RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n", 2692e571e6b4SHuawei Xie dev->device_fh); 2693e571e6b4SHuawei Xie return -1; 2694e571e6b4SHuawei Xie } 2695e571e6b4SHuawei Xie vdev->dev = dev; 2696e571e6b4SHuawei Xie dev->priv = vdev; 2697d19533e8SHuawei Xie 26989915bb1fSHuawei Xie if (zero_copy) { 26999915bb1fSHuawei Xie vdev->nregions_hpa = dev->mem->nregions; 27009915bb1fSHuawei Xie for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) { 27019915bb1fSHuawei Xie vdev->nregions_hpa 27029915bb1fSHuawei Xie += check_hpa_regions( 27039915bb1fSHuawei Xie dev->mem->regions[regionidx].guest_phys_address 27049915bb1fSHuawei Xie + dev->mem->regions[regionidx].address_offset, 27059915bb1fSHuawei Xie dev->mem->regions[regionidx].memory_size); 27069915bb1fSHuawei Xie 27079915bb1fSHuawei Xie } 27089915bb1fSHuawei Xie 2709435eb142SStephen Hemminger vdev->regions_hpa = rte_calloc("vhost hpa region", 2710435eb142SStephen Hemminger vdev->nregions_hpa, 2711435eb142SStephen Hemminger sizeof(struct virtio_memory_regions_hpa), 2712fdf20fa7SSergio Gonzalez Monroy RTE_CACHE_LINE_SIZE); 27139915bb1fSHuawei Xie if (vdev->regions_hpa == NULL) { 27149915bb1fSHuawei Xie RTE_LOG(ERR, VHOST_CONFIG, "Cannot allocate memory for hpa region\n"); 27159915bb1fSHuawei Xie rte_free(vdev); 27169915bb1fSHuawei Xie return -1; 27179915bb1fSHuawei Xie } 27189915bb1fSHuawei Xie 27199915bb1fSHuawei Xie 27209915bb1fSHuawei Xie if (fill_hpa_memory_regions( 27219915bb1fSHuawei Xie vdev->regions_hpa, dev->mem 27229915bb1fSHuawei Xie ) != vdev->nregions_hpa) { 27239915bb1fSHuawei Xie 27249915bb1fSHuawei Xie RTE_LOG(ERR, VHOST_CONFIG, 27259915bb1fSHuawei Xie "hpa memory regions number mismatch: " 27269915bb1fSHuawei Xie "[%d]\n", vdev->nregions_hpa); 27279915bb1fSHuawei Xie rte_free(vdev->regions_hpa); 27289915bb1fSHuawei Xie rte_free(vdev); 27299915bb1fSHuawei Xie return -1; 27309915bb1fSHuawei Xie } 27319915bb1fSHuawei Xie } 27329915bb1fSHuawei Xie 27339915bb1fSHuawei Xie 2734d19533e8SHuawei Xie /* Add device to main ll */ 2735d19533e8SHuawei Xie ll_dev = get_data_ll_free_entry(&ll_root_free); 2736d19533e8SHuawei Xie if (ll_dev == NULL) { 2737d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. 
Device limit "
2738d19533e8SHuawei Xie "of %d devices per core has been reached\n",
2739d19533e8SHuawei Xie dev->device_fh, num_devices);
27409915bb1fSHuawei Xie if (vdev->regions_hpa)
27419915bb1fSHuawei Xie rte_free(vdev->regions_hpa);
2742e571e6b4SHuawei Xie rte_free(vdev);
2743d19533e8SHuawei Xie return -1;
2744d19533e8SHuawei Xie }
2745e571e6b4SHuawei Xie ll_dev->vdev = vdev;
2746d19533e8SHuawei Xie add_data_ll_entry(&ll_root_used, ll_dev);
2747e571e6b4SHuawei Xie vdev->vmdq_rx_q
274884b02d16SHuawei Xie = dev->device_fh * queues_per_pool + vmdq_queue_base;
2749d19533e8SHuawei Xie 
2750d19533e8SHuawei Xie if (zero_copy) {
2751e571e6b4SHuawei Xie uint32_t index = vdev->vmdq_rx_q;
2752d19533e8SHuawei Xie uint32_t count_in_ring, i;
2753d19533e8SHuawei Xie struct mbuf_table *tx_q;
2754d19533e8SHuawei Xie 
2755d19533e8SHuawei Xie count_in_ring = rte_ring_count(vpool_array[index].ring);
2756d19533e8SHuawei Xie 
2757d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2758d19533e8SHuawei Xie "(%"PRIu64") in new_device: mbuf count in mempool "
2759d19533e8SHuawei Xie "before attach is: %d\n",
2760d19533e8SHuawei Xie dev->device_fh,
2761d19533e8SHuawei Xie rte_mempool_count(vpool_array[index].pool));
2762d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2763d19533e8SHuawei Xie "(%"PRIu64") in new_device: mbuf count in ring "
2764d19533e8SHuawei Xie "before attach is: %d\n",
2765d19533e8SHuawei Xie dev->device_fh, count_in_ring);
2766d19533e8SHuawei Xie 
2767d19533e8SHuawei Xie /*
2768d19533e8SHuawei Xie * Attach all mbufs in vpool.ring and put back into vpool.pool.
2769d19533e8SHuawei Xie */
2770d19533e8SHuawei Xie for (i = 0; i < count_in_ring; i++)
2771d19533e8SHuawei Xie attach_rxmbuf_zcp(dev);
2772d19533e8SHuawei Xie 
2773d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
2774d19533e8SHuawei Xie "mempool after attach is: %d\n",
2775d19533e8SHuawei Xie dev->device_fh,
2776d19533e8SHuawei Xie rte_mempool_count(vpool_array[index].pool));
2777d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
2778d19533e8SHuawei Xie "ring after attach is: %d\n",
2779d19533e8SHuawei Xie dev->device_fh,
2780d19533e8SHuawei Xie rte_ring_count(vpool_array[index].ring));
2781d19533e8SHuawei Xie 
2782e571e6b4SHuawei Xie tx_q = &tx_queue_zcp[(uint16_t)vdev->vmdq_rx_q];
2783e571e6b4SHuawei Xie tx_q->txq_id = vdev->vmdq_rx_q;
2784d19533e8SHuawei Xie 
2785e571e6b4SHuawei Xie if (rte_eth_dev_tx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
2786e571e6b4SHuawei Xie struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
2787d19533e8SHuawei Xie 
2788d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2789d19533e8SHuawei Xie "(%"PRIu64") In new_device: Failed to start "
2790d19533e8SHuawei Xie "tx queue:%d\n",
2791e571e6b4SHuawei Xie dev->device_fh, vdev->vmdq_rx_q);
2792d19533e8SHuawei Xie 
2793d19533e8SHuawei Xie mbuf_destroy_zcp(vpool);
27949915bb1fSHuawei Xie rte_free(vdev->regions_hpa);
2795e571e6b4SHuawei Xie rte_free(vdev);
2796d19533e8SHuawei Xie return -1;
2797d19533e8SHuawei Xie }
2798d19533e8SHuawei Xie 
2799e571e6b4SHuawei Xie if (rte_eth_dev_rx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
2800e571e6b4SHuawei Xie struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
2801d19533e8SHuawei Xie 
2802d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2803d19533e8SHuawei Xie "(%"PRIu64") In new_device: Failed to start "
2804d19533e8SHuawei Xie "rx queue:%d\n",
2805e571e6b4SHuawei Xie dev->device_fh, vdev->vmdq_rx_q);
2806d19533e8SHuawei Xie 
2807d19533e8SHuawei Xie /* Stop the TX queue.
*/
2808d19533e8SHuawei Xie if (rte_eth_dev_tx_queue_stop(ports[0],
2809e571e6b4SHuawei Xie vdev->vmdq_rx_q) != 0) {
2810d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2811d19533e8SHuawei Xie "(%"PRIu64") In new_device: Failed to "
2812d19533e8SHuawei Xie "stop tx queue:%d\n",
2813e571e6b4SHuawei Xie dev->device_fh, vdev->vmdq_rx_q);
2814d19533e8SHuawei Xie }
2815d19533e8SHuawei Xie 
2816d19533e8SHuawei Xie mbuf_destroy_zcp(vpool);
28179915bb1fSHuawei Xie rte_free(vdev->regions_hpa);
2818e571e6b4SHuawei Xie rte_free(vdev);
2819d19533e8SHuawei Xie return -1;
2820d19533e8SHuawei Xie }
2821d19533e8SHuawei Xie 
2822d19533e8SHuawei Xie }
2823d19533e8SHuawei Xie 
2824d19533e8SHuawei Xie /* Reset the ready flag. */
2825e571e6b4SHuawei Xie vdev->ready = DEVICE_MAC_LEARNING;
2826e571e6b4SHuawei Xie vdev->remove = 0;
2827d19533e8SHuawei Xie 
2828d19533e8SHuawei Xie /* Find a suitable lcore to add the device. */
2829d19533e8SHuawei Xie RTE_LCORE_FOREACH_SLAVE(lcore) {
2830d19533e8SHuawei Xie if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
2831d19533e8SHuawei Xie device_num_min = lcore_info[lcore].lcore_ll->device_num;
2832d19533e8SHuawei Xie core_add = lcore;
2833d19533e8SHuawei Xie }
2834d19533e8SHuawei Xie }
2835d19533e8SHuawei Xie /* Add device to lcore ll */
2836a981294bSHuawei Xie ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
2837d19533e8SHuawei Xie if (ll_dev == NULL) {
2838d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
2839e571e6b4SHuawei Xie vdev->ready = DEVICE_SAFE_REMOVE;
2840d19533e8SHuawei Xie destroy_device(dev);
28419915bb1fSHuawei Xie rte_free(vdev->regions_hpa);
2842e571e6b4SHuawei Xie rte_free(vdev);
2843d19533e8SHuawei Xie return -1;
2844d19533e8SHuawei Xie }
2845e571e6b4SHuawei Xie ll_dev->vdev = vdev;
2846e571e6b4SHuawei Xie vdev->coreid = core_add;
2847e571e6b4SHuawei Xie 
2848a981294bSHuawei Xie add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used, ll_dev);
2849d19533e8SHuawei Xie 
2850d19533e8SHuawei Xie /* Initialize device stats */
2851d19533e8SHuawei Xie memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
2852d19533e8SHuawei Xie 
2853d19533e8SHuawei Xie /* Disable notifications. */
2854b5967c1fSHuawei Xie rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
2855b5967c1fSHuawei Xie rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
2856e571e6b4SHuawei Xie lcore_info[vdev->coreid].lcore_ll->device_num++;
2857d19533e8SHuawei Xie dev->flags |= VIRTIO_DEV_RUNNING;
2858d19533e8SHuawei Xie 
2859e571e6b4SHuawei Xie RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, vdev->coreid);
2860d19533e8SHuawei Xie 
2861d19533e8SHuawei Xie return 0;
2862d19533e8SHuawei Xie }
2863d19533e8SHuawei Xie 
2864d19533e8SHuawei Xie /*
2865d19533e8SHuawei Xie * These callbacks allow devices to be added to the data core when configuration
2866d19533e8SHuawei Xie * has fully completed.
2867d19533e8SHuawei Xie */
2868d19533e8SHuawei Xie static const struct virtio_net_device_ops virtio_net_device_ops =
2869d19533e8SHuawei Xie {
2870d19533e8SHuawei Xie .new_device = new_device,
2871d19533e8SHuawei Xie .destroy_device = destroy_device,
2872d19533e8SHuawei Xie };
2873d19533e8SHuawei Xie 
2874d19533e8SHuawei Xie /*
2875d19533e8SHuawei Xie * This thread wakes up periodically to print stats if the user has
2876d19533e8SHuawei Xie * enabled them.
2877d19533e8SHuawei Xie */ 2878d19533e8SHuawei Xie static void 2879d19533e8SHuawei Xie print_stats(void) 2880d19533e8SHuawei Xie { 2881d19533e8SHuawei Xie struct virtio_net_data_ll *dev_ll; 2882d19533e8SHuawei Xie uint64_t tx_dropped, rx_dropped; 2883d19533e8SHuawei Xie uint64_t tx, tx_total, rx, rx_total; 2884d19533e8SHuawei Xie uint32_t device_fh; 2885d19533e8SHuawei Xie const char clr[] = { 27, '[', '2', 'J', '\0' }; 2886d19533e8SHuawei Xie const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' }; 2887d19533e8SHuawei Xie 2888d19533e8SHuawei Xie while(1) { 2889d19533e8SHuawei Xie sleep(enable_stats); 2890d19533e8SHuawei Xie 2891d19533e8SHuawei Xie /* Clear screen and move to top left */ 2892d19533e8SHuawei Xie printf("%s%s", clr, top_left); 2893d19533e8SHuawei Xie 2894d19533e8SHuawei Xie printf("\nDevice statistics ===================================="); 2895d19533e8SHuawei Xie 2896d19533e8SHuawei Xie dev_ll = ll_root_used; 2897d19533e8SHuawei Xie while (dev_ll != NULL) { 2898e571e6b4SHuawei Xie device_fh = (uint32_t)dev_ll->vdev->dev->device_fh; 2899d19533e8SHuawei Xie tx_total = dev_statistics[device_fh].tx_total; 2900d19533e8SHuawei Xie tx = dev_statistics[device_fh].tx; 2901d19533e8SHuawei Xie tx_dropped = tx_total - tx; 2902d19533e8SHuawei Xie if (zero_copy == 0) { 2903d19533e8SHuawei Xie rx_total = rte_atomic64_read( 2904d19533e8SHuawei Xie &dev_statistics[device_fh].rx_total_atomic); 2905d19533e8SHuawei Xie rx = rte_atomic64_read( 2906d19533e8SHuawei Xie &dev_statistics[device_fh].rx_atomic); 2907d19533e8SHuawei Xie } else { 2908d19533e8SHuawei Xie rx_total = dev_statistics[device_fh].rx_total; 2909d19533e8SHuawei Xie rx = dev_statistics[device_fh].rx; 2910d19533e8SHuawei Xie } 2911d19533e8SHuawei Xie rx_dropped = rx_total - rx; 2912d19533e8SHuawei Xie 2913d19533e8SHuawei Xie printf("\nStatistics for device %"PRIu32" ------------------------------" 2914d19533e8SHuawei Xie "\nTX total: %"PRIu64"" 2915d19533e8SHuawei Xie "\nTX dropped: %"PRIu64"" 2916d19533e8SHuawei Xie "\nTX successful: %"PRIu64"" 2917d19533e8SHuawei Xie "\nRX total: %"PRIu64"" 2918d19533e8SHuawei Xie "\nRX dropped: %"PRIu64"" 2919d19533e8SHuawei Xie "\nRX successful: %"PRIu64"", 2920d19533e8SHuawei Xie device_fh, 2921d19533e8SHuawei Xie tx_total, 2922d19533e8SHuawei Xie tx_dropped, 2923d19533e8SHuawei Xie tx, 2924d19533e8SHuawei Xie rx_total, 2925d19533e8SHuawei Xie rx_dropped, 2926d19533e8SHuawei Xie rx); 2927d19533e8SHuawei Xie 2928d19533e8SHuawei Xie dev_ll = dev_ll->next; 2929d19533e8SHuawei Xie } 2930d19533e8SHuawei Xie printf("\n======================================================\n"); 2931d19533e8SHuawei Xie } 2932d19533e8SHuawei Xie } 2933d19533e8SHuawei Xie 2934d19533e8SHuawei Xie static void 2935d19533e8SHuawei Xie setup_mempool_tbl(int socket, uint32_t index, char *pool_name, 2936d19533e8SHuawei Xie char *ring_name, uint32_t nb_mbuf) 2937d19533e8SHuawei Xie { 2938ea0c20eaSOlivier Matz vpool_array[index].pool = rte_pktmbuf_pool_create(pool_name, nb_mbuf, 2939ea0c20eaSOlivier Matz MBUF_CACHE_SIZE_ZCP, 0, MBUF_DATA_SIZE_ZCP, socket); 2940d19533e8SHuawei Xie if (vpool_array[index].pool != NULL) { 2941d19533e8SHuawei Xie vpool_array[index].ring 2942d19533e8SHuawei Xie = rte_ring_create(ring_name, 2943d19533e8SHuawei Xie rte_align32pow2(nb_mbuf + 1), 2944d19533e8SHuawei Xie socket, RING_F_SP_ENQ | RING_F_SC_DEQ); 2945d19533e8SHuawei Xie if (likely(vpool_array[index].ring != NULL)) { 2946d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG, 2947d19533e8SHuawei Xie "in setup_mempool_tbl: mbuf count in " 
2948d19533e8SHuawei Xie "mempool is: %d\n",
2949d19533e8SHuawei Xie rte_mempool_count(vpool_array[index].pool));
2950d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
2951d19533e8SHuawei Xie "in setup_mempool_tbl: mbuf count in "
2952d19533e8SHuawei Xie "ring is: %d\n",
2953d19533e8SHuawei Xie rte_ring_count(vpool_array[index].ring));
2954d19533e8SHuawei Xie } else {
2955d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, "ring_create(%s) failed",
2956d19533e8SHuawei Xie ring_name);
2957d19533e8SHuawei Xie }
2958d19533e8SHuawei Xie 
2959d19533e8SHuawei Xie /* Need to consider head room. */
29601d493a49SOlivier Matz vpool_array[index].buf_size = VIRTIO_DESCRIPTOR_LEN_ZCP;
2961d19533e8SHuawei Xie } else {
2962d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", pool_name);
2963d19533e8SHuawei Xie }
2964d19533e8SHuawei Xie }
2965d19533e8SHuawei Xie 
2966c83d2d00SOuyang Changchun /* When we receive an INT signal, unregister the vhost driver */
2967c83d2d00SOuyang Changchun static void
2968c83d2d00SOuyang Changchun sigint_handler(__rte_unused int signum)
2969c83d2d00SOuyang Changchun {
2970c83d2d00SOuyang Changchun /* Unregister vhost driver. */
2971c83d2d00SOuyang Changchun int ret = rte_vhost_driver_unregister((char *)&dev_basename);
2972c83d2d00SOuyang Changchun if (ret != 0)
2973c83d2d00SOuyang Changchun rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
2974c83d2d00SOuyang Changchun exit(0);
2975c83d2d00SOuyang Changchun }
2976d19533e8SHuawei Xie 
2977d19533e8SHuawei Xie /*
2978d19533e8SHuawei Xie * Main function, does initialisation and calls the per-lcore functions. The CUSE
2979d19533e8SHuawei Xie * device is also registered here to handle the IOCTLs.
2980d19533e8SHuawei Xie */
2981d19533e8SHuawei Xie int
298298a16481SDavid Marchand main(int argc, char *argv[])
2983d19533e8SHuawei Xie {
2984d19533e8SHuawei Xie struct rte_mempool *mbuf_pool = NULL;
2985d19533e8SHuawei Xie unsigned lcore_id, core_id = 0;
2986d19533e8SHuawei Xie unsigned nb_ports, valid_num_ports;
2987d19533e8SHuawei Xie int ret;
298884b02d16SHuawei Xie uint8_t portid;
298984b02d16SHuawei Xie uint16_t queue_id;
2990d19533e8SHuawei Xie static pthread_t tid;
299167b6d303SRavi Kerur char thread_name[RTE_MAX_THREAD_NAME_LEN];
2992d19533e8SHuawei Xie 
2993c83d2d00SOuyang Changchun signal(SIGINT, sigint_handler);
2994c83d2d00SOuyang Changchun 
2995d19533e8SHuawei Xie /* init EAL */
2996d19533e8SHuawei Xie ret = rte_eal_init(argc, argv);
2997d19533e8SHuawei Xie if (ret < 0)
2998d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
2999d19533e8SHuawei Xie argc -= ret;
3000d19533e8SHuawei Xie argv += ret;
3001d19533e8SHuawei Xie 
3002d19533e8SHuawei Xie /* parse app arguments */
3003d19533e8SHuawei Xie ret = us_vhost_parse_args(argc, argv);
3004d19533e8SHuawei Xie if (ret < 0)
3005d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, "Invalid argument\n");
3006d19533e8SHuawei Xie 
3007d19533e8SHuawei Xie for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
3008d19533e8SHuawei Xie if (rte_lcore_is_enabled(lcore_id))
3009d19533e8SHuawei Xie lcore_ids[core_id++] = lcore_id;
3010d19533e8SHuawei Xie 
3011d19533e8SHuawei Xie if (rte_lcore_count() > RTE_MAX_LCORE)
3012d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, "Not enough cores\n");
3013d19533e8SHuawei Xie 
3014d19533e8SHuawei Xie /* Set the number of switching cores available. */
3015d19533e8SHuawei Xie num_switching_cores = rte_lcore_count()-1;
3016d19533e8SHuawei Xie 
3017d19533e8SHuawei Xie /* Get the number of physical ports.
*/
3018d19533e8SHuawei Xie nb_ports = rte_eth_dev_count();
3019d19533e8SHuawei Xie if (nb_ports > RTE_MAX_ETHPORTS)
3020d19533e8SHuawei Xie nb_ports = RTE_MAX_ETHPORTS;
3021d19533e8SHuawei Xie 
3022d19533e8SHuawei Xie /*
3023d19533e8SHuawei Xie * Update the global var NUM_PORTS and global array PORTS,
3024d19533e8SHuawei Xie * and get the value of var VALID_NUM_PORTS according to the number of system ports.
3025d19533e8SHuawei Xie */
3026d19533e8SHuawei Xie valid_num_ports = check_ports_num(nb_ports);
3027d19533e8SHuawei Xie 
3028d19533e8SHuawei Xie if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
3029d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
3030d19533e8SHuawei Xie "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
3031d19533e8SHuawei Xie return -1;
3032d19533e8SHuawei Xie }
3033d19533e8SHuawei Xie 
3034d19533e8SHuawei Xie if (zero_copy == 0) {
3035d19533e8SHuawei Xie /* Create the mbuf pool. */
3036ea0c20eaSOlivier Matz mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
3037ea0c20eaSOlivier Matz NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE,
3038ea0c20eaSOlivier Matz 0, MBUF_DATA_SIZE, rte_socket_id());
3039d19533e8SHuawei Xie if (mbuf_pool == NULL)
3040d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
3041d19533e8SHuawei Xie 
3042d19533e8SHuawei Xie for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
3043d19533e8SHuawei Xie vpool_array[queue_id].pool = mbuf_pool;
3044d19533e8SHuawei Xie 
3045d19533e8SHuawei Xie if (vm2vm_mode == VM2VM_HARDWARE) {
3046d19533e8SHuawei Xie /* Enable VT loop back to let the L2 switch do it. */
3047d19533e8SHuawei Xie vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
3048d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
3049d19533e8SHuawei Xie "Enable loop back for L2 switch in vmdq.\n");
3050d19533e8SHuawei Xie }
3051d19533e8SHuawei Xie } else {
3052d19533e8SHuawei Xie uint32_t nb_mbuf;
3053d19533e8SHuawei Xie char pool_name[RTE_MEMPOOL_NAMESIZE];
3054d19533e8SHuawei Xie char ring_name[RTE_MEMPOOL_NAMESIZE];
3055d19533e8SHuawei Xie 
3056d19533e8SHuawei Xie nb_mbuf = num_rx_descriptor
3057d19533e8SHuawei Xie + num_switching_cores * MBUF_CACHE_SIZE_ZCP
3058d19533e8SHuawei Xie + num_switching_cores * MAX_PKT_BURST;
3059d19533e8SHuawei Xie 
3060d19533e8SHuawei Xie for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {
3061d19533e8SHuawei Xie snprintf(pool_name, sizeof(pool_name),
3062d19533e8SHuawei Xie "rxmbuf_pool_%u", queue_id);
3063d19533e8SHuawei Xie snprintf(ring_name, sizeof(ring_name),
3064d19533e8SHuawei Xie "rxmbuf_ring_%u", queue_id);
3065d19533e8SHuawei Xie setup_mempool_tbl(rte_socket_id(), queue_id,
3066d19533e8SHuawei Xie pool_name, ring_name, nb_mbuf);
3067d19533e8SHuawei Xie }
3068d19533e8SHuawei Xie 
3069d19533e8SHuawei Xie nb_mbuf = num_tx_descriptor
3070d19533e8SHuawei Xie + num_switching_cores * MBUF_CACHE_SIZE_ZCP
3071d19533e8SHuawei Xie + num_switching_cores * MAX_PKT_BURST;
3072d19533e8SHuawei Xie 
3073d19533e8SHuawei Xie for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {
3074d19533e8SHuawei Xie snprintf(pool_name, sizeof(pool_name),
3075d19533e8SHuawei Xie "txmbuf_pool_%u", queue_id);
3076d19533e8SHuawei Xie snprintf(ring_name, sizeof(ring_name),
3077d19533e8SHuawei Xie "txmbuf_ring_%u", queue_id);
3078d19533e8SHuawei Xie setup_mempool_tbl(rte_socket_id(),
3079d19533e8SHuawei Xie (queue_id + MAX_QUEUES),
3080d19533e8SHuawei Xie pool_name, ring_name, nb_mbuf);
3081d19533e8SHuawei Xie }
3082d19533e8SHuawei Xie 
3083d19533e8SHuawei Xie if (vm2vm_mode == VM2VM_HARDWARE) {
3084d19533e8SHuawei Xie /* Enable VT loop back to let the L2 switch do it. */
3085d19533e8SHuawei Xie vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
3086d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
3087d19533e8SHuawei Xie "Enable loop back for L2 switch in vmdq.\n");
3088d19533e8SHuawei Xie }
3089d19533e8SHuawei Xie }
3090d19533e8SHuawei Xie /* Set log level. */
3091d19533e8SHuawei Xie rte_set_log_level(LOG_LEVEL);
3092d19533e8SHuawei Xie 
3093d19533e8SHuawei Xie /* initialize all ports */
3094d19533e8SHuawei Xie for (portid = 0; portid < nb_ports; portid++) {
3095d19533e8SHuawei Xie /* skip ports that are not enabled */
3096d19533e8SHuawei Xie if ((enabled_port_mask & (1 << portid)) == 0) {
3097d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_PORT,
3098d19533e8SHuawei Xie "Skipping disabled port %d\n", portid);
3099d19533e8SHuawei Xie continue;
3100d19533e8SHuawei Xie }
3101d19533e8SHuawei Xie if (port_init(portid) != 0)
3102d19533e8SHuawei Xie rte_exit(EXIT_FAILURE,
3103d19533e8SHuawei Xie "Cannot initialize network ports\n");
3104d19533e8SHuawei Xie }
3105d19533e8SHuawei Xie 
3106d19533e8SHuawei Xie /* Initialise all linked lists. */
3107d19533e8SHuawei Xie if (init_data_ll() == -1)
3108d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");
3109d19533e8SHuawei Xie 
3110d19533e8SHuawei Xie /* Initialize device stats */
3111d19533e8SHuawei Xie memset(&dev_statistics, 0, sizeof(dev_statistics));
3112d19533e8SHuawei Xie 
3113d19533e8SHuawei Xie /* Enable stats if the user option is set. */
311467b6d303SRavi Kerur if (enable_stats) {
311567b6d303SRavi Kerur ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
311667b6d303SRavi Kerur if (ret != 0)
311767b6d303SRavi Kerur rte_exit(EXIT_FAILURE,
311867b6d303SRavi Kerur "Cannot create print-stats thread\n");
311967b6d303SRavi Kerur 
312067b6d303SRavi Kerur /* Set thread_name to aid debugging. */
312167b6d303SRavi Kerur snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
3122badb3688SFerruh Yigit ret = rte_thread_setname(tid, thread_name);
312367b6d303SRavi Kerur if (ret != 0)
312467b6d303SRavi Kerur RTE_LOG(ERR, VHOST_CONFIG,
312567b6d303SRavi Kerur "Cannot set print-stats name\n");
312667b6d303SRavi Kerur }
3127d19533e8SHuawei Xie 
3128d19533e8SHuawei Xie /* Launch all data cores. */
3129d19533e8SHuawei Xie if (zero_copy == 0) {
3130d19533e8SHuawei Xie RTE_LCORE_FOREACH_SLAVE(lcore_id) {
3131d19533e8SHuawei Xie rte_eal_remote_launch(switch_worker,
3132d19533e8SHuawei Xie mbuf_pool, lcore_id);
3133d19533e8SHuawei Xie }
3134d19533e8SHuawei Xie } else {
3135d19533e8SHuawei Xie uint32_t count_in_mempool, index, i;
3136d19533e8SHuawei Xie for (index = 0; index < 2*MAX_QUEUES; index++) {
3137d19533e8SHuawei Xie /* For all RX and TX queues. */
3138d19533e8SHuawei Xie count_in_mempool
3139d19533e8SHuawei Xie = rte_mempool_count(vpool_array[index].pool);
3140d19533e8SHuawei Xie 
3141d19533e8SHuawei Xie /*
3142d19533e8SHuawei Xie * Transfer all unattached mbufs from vpool.pool
3143d19533e8SHuawei Xie * to vpool.ring.
3144d19533e8SHuawei Xie */
3145d19533e8SHuawei Xie for (i = 0; i < count_in_mempool; i++) {
3146d19533e8SHuawei Xie struct rte_mbuf *mbuf
3147d19533e8SHuawei Xie = __rte_mbuf_raw_alloc(
3148d19533e8SHuawei Xie vpool_array[index].pool);
3149d19533e8SHuawei Xie rte_ring_sp_enqueue(vpool_array[index].ring,
3150d19533e8SHuawei Xie (void *)mbuf);
3151d19533e8SHuawei Xie }
3152d19533e8SHuawei Xie 
3153d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
315498a16481SDavid Marchand "in main: mbuf count in mempool at initial "
3155d19533e8SHuawei Xie "is: %d\n", count_in_mempool);
3156d19533e8SHuawei Xie LOG_DEBUG(VHOST_CONFIG,
315798a16481SDavid Marchand "in main: mbuf count in ring at initial is:"
3158d19533e8SHuawei Xie " %d\n",
3159d19533e8SHuawei Xie rte_ring_count(vpool_array[index].ring));
3160d19533e8SHuawei Xie }
3161d19533e8SHuawei Xie 
3162d19533e8SHuawei Xie RTE_LCORE_FOREACH_SLAVE(lcore_id)
3163d19533e8SHuawei Xie rte_eal_remote_launch(switch_worker_zcp, NULL,
3164d19533e8SHuawei Xie lcore_id);
3165d19533e8SHuawei Xie }
3166d19533e8SHuawei Xie 
316728deb020SHuawei Xie if (mergeable == 0)
316828deb020SHuawei Xie rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
316928deb020SHuawei Xie 
317029c0f3c8SOuyang Changchun /* Register vhost (cuse or user) driver to handle vhost messages. */
31715cf27144SHuawei Xie ret = rte_vhost_driver_register((char *)&dev_basename);
3172d19533e8SHuawei Xie if (ret != 0)
317329c0f3c8SOuyang Changchun rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");
3174d19533e8SHuawei Xie 
31755cf27144SHuawei Xie rte_vhost_driver_callback_register(&virtio_net_device_ops);
3176d19533e8SHuawei Xie 
3177d19533e8SHuawei Xie /* Start CUSE session. */
31785cf27144SHuawei Xie rte_vhost_driver_session_start();
3179d19533e8SHuawei Xie return 0;
3180d19533e8SHuawei Xie 
3181d19533e8SHuawei Xie }