/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_alarm.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all the per-virtqueue messages, make sure VHOST_SET_VRING_CALL
	 * comes first, because vhost depends on this message to allocate
	 * the virtqueue pair.
	 */
	struct vhost_vring_file file;
	int ret;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	ret = dev->ops->set_vring_call(dev, &file);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u", dev->path, queue_sel);
		return -1;
	}

	return 0;
}

static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	int ret;
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings.split[queue_sel];
	struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (queue_sel == dev->max_queue_pairs * 2) {
		if (!dev->scvq) {
			PMD_INIT_LOG(ERR, "(%s) Shadow control queue expected but missing",
					dev->path);
			goto err;
		}

		/* Use shadow control queue information */
		vring = &dev->scvq->vq_split.ring;
		pq_vring = &dev->scvq->vq_packed.ring;
	}

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	ret = dev->ops->set_vring_num(dev, &state);
	if (ret < 0)
		goto err;

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	ret = dev->ops->set_vring_base(dev, &state);
	if (ret < 0)
		goto err;

	ret = dev->ops->set_vring_addr(dev, &addr);
	if (ret < 0)
		goto err;

	/* Of all the per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost relies on this message to judge whether
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	ret = dev->ops->set_vring_kick(dev, &file);
	if (ret < 0)
		goto err;

	return 0;
err:
	PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u", dev->path, queue_sel);

	return -1;
}

static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, nr_vq;

	nr_vq = dev->max_queue_pairs * 2;
	if (dev->hw_cvq)
		nr_vq++;

	for (i = 0; i < nr_vq; i++) {
		if (fn(dev, i) < 0) {
			PMD_DRV_LOG(ERR, "(%s) setup VQ %u failed", dev->path, i);
			return -1;
		}
	}

	return 0;
}

int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ if the device does not really support the control VQ */
	if (!dev->hw_cvq)
		features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->set_features(dev, features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	/* Step 2: share memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	ret = virtio_user_queue_setup(dev, virtio_user_kick_queue);
	if (ret < 0)
		goto error;

	/* Step 4: enable queues.
	 * We enable the first queue pair by default.
	 */
	ret = dev->ops->enable_qp(dev, 0, 1);
	if (ret < 0)
		goto error;

	dev->started = true;

	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	PMD_INIT_LOG(ERR, "(%s) Failed to start device", dev->path);

	/* TODO: free resource here or caller to check */
	return -1;
}

int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int ret;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto err;
	}

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		ret = dev->ops->get_vring_base(dev, &state);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "(%s) get_vring_base failed, index=%u", dev->path, i);
			goto err;
		}
	}

	dev->started = false;

out:
	pthread_mutex_unlock(&dev->mutex);

	return 0;
err:
	pthread_mutex_unlock(&dev->mutex);

	PMD_INIT_LOG(ERR, "(%s) Failed to stop device", dev->path);

	return -1;
}

static int
virtio_user_dev_init_max_queue_pairs(struct virtio_user_dev *dev, uint32_t user_max_qp)
{
	int ret;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MQ))) {
		dev->max_queue_pairs = 1;
		return 0;
	}

	if (!dev->ops->get_config) {
		dev->max_queue_pairs = user_max_qp;
		return 0;
	}

	ret = dev->ops->get_config(dev, (uint8_t *)&dev->max_queue_pairs,
			offsetof(struct virtio_net_config, max_virtqueue_pairs),
			sizeof(uint16_t));
	if (ret) {
		/*
		 * We need to know the maximum number of queue pairs from the
		 * device so that the control queue gets the right index.
		 */
		dev->max_queue_pairs = 1;
		PMD_DRV_LOG(ERR, "(%s) Failed to get max queue pairs from device", dev->path);

		return ret;
	}

	return 0;
}

int
virtio_user_dev_set_mac(struct virtio_user_dev *dev)
{
	int ret = 0;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
		return -ENOTSUP;

	if (!dev->ops->set_config)
		return -ENOTSUP;

	ret = dev->ops->set_config(dev, dev->mac_addr,
			offsetof(struct virtio_net_config, mac),
			RTE_ETHER_ADDR_LEN);
	if (ret)
		PMD_DRV_LOG(ERR, "(%s) Failed to set MAC address in device", dev->path);

	return ret;
}

int
virtio_user_dev_get_mac(struct virtio_user_dev *dev)
{
	int ret = 0;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
		return -ENOTSUP;

	if (!dev->ops->get_config)
		return -ENOTSUP;

	ret = dev->ops->get_config(dev, dev->mac_addr,
			offsetof(struct virtio_net_config, mac),
			RTE_ETHER_ADDR_LEN);
	if (ret)
		PMD_DRV_LOG(ERR, "(%s) Failed to get MAC address from device", dev->path);

	return ret;
}

static void
virtio_user_dev_init_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr cmdline_mac;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	if (mac && rte_ether_unformat_addr(mac, &cmdline_mac) == 0) {
		/*
		 * The MAC address was passed on the command line; try to store
		 * it in the device if it supports it. Otherwise, fall back to
		 * the one reported by the device.
		 */
		memcpy(dev->mac_addr, &cmdline_mac, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;

		/* Setting MAC may fail, continue to get the device one in this case */
		virtio_user_dev_set_mac(dev);
		ret = virtio_user_dev_get_mac(dev);
		if (ret == -ENOTSUP)
			goto out;

		if (memcmp(&cmdline_mac, dev->mac_addr, RTE_ETHER_ADDR_LEN))
			PMD_DRV_LOG(INFO, "(%s) Device MAC update failed", dev->path);
	} else {
		ret = virtio_user_dev_get_mac(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "(%s) No valid MAC in devargs or device, use random",
					dev->path);
			return;
		}

		dev->mac_specified = 1;
	}
out:
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE,
			(struct rte_ether_addr *)dev->mac_addr);
	PMD_DRV_LOG(INFO, "(%s) MAC %s specified", dev->path, buf);
}

static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j, nr_vq;
	int callfd;
	int kickfd;

	nr_vq = dev->max_queue_pairs * 2;
	if (dev->hw_cvq)
		nr_vq++;

	for (i = 0; i < nr_vq; i++) {
		/* We could use an invalid flag, but some backends use the kickfd
		 * and callfd as criteria to judge whether the device is alive,
		 * so real eventfds are used.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	return 0;
err:
	for (j = 0; j < i; j++) {
		if (dev->kickfds[j] >= 0) {
			close(dev->kickfds[j]);
			dev->kickfds[j] = -1;
		}
		if (dev->callfds[j] >= 0) {
			close(dev->callfds[j]);
			dev->callfds[j] = -1;
		}
	}

	return -1;
}

static void
virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
{
	uint32_t i;

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		if (dev->kickfds[i] >= 0) {
			close(dev->kickfds[i]);
			dev->kickfds[i] = -1;
		}
		if (dev->callfds[i] >= 0) {
			close(dev->callfds[i]);
			dev->callfds[i] = -1;
		}
	}
}

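/* Plug the per-queue call eventfds into the ethdev interrupt handle so that
 * Rx interrupts can be serviced through the standard ethdev interrupt API.
 */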
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	if (eth_dev->intr_handle == NULL) {
		eth_dev->intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (eth_dev->intr_handle == NULL) {
			PMD_DRV_LOG(ERR, "(%s) failed to allocate intr_handle", dev->path);
			return -1;
		}
	}

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		if (rte_intr_efds_index_set(eth_dev->intr_handle, i,
				dev->callfds[2 * i + VTNET_SQ_RQ_QUEUE_IDX]))
			return -rte_errno;
	}

	if (rte_intr_nb_efd_set(eth_dev->intr_handle, dev->max_queue_pairs))
		return -rte_errno;

	if (rte_intr_max_intr_set(eth_dev->intr_handle,
			dev->max_queue_pairs + 1))
		return -rte_errno;

	if (rte_intr_type_set(eth_dev->intr_handle, RTE_INTR_HANDLE_VDEV))
		return -rte_errno;

	/* For virtio vdev, no need to read counter for clean */
	if (rte_intr_efd_counter_size_set(eth_dev->intr_handle, 0))
		return -rte_errno;

	if (rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev)))
		return -rte_errno;

	return 0;
}

static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;
	int ret = 0;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto exit;
	}

	/* Step 2: update memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto exit;

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 1);
		if (ret < 0)
			goto exit;
	}

exit:
	pthread_mutex_unlock(&dev->mutex);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "(%s) Failed to update memory table", dev->path);
}

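/* Select the backend ops matching the requested backend type (vhost-user,
 * vhost-kernel or vhost-vDPA) and let the backend set itself up.
 */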
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
	}

	switch (dev->backend_type) {
	case VIRTIO_USER_BACKEND_VHOST_USER:
		dev->ops = &virtio_ops_user;
		break;
	case VIRTIO_USER_BACKEND_VHOST_KERNEL:
		dev->ops = &virtio_ops_kernel;
		break;
	case VIRTIO_USER_BACKEND_VHOST_VDPA:
		dev->ops = &virtio_ops_vdpa;
		break;
	default:
		PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
		return -1;
	}

	if (dev->ops->setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to setup backend", dev->path);
		return -1;
	}

	return 0;
}

static int
virtio_user_alloc_vrings(struct virtio_user_dev *dev)
{
	int i, size, nr_vrings;
	bool packed_ring = !!(dev->device_features & (1ull << VIRTIO_F_RING_PACKED));

	nr_vrings = dev->max_queue_pairs * 2;
	if (dev->device_features & (1ull << VIRTIO_NET_F_MQ))
		nr_vrings++;

	dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->callfds), 0);
	if (!dev->callfds) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc callfds", dev->path);
		return -1;
	}

	dev->kickfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->kickfds), 0);
	if (!dev->kickfds) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc kickfds", dev->path);
		goto free_callfds;
	}

	for (i = 0; i < nr_vrings; i++) {
		dev->callfds[i] = -1;
		dev->kickfds[i] = -1;
	}

	if (packed_ring)
		size = sizeof(*dev->vrings.packed);
	else
		size = sizeof(*dev->vrings.split);
	dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size, 0);
	if (!dev->vrings.ptr) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc vrings metadata", dev->path);
		goto free_kickfds;
	}

	if (packed_ring) {
		dev->packed_queues = rte_zmalloc("virtio_user_dev",
				nr_vrings * sizeof(*dev->packed_queues), 0);
		if (!dev->packed_queues) {
			PMD_INIT_LOG(ERR, "(%s) Failed to alloc packed queues metadata",
					dev->path);
			goto free_vrings;
		}
	}

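	/* Track which queue pairs are currently enabled */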
	dev->qp_enabled = rte_zmalloc("virtio_user_dev",
			dev->max_queue_pairs * sizeof(*dev->qp_enabled), 0);
	if (!dev->qp_enabled) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc QP enable states", dev->path);
		goto free_packed_queues;
	}

	return 0;

free_packed_queues:
	rte_free(dev->packed_queues);
	dev->packed_queues = NULL;
free_vrings:
	rte_free(dev->vrings.ptr);
	dev->vrings.ptr = NULL;
free_kickfds:
	rte_free(dev->kickfds);
	dev->kickfds = NULL;
free_callfds:
	rte_free(dev->callfds);
	dev->callfds = NULL;

	return -1;
}

static void
virtio_user_free_vrings(struct virtio_user_dev *dev)
{
	rte_free(dev->qp_enabled);
	dev->qp_enabled = NULL;
	rte_free(dev->packed_queues);
	dev->packed_queues = NULL;
	rte_free(dev->vrings.ptr);
	dev->vrings.ptr = NULL;
	rte_free(dev->kickfds);
	dev->kickfds = NULL;
	rte_free(dev->callfds);
	dev->callfds = NULL;
}

/* Use the macro below to filter features coming from the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED)

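/* Initialize a virtio_user device: set up the backend, build the feature set
 * exposed to the virtio layer, allocate vring metadata and notification
 * eventfds, and register the memory event callback.
 */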
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t backend_features;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);

	dev->started = 0;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = 0;
	dev->backend_type = backend_type;

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) backend set up fails", dev->path);
		return -1;
	}

	if (dev->ops->set_owner(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
		goto destroy;
	}

	if (dev->ops->get_backend_features(&backend_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
		goto destroy;
	}

	dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
		goto destroy;
	}

	virtio_user_dev_init_mac(dev, mac);

	if (virtio_user_dev_init_max_queue_pairs(dev, queues))
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);

	if (dev->max_queue_pairs > 1)
		cq = 1;

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not really need to know anything about the
		 * control queue, so if necessary we just claim to support one.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, so we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	dev->frontend_features &= ~dev->unsupported_features;
	dev->device_features &= ~dev->unsupported_features;

	if (virtio_user_alloc_vrings(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to allocate vring metadata", dev->path);
		goto destroy;
	}

	if (virtio_user_dev_init_notify(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
		goto free_vrings;
	}

	if (virtio_user_fill_intr_handle(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init interrupt handler", dev->path);
		goto notify_uninit;
	}

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback",
					dev->path);
			goto notify_uninit;
		}
	}

	return 0;

notify_uninit:
	virtio_user_dev_uninit_notify(dev);
free_vrings:
	virtio_user_free_vrings(dev);
destroy:
	dev->ops->destroy(dev);

	return -1;
}

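/* Release all resources acquired by virtio_user_dev_init() */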
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	rte_intr_instance_free(eth_dev->intr_handle);
	eth_dev->intr_handle = NULL;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	virtio_user_dev_uninit_notify(dev);

	virtio_user_free_vrings(dev);

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);

	dev->ops->destroy(dev);
}

static uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "(%s) multi-q config %u, but only %u supported",
			     dev->path, q_pairs, dev->max_queue_pairs);
		return -1;
	}

	for (i = 0; i < q_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 1);
	for (i = q_pairs; i < dev->max_queue_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 0);

	if (dev->scvq)
		ret |= dev->ops->cvq_enable(dev, 1);

	dev->queue_pairs = q_pairs;

	return ret;
}

#define CVQ_MAX_DATA_DESCS 32

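/* Emulate the control queue on a split ring: parse one control command,
 * apply multiqueue changes locally, forward the command to the device through
 * the shadow control queue when one exists, and write the ack status back
 * into the status descriptor.
 */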
static uint32_t
virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
		    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;
	int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		dlen[nb_dlen++] = vring->desc[i].len;
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	if (!status && dev->scvq)
		status = virtio_send_command(&dev->scvq->cq,
				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;
	int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		dlen[nb_dlen++] = vring->desc[idx_status].len;
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	if (!status && dev->scvq)
		status = virtio_send_command(&dev->scvq->cq,
				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

static void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->vrings.packed[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

static void
virtio_user_handle_cq_split(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings.split[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg_split(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

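/* Drain and service the control queue, dispatching to the packed or split
 * ring handler depending on the negotiated ring layout.
 */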
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	if (virtio_with_packed_queue(&dev->hw))
		virtio_user_handle_cq_packed(dev, queue_idx);
	else
		virtio_user_handle_cq_split(dev, queue_idx);
}

static void
virtio_user_control_queue_notify(struct virtqueue *vq, void *cookie)
{
	struct virtio_user_dev *dev = cookie;
	uint64_t buf = 1;

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

int
virtio_user_dev_create_shadow_cvq(struct virtio_user_dev *dev, struct virtqueue *vq)
{
	char name[VIRTQUEUE_MAX_NAME_SZ];
	struct virtqueue *scvq;

	snprintf(name, sizeof(name), "port%d_shadow_cvq", vq->hw->port_id);
	scvq = virtqueue_alloc(&dev->hw, vq->vq_queue_index, vq->vq_nentries,
			VTNET_CQ, SOCKET_ID_ANY, name);
	if (!scvq) {
		PMD_INIT_LOG(ERR, "(%s) Failed to alloc shadow control vq\n", dev->path);
		return -ENOMEM;
	}

	scvq->cq.notify_queue = &virtio_user_control_queue_notify;
	scvq->cq.notify_cookie = dev;
	dev->scvq = scvq;

	return 0;
}

void
virtio_user_dev_destroy_shadow_cvq(struct virtio_user_dev *dev)
{
	if (!dev->scvq)
		return;

	virtqueue_free(dev->scvq);
	dev->scvq = NULL;
}

int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	ret = dev->ops->set_status(dev, status);
	if (ret && ret != -ENOTSUP)
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path);

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

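/* Refresh the cached device status from the backend and log the decoded
 * status bits on success.
 */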
int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	int ret;
	uint8_t status;

	pthread_mutex_lock(&dev->mutex);

	ret = dev->ops->get_status(dev, &status);
	if (!ret) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path);
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
{
	if (dev->ops->update_link_state)
		return dev->ops->update_link_state(dev);

	return 0;
}

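/* Quiesce the datapath and reset all Rx/Tx vrings; with packed rings this is
 * required on reconnection, as noted in virtio_user_dev_server_reconnect().
 */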
116194973531SMaxime Coquelin for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
116294973531SMaxime Coquelin rxvq = eth_dev->data->rx_queues[i];
11633169550fSMaxime Coquelin virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
116494973531SMaxime Coquelin virtio_dev_rx_queue_setup_finish(eth_dev, i);
116594973531SMaxime Coquelin }
116694973531SMaxime Coquelin
116794973531SMaxime Coquelin for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
116894973531SMaxime Coquelin txvq = eth_dev->data->tx_queues[i];
11693169550fSMaxime Coquelin virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
117094973531SMaxime Coquelin }
117194973531SMaxime Coquelin
117294973531SMaxime Coquelin hw->started = 1;
117394973531SMaxime Coquelin rte_spinlock_unlock(&hw->state_lock);
117494973531SMaxime Coquelin }
117594973531SMaxime Coquelin
117694973531SMaxime Coquelin void
117723abee9dSIlya Maximets virtio_user_dev_delayed_disconnect_handler(void *param)
117894973531SMaxime Coquelin {
117994973531SMaxime Coquelin struct virtio_user_dev *dev = param;
11806564ddcdSDavid Marchand struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
118194973531SMaxime Coquelin
118294973531SMaxime Coquelin if (rte_intr_disable(eth_dev->intr_handle) < 0) {
118394973531SMaxime Coquelin PMD_DRV_LOG(ERR, "interrupt disable failed");
118494973531SMaxime Coquelin return;
118594973531SMaxime Coquelin }
118623abee9dSIlya Maximets PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
1187d61138d4SHarman Kalra rte_intr_fd_get(eth_dev->intr_handle));
118823abee9dSIlya Maximets if (rte_intr_callback_unregister(eth_dev->intr_handle,
118923abee9dSIlya Maximets virtio_interrupt_handler,
119023abee9dSIlya Maximets eth_dev) != 1)
119123abee9dSIlya Maximets PMD_DRV_LOG(ERR, "interrupt unregister failed");
119223abee9dSIlya Maximets
119394973531SMaxime Coquelin if (dev->is_server) {
119494973531SMaxime Coquelin if (dev->ops->server_disconnect)
119594973531SMaxime Coquelin dev->ops->server_disconnect(dev);
119623abee9dSIlya Maximets
1197d61138d4SHarman Kalra rte_intr_fd_set(eth_dev->intr_handle,
1198d61138d4SHarman Kalra dev->ops->get_intr_fd(dev));
119923abee9dSIlya Maximets
120023abee9dSIlya Maximets PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
1201d61138d4SHarman Kalra rte_intr_fd_get(eth_dev->intr_handle));
120223abee9dSIlya Maximets
120323abee9dSIlya Maximets if (rte_intr_callback_register(eth_dev->intr_handle,
120423abee9dSIlya Maximets virtio_interrupt_handler,
120523abee9dSIlya Maximets eth_dev))
120623abee9dSIlya Maximets PMD_DRV_LOG(ERR, "interrupt register failed");
120723abee9dSIlya Maximets
120894973531SMaxime Coquelin if (rte_intr_enable(eth_dev->intr_handle) < 0) {
120994973531SMaxime Coquelin PMD_DRV_LOG(ERR, "interrupt enable failed");
121094973531SMaxime Coquelin return;
121194973531SMaxime Coquelin }
121294973531SMaxime Coquelin }
121394973531SMaxime Coquelin }
121494973531SMaxime Coquelin
121523abee9dSIlya Maximets static void
121623abee9dSIlya Maximets virtio_user_dev_delayed_intr_reconfig_handler(void *param)
121723abee9dSIlya Maximets {
121823abee9dSIlya Maximets struct virtio_user_dev *dev = param;
121923abee9dSIlya Maximets struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
122023abee9dSIlya Maximets
122123abee9dSIlya Maximets PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
1222d61138d4SHarman Kalra rte_intr_fd_get(eth_dev->intr_handle));
122323abee9dSIlya Maximets
122423abee9dSIlya Maximets if (rte_intr_callback_unregister(eth_dev->intr_handle,
122523abee9dSIlya Maximets virtio_interrupt_handler,
122623abee9dSIlya Maximets eth_dev) != 1)
122723abee9dSIlya Maximets PMD_DRV_LOG(ERR, "interrupt unregister failed");
122823abee9dSIlya Maximets
1229d61138d4SHarman Kalra rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev));
123023abee9dSIlya Maximets
1231d61138d4SHarman Kalra PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
1232d61138d4SHarman Kalra rte_intr_fd_get(eth_dev->intr_handle));
123323abee9dSIlya Maximets
123423abee9dSIlya Maximets if (rte_intr_callback_register(eth_dev->intr_handle,
123523abee9dSIlya Maximets virtio_interrupt_handler, eth_dev))
123623abee9dSIlya Maximets PMD_DRV_LOG(ERR, "interrupt register failed");
123723abee9dSIlya Maximets
123823abee9dSIlya Maximets if (rte_intr_enable(eth_dev->intr_handle) < 0)
123923abee9dSIlya Maximets PMD_DRV_LOG(ERR, "interrupt enable failed");
124023abee9dSIlya Maximets }
124123abee9dSIlya Maximets
124294973531SMaxime Coquelin int
124394973531SMaxime Coquelin virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
124494973531SMaxime Coquelin {
124594973531SMaxime Coquelin int ret, old_status;
12466564ddcdSDavid Marchand struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
124794973531SMaxime Coquelin struct virtio_hw *hw = &dev->hw;
124894973531SMaxime Coquelin
124994973531SMaxime Coquelin if (!dev->ops->server_reconnect) {
125094973531SMaxime Coquelin PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
125194973531SMaxime Coquelin return -1;
125294973531SMaxime Coquelin }
125394973531SMaxime Coquelin
125494973531SMaxime Coquelin if (dev->ops->server_reconnect(dev)) {
125594973531SMaxime Coquelin PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
125694973531SMaxime Coquelin return -1;
125794973531SMaxime Coquelin }
125894973531SMaxime Coquelin
125994973531SMaxime Coquelin old_status = dev->status;
126094973531SMaxime Coquelin
126194973531SMaxime Coquelin virtio_reset(hw);
126294973531SMaxime Coquelin
126394973531SMaxime Coquelin virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
126494973531SMaxime Coquelin
126594973531SMaxime Coquelin virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
126694973531SMaxime Coquelin
126794973531SMaxime Coquelin if (dev->ops->get_features(dev, &dev->device_features) < 0) {
126894973531SMaxime Coquelin PMD_INIT_LOG(ERR, "get_features failed: %s",
126994973531SMaxime Coquelin strerror(errno));
127094973531SMaxime Coquelin return -1;
127194973531SMaxime Coquelin }
127294973531SMaxime Coquelin
127394973531SMaxime Coquelin /* unmask vhost-user unsupported features */
127494973531SMaxime Coquelin dev->device_features &= ~(dev->unsupported_features);
127594973531SMaxime Coquelin
1276f078c2f0SMaxime Coquelin dev->features &= (dev->device_features | dev->frontend_features);
127794973531SMaxime Coquelin
127894973531SMaxime Coquelin /* For packed ring, resetting queues is required in reconnection. */
127994973531SMaxime Coquelin if (virtio_with_packed_queue(hw) &&
128094973531SMaxime Coquelin (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
128194973531SMaxime Coquelin PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
128294973531SMaxime Coquelin " when packed ring reconnecting.");
128394973531SMaxime Coquelin virtio_user_dev_reset_queues_packed(eth_dev);
128494973531SMaxime Coquelin }
128594973531SMaxime Coquelin
128694973531SMaxime Coquelin virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
128794973531SMaxime Coquelin
128894973531SMaxime Coquelin /* Start the device */
128994973531SMaxime Coquelin virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
129094973531SMaxime Coquelin if (!dev->started)
129194973531SMaxime Coquelin return -1;
129294973531SMaxime Coquelin
129394973531SMaxime Coquelin if (dev->queue_pairs > 1) {
129494973531SMaxime Coquelin ret = virtio_user_handle_mq(dev, dev->queue_pairs);
129594973531SMaxime Coquelin if (ret != 0) {
129694973531SMaxime Coquelin PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
129794973531SMaxime Coquelin return -1;
129894973531SMaxime Coquelin }
129994973531SMaxime Coquelin }
130094973531SMaxime Coquelin if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
130194973531SMaxime Coquelin if (rte_intr_disable(eth_dev->intr_handle) < 0) {
130294973531SMaxime Coquelin PMD_DRV_LOG(ERR, "interrupt disable failed");
130394973531SMaxime Coquelin return -1;
130494973531SMaxime Coquelin }
130523abee9dSIlya Maximets /*
130623abee9dSIlya Maximets * This function can be called from the interrupt handler, so
130723abee9dSIlya Maximets * we can't unregister interrupt handler here. Setting
130823abee9dSIlya Maximets * alarm to do that later.
130923abee9dSIlya Maximets */
131023abee9dSIlya Maximets rte_eal_alarm_set(1,
131123abee9dSIlya Maximets virtio_user_dev_delayed_intr_reconfig_handler,
131223abee9dSIlya Maximets (void *)dev);
131394973531SMaxime Coquelin }
131494973531SMaxime Coquelin PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
131594973531SMaxime Coquelin return 0;
131694973531SMaxime Coquelin }
1317
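
virtio_user_control_queue_notify() in the listing above kicks the backend by writing an 8-byte counter value to the queue's kick fd, which is an eventfd. The following is a minimal standalone sketch of that kick mechanism only, assuming a Linux host; the demo_* names are hypothetical and are not driver code.

/* Sketch: eventfd write/read semantics behind the control-queue kick.
 * Not part of the driver; demo_kickfd is a hypothetical stand-in for
 * dev->kickfds[queue_index].
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int
main(void)
{
	uint64_t kick = 1;	/* same 8-byte counter value the driver writes */
	uint64_t seen = 0;
	int demo_kickfd = eventfd(0, 0);

	if (demo_kickfd < 0) {
		perror("eventfd");
		return 1;
	}

	/* Frontend side: signal that new descriptors are available. */
	if (write(demo_kickfd, &kick, sizeof(kick)) != sizeof(kick)) {
		perror("write");
		return 1;
	}

	/* Backend side: reading drains the counter and consumes the kick. */
	if (read(demo_kickfd, &seen, sizeof(seen)) != sizeof(seen)) {
		perror("read");
		return 1;
	}

	printf("kick counter read back: %" PRIu64 "\n", seen);
	close(demo_kickfd);
	return 0;
}

Because an eventfd write only increments a kernel-side counter, repeated kicks issued before the backend reads coalesce into a single wakeup, which is why a plain 8-byte write of 1 is sufficient here.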
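
virtio_user_dev_update_status() decodes the status byte returned by the backend into individual bits for its debug log. Below is a small self-contained sketch of the same decoding; the local DEMO_STATUS_* macros follow the virtio specification values and are assumed stand-ins for the driver's VIRTIO_CONFIG_STATUS_* definitions, used here for illustration only.

/* Sketch: decode a virtio device status byte the way the debug log above
 * presents it. DEMO_STATUS_* are local stand-ins, not driver macros.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_STATUS_RESET		0x00
#define DEMO_STATUS_ACK			0x01
#define DEMO_STATUS_DRIVER		0x02
#define DEMO_STATUS_DRIVER_OK		0x04
#define DEMO_STATUS_FEATURES_OK		0x08
#define DEMO_STATUS_DEV_NEED_RESET	0x40
#define DEMO_STATUS_FAILED		0x80

static void
demo_dump_status(uint8_t status)
{
	printf("Device Status(0x%02x):\n", status);
	printf("\t-RESET: %u\n", status == DEMO_STATUS_RESET);
	printf("\t-ACKNOWLEDGE: %u\n", !!(status & DEMO_STATUS_ACK));
	printf("\t-DRIVER: %u\n", !!(status & DEMO_STATUS_DRIVER));
	printf("\t-DRIVER_OK: %u\n", !!(status & DEMO_STATUS_DRIVER_OK));
	printf("\t-FEATURES_OK: %u\n", !!(status & DEMO_STATUS_FEATURES_OK));
	printf("\t-DEVICE_NEED_RESET: %u\n", !!(status & DEMO_STATUS_DEV_NEED_RESET));
	printf("\t-FAILED: %u\n", !!(status & DEMO_STATUS_FAILED));
}

int
main(void)
{
	/* A fully initialized device: ACK | DRIVER | FEATURES_OK | DRIVER_OK. */
	demo_dump_status(0x0f);
	return 0;
}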
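
As the comment near the end of virtio_user_dev_server_reconnect() explains, the interrupt callback cannot be unregistered from within the interrupt handler's own context, so the work is deferred through rte_eal_alarm_set(). The sketch below only illustrates that defer-via-alarm pattern, assuming an initialized DPDK environment; demo_deferred_handler() and the 10 ms wait are illustrative choices, not driver code.

/* Sketch: defer work to the EAL alarm thread instead of doing it inline,
 * mirroring the pattern used for delayed interrupt reconfiguration above.
 */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

static void
demo_deferred_handler(void *arg)
{
	/* Runs later, in the alarm thread, outside the caller's context. */
	printf("deferred work runs now: %s\n", (const char *)arg);
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return 1;

	/* Schedule the handler ~1 us later rather than calling it directly. */
	rte_eal_alarm_set(1, demo_deferred_handler, (void *)"reconfig");

	rte_delay_ms(10);	/* give the alarm thread time to fire */
	rte_eal_cleanup();
	return 0;
}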