15566a3e3SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
25566a3e3SBruce Richardson  * Copyright(c) 2010-2016 Intel Corporation
3e9efa4d9SJianfeng Tan  */
4e9efa4d9SJianfeng Tan 
5e9efa4d9SJianfeng Tan #include <stdint.h>
6e9efa4d9SJianfeng Tan #include <sys/types.h>
7e9efa4d9SJianfeng Tan #include <unistd.h>
8ef53b603SJianfeng Tan #include <fcntl.h>
9ef53b603SJianfeng Tan #include <sys/socket.h>
10e9efa4d9SJianfeng Tan 
11ce2eabddSJianfeng Tan #include <rte_malloc.h>
12ce2eabddSJianfeng Tan #include <rte_kvargs.h>
13050fe6e9SJan Blunck #include <rte_ethdev_vdev.h>
14d4a586d2SJianfeng Tan #include <rte_bus_vdev.h>
15ef53b603SJianfeng Tan #include <rte_alarm.h>
166ebbf410SXuan Ding #include <rte_cycles.h>
17ce2eabddSJianfeng Tan 
18ce2eabddSJianfeng Tan #include "virtio_ethdev.h"
19e9efa4d9SJianfeng Tan #include "virtio_logs.h"
20e9efa4d9SJianfeng Tan #include "virtio_pci.h"
21e9efa4d9SJianfeng Tan #include "virtqueue.h"
221b69528eSJianfeng Tan #include "virtio_rxtx.h"
23e9efa4d9SJianfeng Tan #include "virtio_user/virtio_user_dev.h"
247f468b2eSTiwei Bie #include "virtio_user/vhost.h"
25e9efa4d9SJianfeng Tan 
26e9efa4d9SJianfeng Tan #define virtio_user_get_dev(hw) \
27e9efa4d9SJianfeng Tan ((struct virtio_user_dev *)(hw)->virtio_user_dev)
28e9efa4d9SJianfeng Tan 
296ebbf410SXuan Ding static void
306ebbf410SXuan Ding virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
316ebbf410SXuan Ding {
326ebbf410SXuan Ding struct virtio_hw *hw = dev->data->dev_private;
336ebbf410SXuan Ding struct virtnet_rx *rxvq;
346ebbf410SXuan Ding struct virtnet_tx *txvq;
356ebbf410SXuan Ding uint16_t i;
366ebbf410SXuan Ding 
376ebbf410SXuan Ding /* Add lock to avoid queue contention. */
386ebbf410SXuan Ding rte_spinlock_lock(&hw->state_lock);
396ebbf410SXuan Ding hw->started = 0;
406ebbf410SXuan Ding 
416ebbf410SXuan Ding /*
426ebbf410SXuan Ding  * Waiting for the datapath to complete before resetting queues.
436ebbf410SXuan Ding  * 1 ms should be enough for the ongoing Tx/Rx function to finish.
446ebbf410SXuan Ding  */
456ebbf410SXuan Ding rte_delay_ms(1);
466ebbf410SXuan Ding 
476ebbf410SXuan Ding /* Vring reset for each Tx queue and Rx queue.
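 * Rx queues additionally have their descriptors refilled afterwards via
 * virtio_dev_rx_queue_setup_finish().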
 */
486ebbf410SXuan Ding for (i = 0; i < dev->data->nb_rx_queues; i++) {
496ebbf410SXuan Ding rxvq = dev->data->rx_queues[i];
506ebbf410SXuan Ding virtqueue_rxvq_reset_packed(rxvq->vq);
516ebbf410SXuan Ding virtio_dev_rx_queue_setup_finish(dev, i);
526ebbf410SXuan Ding }
536ebbf410SXuan Ding 
546ebbf410SXuan Ding for (i = 0; i < dev->data->nb_tx_queues; i++) {
556ebbf410SXuan Ding txvq = dev->data->tx_queues[i];
566ebbf410SXuan Ding virtqueue_txvq_reset_packed(txvq->vq);
576ebbf410SXuan Ding }
586ebbf410SXuan Ding 
596ebbf410SXuan Ding hw->started = 1;
606ebbf410SXuan Ding rte_spinlock_unlock(&hw->state_lock);
616ebbf410SXuan Ding }
626ebbf410SXuan Ding 
636ebbf410SXuan Ding 
64bd8f50a4SZhiyong Yang static int
65bd8f50a4SZhiyong Yang virtio_user_server_reconnect(struct virtio_user_dev *dev)
66bd8f50a4SZhiyong Yang {
67bd8f50a4SZhiyong Yang int ret;
68bd8f50a4SZhiyong Yang int connectfd;
69bd8f50a4SZhiyong Yang struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
706ebbf410SXuan Ding struct virtio_hw *hw = eth_dev->data->dev_private;
718e756105SMaxime Coquelin uint64_t protocol_features;
72bd8f50a4SZhiyong Yang 
73bd8f50a4SZhiyong Yang connectfd = accept(dev->listenfd, NULL, NULL);
74bd8f50a4SZhiyong Yang if (connectfd < 0)
75bd8f50a4SZhiyong Yang return -1;
76bd8f50a4SZhiyong Yang 
77bd8f50a4SZhiyong Yang dev->vhostfd = connectfd;
78201a4165SZhiyong Yang if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
79201a4165SZhiyong Yang &dev->device_features) < 0) {
80201a4165SZhiyong Yang PMD_INIT_LOG(ERR, "get_features failed: %s",
81201a4165SZhiyong Yang strerror(errno));
82201a4165SZhiyong Yang return -1;
83201a4165SZhiyong Yang }
84201a4165SZhiyong Yang 
858e756105SMaxime Coquelin if (dev->device_features &
868e756105SMaxime Coquelin (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
878e756105SMaxime Coquelin if (dev->ops->send_request(dev,
888e756105SMaxime Coquelin VHOST_USER_GET_PROTOCOL_FEATURES,
898e756105SMaxime Coquelin &protocol_features))
908e756105SMaxime Coquelin return -1;
918e756105SMaxime Coquelin 
928e756105SMaxime Coquelin dev->protocol_features &= protocol_features;
938e756105SMaxime Coquelin 
948e756105SMaxime Coquelin if (dev->ops->send_request(dev,
958e756105SMaxime Coquelin VHOST_USER_SET_PROTOCOL_FEATURES,
968e756105SMaxime Coquelin &dev->protocol_features))
978e756105SMaxime Coquelin return -1;
988e756105SMaxime Coquelin 
998e756105SMaxime Coquelin if (!(dev->protocol_features &
1008e756105SMaxime Coquelin (1ULL << VHOST_USER_PROTOCOL_F_MQ)))
1018e756105SMaxime Coquelin dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
1028e756105SMaxime Coquelin }
1038e756105SMaxime Coquelin 
104bb97d2ddSTiwei Bie dev->device_features |= dev->frontend_features;
105bb97d2ddSTiwei Bie 
1067c66ff61SMarvin Liu /* mask out the features not supported by the vhost-user backend */
1077c66ff61SMarvin Liu dev->device_features &= ~(dev->unsupported_features);
108201a4165SZhiyong Yang 
109201a4165SZhiyong Yang dev->features &= dev->device_features;
110201a4165SZhiyong Yang 
1116ebbf410SXuan Ding /* For the packed ring, the queues must be reset during reconnection.
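 * virtio_user_reset_queues_packed() below re-initializes both the Rx and
 * Tx rings before the device is restarted.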
 */
11201996a03SMarvin Liu if (vtpci_packed_queue(hw) &&
11301996a03SMarvin Liu (vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
1146ebbf410SXuan Ding PMD_INIT_LOG(NOTICE, "Packets in flight will be dropped"
1156ebbf410SXuan Ding " when the packed ring reconnects.");
1166ebbf410SXuan Ding virtio_user_reset_queues_packed(eth_dev);
11792771257STiwei Bie }
1186ebbf410SXuan Ding 
119bd8f50a4SZhiyong Yang ret = virtio_user_start_device(dev);
120bd8f50a4SZhiyong Yang if (ret < 0)
121bd8f50a4SZhiyong Yang return -1;
122bd8f50a4SZhiyong Yang 
123201a4165SZhiyong Yang if (dev->queue_pairs > 1) {
124201a4165SZhiyong Yang ret = virtio_user_handle_mq(dev, dev->queue_pairs);
125201a4165SZhiyong Yang if (ret != 0) {
126201a4165SZhiyong Yang PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
127201a4165SZhiyong Yang return -1;
128201a4165SZhiyong Yang }
129201a4165SZhiyong Yang }
130bd8f50a4SZhiyong Yang if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
131bd8f50a4SZhiyong Yang if (rte_intr_disable(eth_dev->intr_handle) < 0) {
132bd8f50a4SZhiyong Yang PMD_DRV_LOG(ERR, "interrupt disable failed");
133bd8f50a4SZhiyong Yang return -1;
134bd8f50a4SZhiyong Yang }
135bd8f50a4SZhiyong Yang rte_intr_callback_unregister(eth_dev->intr_handle,
136bd8f50a4SZhiyong Yang virtio_interrupt_handler,
137bd8f50a4SZhiyong Yang eth_dev);
138bd8f50a4SZhiyong Yang eth_dev->intr_handle->fd = connectfd;
139bd8f50a4SZhiyong Yang rte_intr_callback_register(eth_dev->intr_handle,
140bd8f50a4SZhiyong Yang virtio_interrupt_handler, eth_dev);
141bd8f50a4SZhiyong Yang 
142bd8f50a4SZhiyong Yang if (rte_intr_enable(eth_dev->intr_handle) < 0) {
143bd8f50a4SZhiyong Yang PMD_DRV_LOG(ERR, "interrupt enable failed");
144bd8f50a4SZhiyong Yang return -1;
145bd8f50a4SZhiyong Yang }
146bd8f50a4SZhiyong Yang }
147bd8f50a4SZhiyong Yang PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
148bd8f50a4SZhiyong Yang return 0;
149bd8f50a4SZhiyong Yang }
150bd8f50a4SZhiyong Yang 
151e9efa4d9SJianfeng Tan static void
152ef53b603SJianfeng Tan virtio_user_delayed_handler(void *param)
153ef53b603SJianfeng Tan {
154ef53b603SJianfeng Tan struct virtio_hw *hw = (struct virtio_hw *)param;
155bd8f50a4SZhiyong Yang struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
156bd8f50a4SZhiyong Yang struct virtio_user_dev *dev = virtio_user_get_dev(hw);
157ef53b603SJianfeng Tan 
158bd8f50a4SZhiyong Yang if (rte_intr_disable(eth_dev->intr_handle) < 0) {
159bd8f50a4SZhiyong Yang PMD_DRV_LOG(ERR, "interrupt disable failed");
160bd8f50a4SZhiyong Yang return;
161bd8f50a4SZhiyong Yang }
162bd8f50a4SZhiyong Yang rte_intr_callback_unregister(eth_dev->intr_handle,
163bd8f50a4SZhiyong Yang virtio_interrupt_handler, eth_dev);
164bd8f50a4SZhiyong Yang if (dev->is_server) {
165bd8f50a4SZhiyong Yang if (dev->vhostfd >= 0) {
166bd8f50a4SZhiyong Yang close(dev->vhostfd);
167bd8f50a4SZhiyong Yang dev->vhostfd = -1;
168bd8f50a4SZhiyong Yang }
169bd8f50a4SZhiyong Yang eth_dev->intr_handle->fd = dev->listenfd;
170bd8f50a4SZhiyong Yang rte_intr_callback_register(eth_dev->intr_handle,
171bd8f50a4SZhiyong Yang virtio_interrupt_handler, eth_dev);
172bd8f50a4SZhiyong Yang if (rte_intr_enable(eth_dev->intr_handle) < 0) {
173bd8f50a4SZhiyong Yang PMD_DRV_LOG(ERR, "interrupt enable failed");
174bd8f50a4SZhiyong Yang return;
175bd8f50a4SZhiyong Yang }
176bd8f50a4SZhiyong Yang }
177ef53b603SJianfeng Tan }
178ef53b603SJianfeng Tan 
179ef53b603SJianfeng Tan static void
180e9efa4d9SJianfeng Tan virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
181e9efa4d9SJianfeng Tan void *dst, int length) 182e9efa4d9SJianfeng Tan { 183e9efa4d9SJianfeng Tan int i; 184e9efa4d9SJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw); 185e9efa4d9SJianfeng Tan 186e9efa4d9SJianfeng Tan if (offset == offsetof(struct virtio_net_config, mac) && 18735b2d13fSOlivier Matz length == RTE_ETHER_ADDR_LEN) { 18835b2d13fSOlivier Matz for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) 189e9efa4d9SJianfeng Tan ((uint8_t *)dst)[i] = dev->mac_addr[i]; 190e9efa4d9SJianfeng Tan return; 191e9efa4d9SJianfeng Tan } 192e9efa4d9SJianfeng Tan 193ef53b603SJianfeng Tan if (offset == offsetof(struct virtio_net_config, status)) { 194ef53b603SJianfeng Tan char buf[128]; 195ef53b603SJianfeng Tan 196ef53b603SJianfeng Tan if (dev->vhostfd >= 0) { 197ef53b603SJianfeng Tan int r; 198ef53b603SJianfeng Tan int flags; 199ef53b603SJianfeng Tan 200ef53b603SJianfeng Tan flags = fcntl(dev->vhostfd, F_GETFL); 2012fd826a3SSebastian Basierski if (fcntl(dev->vhostfd, F_SETFL, 2022fd826a3SSebastian Basierski flags | O_NONBLOCK) == -1) { 2032fd826a3SSebastian Basierski PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag"); 2042fd826a3SSebastian Basierski return; 2052fd826a3SSebastian Basierski } 206ef53b603SJianfeng Tan r = recv(dev->vhostfd, buf, 128, MSG_PEEK); 207ef53b603SJianfeng Tan if (r == 0 || (r < 0 && errno != EAGAIN)) { 208d0131e49SXiao Wang dev->net_status &= (~VIRTIO_NET_S_LINK_UP); 209ef53b603SJianfeng Tan PMD_DRV_LOG(ERR, "virtio-user port %u is down", 210ef53b603SJianfeng Tan hw->port_id); 211bd8f50a4SZhiyong Yang 212bd8f50a4SZhiyong Yang /* This function could be called in the process 213bd8f50a4SZhiyong Yang * of interrupt handling, callback cannot be 214bd8f50a4SZhiyong Yang * unregistered here, set an alarm to do it. 215ef53b603SJianfeng Tan */ 216ef53b603SJianfeng Tan rte_eal_alarm_set(1, 217ef53b603SJianfeng Tan virtio_user_delayed_handler, 218ef53b603SJianfeng Tan (void *)hw); 219ef53b603SJianfeng Tan } else { 220d0131e49SXiao Wang dev->net_status |= VIRTIO_NET_S_LINK_UP; 221ef53b603SJianfeng Tan } 222f76ef453SSebastian Basierski if (fcntl(dev->vhostfd, F_SETFL, 223f76ef453SSebastian Basierski flags & ~O_NONBLOCK) == -1) { 224f76ef453SSebastian Basierski PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag"); 225f76ef453SSebastian Basierski return; 226f76ef453SSebastian Basierski } 227bd8f50a4SZhiyong Yang } else if (dev->is_server) { 228d0131e49SXiao Wang dev->net_status &= (~VIRTIO_NET_S_LINK_UP); 229bd8f50a4SZhiyong Yang if (virtio_user_server_reconnect(dev) >= 0) 230d0131e49SXiao Wang dev->net_status |= VIRTIO_NET_S_LINK_UP; 231ef53b603SJianfeng Tan } 232bd8f50a4SZhiyong Yang 233d0131e49SXiao Wang *(uint16_t *)dst = dev->net_status; 234ef53b603SJianfeng Tan } 235e9efa4d9SJianfeng Tan 236e9efa4d9SJianfeng Tan if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs)) 237e9efa4d9SJianfeng Tan *(uint16_t *)dst = dev->max_queue_pairs; 238e9efa4d9SJianfeng Tan } 239e9efa4d9SJianfeng Tan 240e9efa4d9SJianfeng Tan static void 241e9efa4d9SJianfeng Tan virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset, 242e9efa4d9SJianfeng Tan const void *src, int length) 243e9efa4d9SJianfeng Tan { 244e9efa4d9SJianfeng Tan int i; 245e9efa4d9SJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw); 246e9efa4d9SJianfeng Tan 247e9efa4d9SJianfeng Tan if ((offset == offsetof(struct virtio_net_config, mac)) && 24835b2d13fSOlivier Matz (length == RTE_ETHER_ADDR_LEN)) 24935b2d13fSOlivier Matz for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) 250e9efa4d9SJianfeng Tan 
dev->mac_addr[i] = ((const uint8_t *)src)[i]; 251e9efa4d9SJianfeng Tan else 252f2462150SFerruh Yigit PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d", 253e9efa4d9SJianfeng Tan offset, length); 254e9efa4d9SJianfeng Tan } 255e9efa4d9SJianfeng Tan 256e9efa4d9SJianfeng Tan static void 257c12a26eeSJianfeng Tan virtio_user_reset(struct virtio_hw *hw) 258c12a26eeSJianfeng Tan { 259c12a26eeSJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw); 260c12a26eeSJianfeng Tan 261c12a26eeSJianfeng Tan if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK) 262c12a26eeSJianfeng Tan virtio_user_stop_device(dev); 263c12a26eeSJianfeng Tan } 264c12a26eeSJianfeng Tan 265c12a26eeSJianfeng Tan static void 266e9efa4d9SJianfeng Tan virtio_user_set_status(struct virtio_hw *hw, uint8_t status) 267e9efa4d9SJianfeng Tan { 268e9efa4d9SJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw); 269e9efa4d9SJianfeng Tan 270e9efa4d9SJianfeng Tan if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) 271e9efa4d9SJianfeng Tan virtio_user_start_device(dev); 272c12a26eeSJianfeng Tan else if (status == VIRTIO_CONFIG_STATUS_RESET) 273c12a26eeSJianfeng Tan virtio_user_reset(hw); 274e9efa4d9SJianfeng Tan dev->status = status; 27557912824SMaxime Coquelin virtio_user_send_status_update(dev, status); 276e9efa4d9SJianfeng Tan } 277e9efa4d9SJianfeng Tan 278e9efa4d9SJianfeng Tan static uint8_t 279e9efa4d9SJianfeng Tan virtio_user_get_status(struct virtio_hw *hw) 280e9efa4d9SJianfeng Tan { 281e9efa4d9SJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw); 282e9efa4d9SJianfeng Tan 2830b0dc66cSAdrian Moreno virtio_user_update_status(dev); 2840b0dc66cSAdrian Moreno 285e9efa4d9SJianfeng Tan return dev->status; 286e9efa4d9SJianfeng Tan } 287e9efa4d9SJianfeng Tan 288e9efa4d9SJianfeng Tan static uint64_t 289e9efa4d9SJianfeng Tan virtio_user_get_features(struct virtio_hw *hw) 290e9efa4d9SJianfeng Tan { 291e9efa4d9SJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw); 292e9efa4d9SJianfeng Tan 293142678d4SJianfeng Tan /* unmask feature bits defined in vhost user protocol */ 294142678d4SJianfeng Tan return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES; 295e9efa4d9SJianfeng Tan } 296e9efa4d9SJianfeng Tan 297e9efa4d9SJianfeng Tan static void 298e9efa4d9SJianfeng Tan virtio_user_set_features(struct virtio_hw *hw, uint64_t features) 299e9efa4d9SJianfeng Tan { 300e9efa4d9SJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw); 301e9efa4d9SJianfeng Tan 302142678d4SJianfeng Tan dev->features = features & dev->device_features; 303e9efa4d9SJianfeng Tan } 304e9efa4d9SJianfeng Tan 305e9efa4d9SJianfeng Tan static uint8_t 306e9efa4d9SJianfeng Tan virtio_user_get_isr(struct virtio_hw *hw __rte_unused) 307e9efa4d9SJianfeng Tan { 30835c4f855SJianfeng Tan /* rxq interrupts and config interrupt are separated in virtio-user, 30935c4f855SJianfeng Tan * here we only report config change. 
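 * Rx queue interrupts are delivered through the per-queue callfds instead.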
310e9efa4d9SJianfeng Tan  */
31135c4f855SJianfeng Tan return VIRTIO_PCI_ISR_CONFIG;
312e9efa4d9SJianfeng Tan }
313e9efa4d9SJianfeng Tan 
314e9efa4d9SJianfeng Tan static uint16_t
315e9efa4d9SJianfeng Tan virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
316e9efa4d9SJianfeng Tan uint16_t vec __rte_unused)
317e9efa4d9SJianfeng Tan {
31835c4f855SJianfeng Tan return 0;
319e9efa4d9SJianfeng Tan }
320e9efa4d9SJianfeng Tan 
3213d4fb6fdSJianfeng Tan static uint16_t
3223d4fb6fdSJianfeng Tan virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
3233d4fb6fdSJianfeng Tan struct virtqueue *vq __rte_unused,
3243d4fb6fdSJianfeng Tan uint16_t vec)
3253d4fb6fdSJianfeng Tan {
3263d4fb6fdSJianfeng Tan /* pretend we have done that */
3273d4fb6fdSJianfeng Tan return vec;
3283d4fb6fdSJianfeng Tan }
3293d4fb6fdSJianfeng Tan 
330e9efa4d9SJianfeng Tan /* This function returns the queue size, i.e. the number of descriptors, of a
331e9efa4d9SJianfeng Tan  * specified queue. It differs from VHOST_USER_GET_QUEUE_NUM, which is used to get the
332e9efa4d9SJianfeng Tan  * max supported queues.
333e9efa4d9SJianfeng Tan  */
334e9efa4d9SJianfeng Tan static uint16_t
335e9efa4d9SJianfeng Tan virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
336e9efa4d9SJianfeng Tan {
337e9efa4d9SJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw);
338e9efa4d9SJianfeng Tan 
339e9efa4d9SJianfeng Tan /* Currently, each queue has the same queue size */
340e9efa4d9SJianfeng Tan return dev->queue_size;
341e9efa4d9SJianfeng Tan }
342e9efa4d9SJianfeng Tan 
34348a44640SJens Freimann static void
34448a44640SJens Freimann virtio_user_setup_queue_packed(struct virtqueue *vq,
34548a44640SJens Freimann struct virtio_user_dev *dev)
346e9efa4d9SJianfeng Tan {
34748a44640SJens Freimann uint16_t queue_idx = vq->vq_queue_index;
34848a44640SJens Freimann struct vring_packed *vring;
34948a44640SJens Freimann uint64_t desc_addr;
35048a44640SJens Freimann uint64_t avail_addr;
35148a44640SJens Freimann uint64_t used_addr;
35248a44640SJens Freimann uint16_t i;
35348a44640SJens Freimann 
35448a44640SJens Freimann vring = &dev->packed_vrings[queue_idx];
35548a44640SJens Freimann desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
35648a44640SJens Freimann avail_addr = desc_addr + vq->vq_nentries *
35748a44640SJens Freimann sizeof(struct vring_packed_desc);
35848a44640SJens Freimann used_addr = RTE_ALIGN_CEIL(avail_addr +
35948a44640SJens Freimann sizeof(struct vring_packed_desc_event),
36048a44640SJens Freimann VIRTIO_PCI_VRING_ALIGN);
36148a44640SJens Freimann vring->num = vq->vq_nentries;
3624cdc4d98STiwei Bie vring->desc = (void *)(uintptr_t)desc_addr;
3634cdc4d98STiwei Bie vring->driver = (void *)(uintptr_t)avail_addr;
3644cdc4d98STiwei Bie vring->device = (void *)(uintptr_t)used_addr;
36548a44640SJens Freimann dev->packed_queues[queue_idx].avail_wrap_counter = true;
36648a44640SJens Freimann dev->packed_queues[queue_idx].used_wrap_counter = true;
36748a44640SJens Freimann 
36845c224e7STiwei Bie for (i = 0; i < vring->num; i++)
3694cdc4d98STiwei Bie vring->desc[i].flags = 0;
37048a44640SJens Freimann }
37148a44640SJens Freimann 
37248a44640SJens Freimann static void
37348a44640SJens Freimann virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
37448a44640SJens Freimann {
375e9efa4d9SJianfeng Tan uint16_t queue_idx = vq->vq_queue_index;
376e9efa4d9SJianfeng Tan uint64_t desc_addr, avail_addr, used_addr;
377e9efa4d9SJianfeng Tan 
378e9efa4d9SJianfeng Tan desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
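 /* Split ring layout inside the queue's contiguous memory: the descriptor
  * table comes first, the avail ring follows it, and the used ring is placed
  * after the avail ring, aligned up to VIRTIO_PCI_VRING_ALIGN.
  */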
379e9efa4d9SJianfeng Tan avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
380e9efa4d9SJianfeng Tan used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
381e9efa4d9SJianfeng Tan ring[vq->vq_nentries]),
382e9efa4d9SJianfeng Tan VIRTIO_PCI_VRING_ALIGN);
383e9efa4d9SJianfeng Tan 
384e9efa4d9SJianfeng Tan dev->vrings[queue_idx].num = vq->vq_nentries;
385e9efa4d9SJianfeng Tan dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
386e9efa4d9SJianfeng Tan dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
387e9efa4d9SJianfeng Tan dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
38848a44640SJens Freimann }
38948a44640SJens Freimann 
39048a44640SJens Freimann static int
39148a44640SJens Freimann virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
39248a44640SJens Freimann {
39348a44640SJens Freimann struct virtio_user_dev *dev = virtio_user_get_dev(hw);
39448a44640SJens Freimann 
39548a44640SJens Freimann if (vtpci_packed_queue(hw))
39648a44640SJens Freimann virtio_user_setup_queue_packed(vq, dev);
39748a44640SJens Freimann else
39848a44640SJens Freimann virtio_user_setup_queue_split(vq, dev);
399e9efa4d9SJianfeng Tan 
400e9efa4d9SJianfeng Tan return 0;
401e9efa4d9SJianfeng Tan }
402e9efa4d9SJianfeng Tan 
403e9efa4d9SJianfeng Tan static void
404e9efa4d9SJianfeng Tan virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
405e9efa4d9SJianfeng Tan {
406e9efa4d9SJianfeng Tan /* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes QEMU
407e9efa4d9SJianfeng Tan  * stop the corresponding ioeventfds and reset the status of
408e9efa4d9SJianfeng Tan  * the device.
409e9efa4d9SJianfeng Tan  * For modern devices, setting queue desc, avail and used in the PCI bar to 0
410e9efa4d9SJianfeng Tan  * triggers no further behavior in QEMU.
411e9efa4d9SJianfeng Tan  *
412e9efa4d9SJianfeng Tan  * Here we just care about what information to deliver to vhost-user
413e9efa4d9SJianfeng Tan  * or vhost-kernel. So we just close ioeventfd for now.
414e9efa4d9SJianfeng Tan */ 415e9efa4d9SJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw); 416e9efa4d9SJianfeng Tan 417e9efa4d9SJianfeng Tan close(dev->callfds[vq->vq_queue_index]); 418e9efa4d9SJianfeng Tan close(dev->kickfds[vq->vq_queue_index]); 419e9efa4d9SJianfeng Tan } 420e9efa4d9SJianfeng Tan 421e9efa4d9SJianfeng Tan static void 422e9efa4d9SJianfeng Tan virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq) 423e9efa4d9SJianfeng Tan { 424e9efa4d9SJianfeng Tan uint64_t buf = 1; 425e9efa4d9SJianfeng Tan struct virtio_user_dev *dev = virtio_user_get_dev(hw); 426e9efa4d9SJianfeng Tan 4271b69528eSJianfeng Tan if (hw->cvq && (hw->cvq->vq == vq)) { 42848a44640SJens Freimann if (vtpci_packed_queue(vq->hw)) 42948a44640SJens Freimann virtio_user_handle_cq_packed(dev, vq->vq_queue_index); 43048a44640SJens Freimann else 4311b69528eSJianfeng Tan virtio_user_handle_cq(dev, vq->vq_queue_index); 4321b69528eSJianfeng Tan return; 4331b69528eSJianfeng Tan } 4341b69528eSJianfeng Tan 435e9efa4d9SJianfeng Tan if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0) 436f2462150SFerruh Yigit PMD_DRV_LOG(ERR, "failed to kick backend: %s", 437e9efa4d9SJianfeng Tan strerror(errno)); 438e9efa4d9SJianfeng Tan } 439e9efa4d9SJianfeng Tan 4406d890f8aSYuanhan Liu const struct virtio_pci_ops virtio_user_ops = { 441e9efa4d9SJianfeng Tan .read_dev_cfg = virtio_user_read_dev_config, 442e9efa4d9SJianfeng Tan .write_dev_cfg = virtio_user_write_dev_config, 443e9efa4d9SJianfeng Tan .get_status = virtio_user_get_status, 444e9efa4d9SJianfeng Tan .set_status = virtio_user_set_status, 445e9efa4d9SJianfeng Tan .get_features = virtio_user_get_features, 446e9efa4d9SJianfeng Tan .set_features = virtio_user_set_features, 447e9efa4d9SJianfeng Tan .get_isr = virtio_user_get_isr, 448e9efa4d9SJianfeng Tan .set_config_irq = virtio_user_set_config_irq, 4493d4fb6fdSJianfeng Tan .set_queue_irq = virtio_user_set_queue_irq, 450e9efa4d9SJianfeng Tan .get_queue_num = virtio_user_get_queue_num, 451e9efa4d9SJianfeng Tan .setup_queue = virtio_user_setup_queue, 452e9efa4d9SJianfeng Tan .del_queue = virtio_user_del_queue, 453e9efa4d9SJianfeng Tan .notify_queue = virtio_user_notify_queue, 454e9efa4d9SJianfeng Tan }; 455ce2eabddSJianfeng Tan 456ce2eabddSJianfeng Tan static const char *valid_args[] = { 457ce2eabddSJianfeng Tan #define VIRTIO_USER_ARG_QUEUES_NUM "queues" 458ce2eabddSJianfeng Tan VIRTIO_USER_ARG_QUEUES_NUM, 459ce2eabddSJianfeng Tan #define VIRTIO_USER_ARG_CQ_NUM "cq" 460ce2eabddSJianfeng Tan VIRTIO_USER_ARG_CQ_NUM, 461ce2eabddSJianfeng Tan #define VIRTIO_USER_ARG_MAC "mac" 462ce2eabddSJianfeng Tan VIRTIO_USER_ARG_MAC, 463ce2eabddSJianfeng Tan #define VIRTIO_USER_ARG_PATH "path" 464ce2eabddSJianfeng Tan VIRTIO_USER_ARG_PATH, 465ce2eabddSJianfeng Tan #define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size" 466ce2eabddSJianfeng Tan VIRTIO_USER_ARG_QUEUE_SIZE, 4674214a1b4SWenfeng Liu #define VIRTIO_USER_ARG_INTERFACE_NAME "iface" 4684214a1b4SWenfeng Liu VIRTIO_USER_ARG_INTERFACE_NAME, 469bd8f50a4SZhiyong Yang #define VIRTIO_USER_ARG_SERVER_MODE "server" 470bd8f50a4SZhiyong Yang VIRTIO_USER_ARG_SERVER_MODE, 471488ed97aSMarvin Liu #define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf" 472488ed97aSMarvin Liu VIRTIO_USER_ARG_MRG_RXBUF, 473488ed97aSMarvin Liu #define VIRTIO_USER_ARG_IN_ORDER "in_order" 474488ed97aSMarvin Liu VIRTIO_USER_ARG_IN_ORDER, 47534f3966cSYuanhan Liu #define VIRTIO_USER_ARG_PACKED_VQ "packed_vq" 47634f3966cSYuanhan Liu VIRTIO_USER_ARG_PACKED_VQ, 477b0db4beaSIvan Dyukov #define VIRTIO_USER_ARG_SPEED 
"speed" 478b0db4beaSIvan Dyukov VIRTIO_USER_ARG_SPEED, 4796b7eefbcSMarvin Liu #define VIRTIO_USER_ARG_VECTORIZED "vectorized" 4806b7eefbcSMarvin Liu VIRTIO_USER_ARG_VECTORIZED, 481ce2eabddSJianfeng Tan NULL 482ce2eabddSJianfeng Tan }; 483ce2eabddSJianfeng Tan 484ce2eabddSJianfeng Tan #define VIRTIO_USER_DEF_CQ_EN 0 485ce2eabddSJianfeng Tan #define VIRTIO_USER_DEF_Q_NUM 1 486ce2eabddSJianfeng Tan #define VIRTIO_USER_DEF_Q_SZ 256 487bd8f50a4SZhiyong Yang #define VIRTIO_USER_DEF_SERVER_MODE 0 488ce2eabddSJianfeng Tan 489ce2eabddSJianfeng Tan static int 490ce2eabddSJianfeng Tan get_string_arg(const char *key __rte_unused, 491ce2eabddSJianfeng Tan const char *value, void *extra_args) 492ce2eabddSJianfeng Tan { 493ce2eabddSJianfeng Tan if (!value || !extra_args) 494ce2eabddSJianfeng Tan return -EINVAL; 495ce2eabddSJianfeng Tan 496ce2eabddSJianfeng Tan *(char **)extra_args = strdup(value); 497ce2eabddSJianfeng Tan 4984214a1b4SWenfeng Liu if (!*(char **)extra_args) 4994214a1b4SWenfeng Liu return -ENOMEM; 5004214a1b4SWenfeng Liu 501ce2eabddSJianfeng Tan return 0; 502ce2eabddSJianfeng Tan } 503ce2eabddSJianfeng Tan 504ce2eabddSJianfeng Tan static int 505ce2eabddSJianfeng Tan get_integer_arg(const char *key __rte_unused, 506ce2eabddSJianfeng Tan const char *value, void *extra_args) 507ce2eabddSJianfeng Tan { 508bc5b6c11SIvan Dyukov uint64_t integer = 0; 509ce2eabddSJianfeng Tan if (!value || !extra_args) 510ce2eabddSJianfeng Tan return -EINVAL; 511bc5b6c11SIvan Dyukov errno = 0; 512bc5b6c11SIvan Dyukov integer = strtoull(value, NULL, 0); 513bc5b6c11SIvan Dyukov /* extra_args keeps default value, it should be replaced 514bc5b6c11SIvan Dyukov * only in case of successful parsing of the 'value' arg 515bc5b6c11SIvan Dyukov */ 516bc5b6c11SIvan Dyukov if (errno == 0) 517bc5b6c11SIvan Dyukov *(uint64_t *)extra_args = integer; 518bc5b6c11SIvan Dyukov return -errno; 519ce2eabddSJianfeng Tan } 520ce2eabddSJianfeng Tan 521ce2eabddSJianfeng Tan static struct rte_eth_dev * 522050fe6e9SJan Blunck virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev) 523ce2eabddSJianfeng Tan { 524ce2eabddSJianfeng Tan struct rte_eth_dev *eth_dev; 525ce2eabddSJianfeng Tan struct rte_eth_dev_data *data; 526ce2eabddSJianfeng Tan struct virtio_hw *hw; 527ce2eabddSJianfeng Tan struct virtio_user_dev *dev; 528ce2eabddSJianfeng Tan 529050fe6e9SJan Blunck eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw)); 530ce2eabddSJianfeng Tan if (!eth_dev) { 531ce2eabddSJianfeng Tan PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev"); 532ce2eabddSJianfeng Tan return NULL; 533ce2eabddSJianfeng Tan } 534ce2eabddSJianfeng Tan 535ce2eabddSJianfeng Tan data = eth_dev->data; 536050fe6e9SJan Blunck hw = eth_dev->data->dev_private; 537ce2eabddSJianfeng Tan 538ce2eabddSJianfeng Tan dev = rte_zmalloc(NULL, sizeof(*dev), 0); 539ce2eabddSJianfeng Tan if (!dev) { 540ce2eabddSJianfeng Tan PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed"); 541ce2eabddSJianfeng Tan rte_eth_dev_release_port(eth_dev); 542ce2eabddSJianfeng Tan return NULL; 543ce2eabddSJianfeng Tan } 544ce2eabddSJianfeng Tan 545553f4593SYuanhan Liu hw->port_id = data->port_id; 5463d4fb6fdSJianfeng Tan dev->port_id = data->port_id; 547553f4593SYuanhan Liu virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops; 548ef53b603SJianfeng Tan /* 549ef53b603SJianfeng Tan * MSIX is required to enable LSC (see virtio_init_device). 550ef53b603SJianfeng Tan * Here just pretend that we support msix. 
551ef53b603SJianfeng Tan */ 552ef53b603SJianfeng Tan hw->use_msix = 1; 553ce2eabddSJianfeng Tan hw->modern = 0; 5544710e16aSMarvin Liu hw->use_vec_rx = 0; 55577d66da8SMarvin Liu hw->use_vec_tx = 0; 5569f233f54SMarvin Liu hw->use_inorder_rx = 0; 5579f233f54SMarvin Liu hw->use_inorder_tx = 0; 558ce2eabddSJianfeng Tan hw->virtio_user_dev = dev; 559ce2eabddSJianfeng Tan return eth_dev; 560ce2eabddSJianfeng Tan } 561ce2eabddSJianfeng Tan 562ca8326a9SJianfeng Tan static void 563ca8326a9SJianfeng Tan virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev) 564ca8326a9SJianfeng Tan { 565ca8326a9SJianfeng Tan struct rte_eth_dev_data *data = eth_dev->data; 566ca8326a9SJianfeng Tan struct virtio_hw *hw = data->dev_private; 567ca8326a9SJianfeng Tan 568ca8326a9SJianfeng Tan rte_free(hw->virtio_user_dev); 569ca8326a9SJianfeng Tan rte_eth_dev_release_port(eth_dev); 570ca8326a9SJianfeng Tan } 571ca8326a9SJianfeng Tan 572ce2eabddSJianfeng Tan /* Dev initialization routine. Invoked once for each virtio vdev at 573c3b2fdfeSYong Wang * EAL init time, see rte_bus_probe(). 574ce2eabddSJianfeng Tan * Returns 0 on success. 575ce2eabddSJianfeng Tan */ 576ce2eabddSJianfeng Tan static int 5775d2aa461SJan Blunck virtio_user_pmd_probe(struct rte_vdev_device *dev) 578ce2eabddSJianfeng Tan { 57914f06474SJianfeng Tan struct rte_kvargs *kvlist = NULL; 580ce2eabddSJianfeng Tan struct rte_eth_dev *eth_dev; 581ce2eabddSJianfeng Tan struct virtio_hw *hw; 582ce2eabddSJianfeng Tan uint64_t queues = VIRTIO_USER_DEF_Q_NUM; 583ce2eabddSJianfeng Tan uint64_t cq = VIRTIO_USER_DEF_CQ_EN; 584ce2eabddSJianfeng Tan uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ; 585bd8f50a4SZhiyong Yang uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE; 586488ed97aSMarvin Liu uint64_t mrg_rxbuf = 1; 587488ed97aSMarvin Liu uint64_t in_order = 1; 5889070f88bSTiwei Bie uint64_t packed_vq = 0; 5896b7eefbcSMarvin Liu uint64_t vectorized = 0; 590ce2eabddSJianfeng Tan char *path = NULL; 5914214a1b4SWenfeng Liu char *ifname = NULL; 592ce2eabddSJianfeng Tan char *mac_addr = NULL; 593ce2eabddSJianfeng Tan int ret = -1; 594ce2eabddSJianfeng Tan 5951c8489daSTiwei Bie if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 5961c8489daSTiwei Bie const char *name = rte_vdev_device_name(dev); 5971c8489daSTiwei Bie eth_dev = rte_eth_dev_attach_secondary(name); 5981c8489daSTiwei Bie if (!eth_dev) { 59988fa5bb6SStephen Hemminger PMD_INIT_LOG(ERR, "Failed to probe %s", name); 6001c8489daSTiwei Bie return -1; 6011c8489daSTiwei Bie } 6021c8489daSTiwei Bie 6031c8489daSTiwei Bie if (eth_virtio_dev_init(eth_dev) < 0) { 6041c8489daSTiwei Bie PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails"); 6051c8489daSTiwei Bie rte_eth_dev_release_port(eth_dev); 6061c8489daSTiwei Bie return -1; 6071c8489daSTiwei Bie } 6081c8489daSTiwei Bie 6091c8489daSTiwei Bie eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops; 6101c8489daSTiwei Bie eth_dev->device = &dev->device; 6111c8489daSTiwei Bie rte_eth_dev_probing_finish(eth_dev); 6121c8489daSTiwei Bie return 0; 6131c8489daSTiwei Bie } 6141c8489daSTiwei Bie 6155d2aa461SJan Blunck kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args); 616ce2eabddSJianfeng Tan if (!kvlist) { 617ce2eabddSJianfeng Tan PMD_INIT_LOG(ERR, "error when parsing param"); 618ce2eabddSJianfeng Tan goto end; 619ce2eabddSJianfeng Tan } 620ce2eabddSJianfeng Tan 6219cca159eSMaxime Coquelin if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) { 622ca8326a9SJianfeng Tan if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH, 623ca8326a9SJianfeng Tan &get_string_arg, &path) < 0) { 
624404bd6bfSJianfeng Tan PMD_INIT_LOG(ERR, "error to parse %s", 625404bd6bfSJianfeng Tan VIRTIO_USER_ARG_PATH); 626404bd6bfSJianfeng Tan goto end; 627404bd6bfSJianfeng Tan } 6289cca159eSMaxime Coquelin } else { 629f2462150SFerruh Yigit PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user", 6302dac0df4STiwei Bie VIRTIO_USER_ARG_PATH); 631ce2eabddSJianfeng Tan goto end; 632ce2eabddSJianfeng Tan } 633ce2eabddSJianfeng Tan 6344214a1b4SWenfeng Liu if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) { 6354214a1b4SWenfeng Liu if (is_vhost_user_by_type(path)) { 6364214a1b4SWenfeng Liu PMD_INIT_LOG(ERR, 6374214a1b4SWenfeng Liu "arg %s applies only to vhost-kernel backend", 6384214a1b4SWenfeng Liu VIRTIO_USER_ARG_INTERFACE_NAME); 6394214a1b4SWenfeng Liu goto end; 6404214a1b4SWenfeng Liu } 6414214a1b4SWenfeng Liu 6424214a1b4SWenfeng Liu if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME, 6434214a1b4SWenfeng Liu &get_string_arg, &ifname) < 0) { 6444214a1b4SWenfeng Liu PMD_INIT_LOG(ERR, "error to parse %s", 6454214a1b4SWenfeng Liu VIRTIO_USER_ARG_INTERFACE_NAME); 6464214a1b4SWenfeng Liu goto end; 6474214a1b4SWenfeng Liu } 6484214a1b4SWenfeng Liu } 6494214a1b4SWenfeng Liu 650404bd6bfSJianfeng Tan if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) { 651ca8326a9SJianfeng Tan if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC, 652ca8326a9SJianfeng Tan &get_string_arg, &mac_addr) < 0) { 653404bd6bfSJianfeng Tan PMD_INIT_LOG(ERR, "error to parse %s", 654404bd6bfSJianfeng Tan VIRTIO_USER_ARG_MAC); 655404bd6bfSJianfeng Tan goto end; 656404bd6bfSJianfeng Tan } 657404bd6bfSJianfeng Tan } 658ce2eabddSJianfeng Tan 659404bd6bfSJianfeng Tan if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) { 660ca8326a9SJianfeng Tan if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE, 661ca8326a9SJianfeng Tan &get_integer_arg, &queue_size) < 0) { 662404bd6bfSJianfeng Tan PMD_INIT_LOG(ERR, "error to parse %s", 663404bd6bfSJianfeng Tan VIRTIO_USER_ARG_QUEUE_SIZE); 664404bd6bfSJianfeng Tan goto end; 665404bd6bfSJianfeng Tan } 666404bd6bfSJianfeng Tan } 667ce2eabddSJianfeng Tan 668404bd6bfSJianfeng Tan if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) { 669ca8326a9SJianfeng Tan if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM, 670ca8326a9SJianfeng Tan &get_integer_arg, &queues) < 0) { 671404bd6bfSJianfeng Tan PMD_INIT_LOG(ERR, "error to parse %s", 672404bd6bfSJianfeng Tan VIRTIO_USER_ARG_QUEUES_NUM); 673404bd6bfSJianfeng Tan goto end; 674404bd6bfSJianfeng Tan } 675404bd6bfSJianfeng Tan } 676ce2eabddSJianfeng Tan 677bd8f50a4SZhiyong Yang if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) { 678bd8f50a4SZhiyong Yang if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE, 679bd8f50a4SZhiyong Yang &get_integer_arg, &server_mode) < 0) { 680bd8f50a4SZhiyong Yang PMD_INIT_LOG(ERR, "error to parse %s", 681bd8f50a4SZhiyong Yang VIRTIO_USER_ARG_SERVER_MODE); 682bd8f50a4SZhiyong Yang goto end; 683bd8f50a4SZhiyong Yang } 684bd8f50a4SZhiyong Yang } 685bd8f50a4SZhiyong Yang 686404bd6bfSJianfeng Tan if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) { 687ca8326a9SJianfeng Tan if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM, 688ca8326a9SJianfeng Tan &get_integer_arg, &cq) < 0) { 689404bd6bfSJianfeng Tan PMD_INIT_LOG(ERR, "error to parse %s", 690404bd6bfSJianfeng Tan VIRTIO_USER_ARG_CQ_NUM); 691404bd6bfSJianfeng Tan goto end; 692404bd6bfSJianfeng Tan } 693404bd6bfSJianfeng Tan } else if (queues > 1) { 6941b69528eSJianfeng Tan cq = 1; 
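/* Implicitly enable the control queue: it is required to configure multiple queue pairs (see the multi-q check below). */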
695404bd6bfSJianfeng Tan } 6961b69528eSJianfeng Tan 69734f3966cSYuanhan Liu if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) { 69834f3966cSYuanhan Liu if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ, 69934f3966cSYuanhan Liu &get_integer_arg, &packed_vq) < 0) { 70034f3966cSYuanhan Liu PMD_INIT_LOG(ERR, "error to parse %s", 70134f3966cSYuanhan Liu VIRTIO_USER_ARG_PACKED_VQ); 70234f3966cSYuanhan Liu goto end; 70334f3966cSYuanhan Liu } 70434f3966cSYuanhan Liu } 70534f3966cSYuanhan Liu 7066b7eefbcSMarvin Liu if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) { 7076b7eefbcSMarvin Liu if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED, 7086b7eefbcSMarvin Liu &get_integer_arg, &vectorized) < 0) { 7096b7eefbcSMarvin Liu PMD_INIT_LOG(ERR, "error to parse %s", 7106b7eefbcSMarvin Liu VIRTIO_USER_ARG_VECTORIZED); 7116b7eefbcSMarvin Liu goto end; 7126b7eefbcSMarvin Liu } 7136b7eefbcSMarvin Liu } 7146b7eefbcSMarvin Liu 7151b69528eSJianfeng Tan if (queues > 1 && cq == 0) { 7161b69528eSJianfeng Tan PMD_INIT_LOG(ERR, "multi-q requires ctrl-q"); 7171b69528eSJianfeng Tan goto end; 7181b69528eSJianfeng Tan } 719ce2eabddSJianfeng Tan 7202269b9aeSWenfeng Liu if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) { 7212269b9aeSWenfeng Liu PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u", 7222269b9aeSWenfeng Liu VIRTIO_USER_ARG_QUEUES_NUM, queues, 7232269b9aeSWenfeng Liu VIRTIO_MAX_VIRTQUEUE_PAIRS); 7242269b9aeSWenfeng Liu goto end; 7252269b9aeSWenfeng Liu } 7262269b9aeSWenfeng Liu 727488ed97aSMarvin Liu if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) { 728488ed97aSMarvin Liu if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF, 729488ed97aSMarvin Liu &get_integer_arg, &mrg_rxbuf) < 0) { 730488ed97aSMarvin Liu PMD_INIT_LOG(ERR, "error to parse %s", 731488ed97aSMarvin Liu VIRTIO_USER_ARG_MRG_RXBUF); 732488ed97aSMarvin Liu goto end; 733488ed97aSMarvin Liu } 734488ed97aSMarvin Liu } 735488ed97aSMarvin Liu 736488ed97aSMarvin Liu if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) { 737488ed97aSMarvin Liu if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER, 738488ed97aSMarvin Liu &get_integer_arg, &in_order) < 0) { 739488ed97aSMarvin Liu PMD_INIT_LOG(ERR, "error to parse %s", 740488ed97aSMarvin Liu VIRTIO_USER_ARG_IN_ORDER); 741488ed97aSMarvin Liu goto end; 742488ed97aSMarvin Liu } 743488ed97aSMarvin Liu } 744488ed97aSMarvin Liu 745050fe6e9SJan Blunck eth_dev = virtio_user_eth_dev_alloc(dev); 746ce2eabddSJianfeng Tan if (!eth_dev) { 747e8df94b8SJianfeng Tan PMD_INIT_LOG(ERR, "virtio_user fails to alloc device"); 748ce2eabddSJianfeng Tan goto end; 749ce2eabddSJianfeng Tan } 750ce2eabddSJianfeng Tan 751ce2eabddSJianfeng Tan hw = eth_dev->data->dev_private; 752ce2eabddSJianfeng Tan if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq, 7531c8489daSTiwei Bie queue_size, mac_addr, &ifname, server_mode, 7541c8489daSTiwei Bie mrg_rxbuf, in_order, packed_vq) < 0) { 755ca8326a9SJianfeng Tan PMD_INIT_LOG(ERR, "virtio_user_dev_init fails"); 756ca8326a9SJianfeng Tan virtio_user_eth_dev_free(eth_dev); 757ce2eabddSJianfeng Tan goto end; 758ca8326a9SJianfeng Tan } 759fbe90cddSThomas Monjalon 76087db93e0SDavid Marchand /* previously called by pci probing for physical dev */ 761ce2eabddSJianfeng Tan if (eth_virtio_dev_init(eth_dev) < 0) { 762ce2eabddSJianfeng Tan PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails"); 763ca8326a9SJianfeng Tan virtio_user_eth_dev_free(eth_dev); 764ce2eabddSJianfeng Tan goto end; 765ce2eabddSJianfeng Tan } 766fbe90cddSThomas Monjalon 
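/* The vectorized datapath is only selected when requested via devargs; for the
 * packed ring it additionally needs AVX512 support at build time, checked below.
 */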
76777d66da8SMarvin Liu if (vectorized) { 76877d66da8SMarvin Liu if (packed_vq) { 76977d66da8SMarvin Liu #if defined(CC_AVX512_SUPPORT) 7706b7eefbcSMarvin Liu hw->use_vec_rx = 1; 77177d66da8SMarvin Liu hw->use_vec_tx = 1; 77277d66da8SMarvin Liu #else 77377d66da8SMarvin Liu PMD_INIT_LOG(INFO, 77477d66da8SMarvin Liu "building environment do not support packed ring vectorized"); 77577d66da8SMarvin Liu #endif 77677d66da8SMarvin Liu } else { 77777d66da8SMarvin Liu hw->use_vec_rx = 1; 77877d66da8SMarvin Liu } 77977d66da8SMarvin Liu } 7806b7eefbcSMarvin Liu 781fbe90cddSThomas Monjalon rte_eth_dev_probing_finish(eth_dev); 782ce2eabddSJianfeng Tan ret = 0; 783ce2eabddSJianfeng Tan 784ce2eabddSJianfeng Tan end: 78514f06474SJianfeng Tan if (kvlist) 78614f06474SJianfeng Tan rte_kvargs_free(kvlist); 787ce2eabddSJianfeng Tan if (path) 788ce2eabddSJianfeng Tan free(path); 789ce2eabddSJianfeng Tan if (mac_addr) 790ce2eabddSJianfeng Tan free(mac_addr); 7914214a1b4SWenfeng Liu if (ifname) 7924214a1b4SWenfeng Liu free(ifname); 793ce2eabddSJianfeng Tan return ret; 794ce2eabddSJianfeng Tan } 795ce2eabddSJianfeng Tan 796ce2eabddSJianfeng Tan static int 7975d2aa461SJan Blunck virtio_user_pmd_remove(struct rte_vdev_device *vdev) 798ce2eabddSJianfeng Tan { 7995d2aa461SJan Blunck const char *name; 800ce2eabddSJianfeng Tan struct rte_eth_dev *eth_dev; 801ce2eabddSJianfeng Tan 8025d2aa461SJan Blunck if (!vdev) 803ce2eabddSJianfeng Tan return -EINVAL; 804ce2eabddSJianfeng Tan 8055d2aa461SJan Blunck name = rte_vdev_device_name(vdev); 806f2462150SFerruh Yigit PMD_DRV_LOG(INFO, "Un-Initializing %s", name); 807ce2eabddSJianfeng Tan eth_dev = rte_eth_dev_allocated(name); 8087f468b2eSTiwei Bie /* Port has already been released by close. */ 809ce2eabddSJianfeng Tan if (!eth_dev) 8107f468b2eSTiwei Bie return 0; 811ce2eabddSJianfeng Tan 8121c8489daSTiwei Bie if (rte_eal_process_type() != RTE_PROC_PRIMARY) 8131c8489daSTiwei Bie return rte_eth_dev_release_port(eth_dev); 8141c8489daSTiwei Bie 815ce2eabddSJianfeng Tan /* make sure the device is stopped, queues freed */ 816ce2eabddSJianfeng Tan rte_eth_dev_close(eth_dev->data->port_id); 817ce2eabddSJianfeng Tan 818ce2eabddSJianfeng Tan return 0; 819ce2eabddSJianfeng Tan } 820ce2eabddSJianfeng Tan 821*86e71eb2SMaxime Coquelin static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr, 822*86e71eb2SMaxime Coquelin uint64_t iova, size_t len) 823*86e71eb2SMaxime Coquelin { 824*86e71eb2SMaxime Coquelin const char *name; 825*86e71eb2SMaxime Coquelin struct rte_eth_dev *eth_dev; 826*86e71eb2SMaxime Coquelin struct virtio_user_dev *dev; 827*86e71eb2SMaxime Coquelin struct virtio_hw *hw; 828*86e71eb2SMaxime Coquelin 829*86e71eb2SMaxime Coquelin if (!vdev) 830*86e71eb2SMaxime Coquelin return -EINVAL; 831*86e71eb2SMaxime Coquelin 832*86e71eb2SMaxime Coquelin name = rte_vdev_device_name(vdev); 833*86e71eb2SMaxime Coquelin eth_dev = rte_eth_dev_allocated(name); 834*86e71eb2SMaxime Coquelin /* Port has already been released by close. 
*/ 835*86e71eb2SMaxime Coquelin if (!eth_dev) 836*86e71eb2SMaxime Coquelin return 0; 837*86e71eb2SMaxime Coquelin 838*86e71eb2SMaxime Coquelin hw = (struct virtio_hw *)eth_dev->data->dev_private; 839*86e71eb2SMaxime Coquelin dev = hw->virtio_user_dev; 840*86e71eb2SMaxime Coquelin 841*86e71eb2SMaxime Coquelin if (dev->ops->dma_map) 842*86e71eb2SMaxime Coquelin return dev->ops->dma_map(dev, addr, iova, len); 843*86e71eb2SMaxime Coquelin 844*86e71eb2SMaxime Coquelin return 0; 845*86e71eb2SMaxime Coquelin } 846*86e71eb2SMaxime Coquelin 847*86e71eb2SMaxime Coquelin static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr, 848*86e71eb2SMaxime Coquelin uint64_t iova, size_t len) 849*86e71eb2SMaxime Coquelin { 850*86e71eb2SMaxime Coquelin const char *name; 851*86e71eb2SMaxime Coquelin struct rte_eth_dev *eth_dev; 852*86e71eb2SMaxime Coquelin struct virtio_user_dev *dev; 853*86e71eb2SMaxime Coquelin struct virtio_hw *hw; 854*86e71eb2SMaxime Coquelin 855*86e71eb2SMaxime Coquelin if (!vdev) 856*86e71eb2SMaxime Coquelin return -EINVAL; 857*86e71eb2SMaxime Coquelin 858*86e71eb2SMaxime Coquelin name = rte_vdev_device_name(vdev); 859*86e71eb2SMaxime Coquelin eth_dev = rte_eth_dev_allocated(name); 860*86e71eb2SMaxime Coquelin /* Port has already been released by close. */ 861*86e71eb2SMaxime Coquelin if (!eth_dev) 862*86e71eb2SMaxime Coquelin return 0; 863*86e71eb2SMaxime Coquelin 864*86e71eb2SMaxime Coquelin hw = (struct virtio_hw *)eth_dev->data->dev_private; 865*86e71eb2SMaxime Coquelin dev = hw->virtio_user_dev; 866*86e71eb2SMaxime Coquelin 867*86e71eb2SMaxime Coquelin if (dev->ops->dma_unmap) 868*86e71eb2SMaxime Coquelin return dev->ops->dma_unmap(dev, addr, iova, len); 869*86e71eb2SMaxime Coquelin 870*86e71eb2SMaxime Coquelin return 0; 871*86e71eb2SMaxime Coquelin } 872*86e71eb2SMaxime Coquelin 873fe363dd4SJan Viktorin static struct rte_vdev_driver virtio_user_driver = { 87450a3345fSShreyansh Jain .probe = virtio_user_pmd_probe, 87550a3345fSShreyansh Jain .remove = virtio_user_pmd_remove, 876*86e71eb2SMaxime Coquelin .dma_map = virtio_user_pmd_dma_map, 877*86e71eb2SMaxime Coquelin .dma_unmap = virtio_user_pmd_dma_unmap, 878ce2eabddSJianfeng Tan }; 879ce2eabddSJianfeng Tan 88001f19227SShreyansh Jain RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver); 8819fa80cb2SJan Blunck RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user); 88201f19227SShreyansh Jain RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user, 88344e32a67SPablo de Lara "path=<path> " 88444e32a67SPablo de Lara "mac=<mac addr> " 88544e32a67SPablo de Lara "cq=<int> " 88644e32a67SPablo de Lara "queue_size=<int> " 8874214a1b4SWenfeng Liu "queues=<int> " 888488ed97aSMarvin Liu "iface=<string> " 88962758c76STiwei Bie "server=<0|1> " 890488ed97aSMarvin Liu "mrg_rxbuf=<0|1> " 8919070f88bSTiwei Bie "in_order=<0|1> " 892b0db4beaSIvan Dyukov "packed_vq=<0|1> " 8936b7eefbcSMarvin Liu "speed=<int> " 8946b7eefbcSMarvin Liu "vectorized=<0|1>"); 895
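/*
 * Illustrative usage (hypothetical values): the parameters registered above can
 * be passed when creating a virtio-user port with EAL devargs, for example:
 *
 *   --vdev=net_virtio_user0,path=/tmp/vhost-user0.sock,queues=2,queue_size=1024,server=1
 *
 * "net_virtio_user0" and the socket path are placeholders; the keys match the
 * RTE_PMD_REGISTER_PARAM_STRING() list above.
 */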