/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"

#define virtio_user_get_dev(hw) \
	((struct virtio_user_dev *)(hw)->virtio_user_dev)

static void
virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Wait for the datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(rxvq->vq);
		virtio_dev_rx_queue_setup_finish(dev, i);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(txvq->vq);
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}


static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
	int ret;
	int connectfd;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
	struct virtio_hw *hw = eth_dev->data->dev_private;
	uint64_t protocol_features;

	connectfd = accept(dev->listenfd, NULL, NULL);
	if (connectfd < 0)
		return -1;

	dev->vhostfd = connectfd;
	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
				   &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	if (dev->device_features &
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
		if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
			return -1;

		dev->protocol_features &= protocol_features;

		if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
			return -1;

		if (!(dev->protocol_features &
				(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
			dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
	}

	dev->device_features |= dev->frontend_features;

	/* mask out vhost-user unsupported features */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	/* For packed ring, resetting queues is required in reconnection. */
	if (vtpci_packed_queue(hw) &&
	    (vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
				" when packed ring reconnecting.");
		virtio_user_reset_queues_packed(eth_dev);
	}

	ret = virtio_user_start_device(dev);
	if (ret < 0)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		rte_intr_callback_unregister(eth_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);
		eth_dev->intr_handle->fd = connectfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -1;
		}
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
	return 0;
}

static void
virtio_user_delayed_handler(void *param)
{
	struct virtio_hw *hw = (struct virtio_hw *)param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		if (dev->vhostfd >= 0) {
			close(dev->vhostfd);
			dev->vhostfd = -1;
		}
		eth_dev->intr_handle->fd = dev->listenfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}

static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
			    void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		char buf[128];

		if (dev->vhostfd >= 0) {
			int r;
			int flags;

			flags = fcntl(dev->vhostfd, F_GETFL);
			if (fcntl(dev->vhostfd, F_SETFL,
					flags | O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
				return;
			}
			r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
			if (r == 0 || (r < 0 && errno != EAGAIN)) {
				dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
				PMD_DRV_LOG(ERR, "virtio-user port %u is down",
					    hw->port_id);

				/* This function could be called in the process
				 * of interrupt handling, so the callback cannot
				 * be unregistered here; set an alarm to do it.
				 */
				rte_eal_alarm_set(1,
						  virtio_user_delayed_handler,
						  (void *)hw);
			} else {
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
			}
			if (fcntl(dev->vhostfd, F_SETFL,
					flags & ~O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
				return;
			}
		} else if (dev->is_server) {
			dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
			if (virtio_user_server_reconnect(dev) >= 0)
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
		}

		*(uint16_t *)dst = dev->net_status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
			     const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN))
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
	else
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_start_device(dev);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		virtio_user_reset(hw);
	dev->status = status;
	virtio_user_send_status_update(dev, status);
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_update_status(dev);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* unmask feature bits defined in vhost user protocol */
	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & dev->device_features;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user,
	 * here we only report config change.
	 */
	return VIRTIO_PCI_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
			   uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* pretend we have done that */
	return vec;
}

/* This function returns the queue size, i.e. the number of descriptors, of a
 * specified queue. Unlike VHOST_USER_GET_QUEUE_NUM, which is used to get the
 * maximum number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, each queue has the same queue size */
	return dev->queue_size;
}

static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->packed_vrings[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			   sizeof(struct vring_packed_desc_event),
			   VIRTIO_PCI_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}

static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
						 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	dev->vrings[queue_idx].num = vq->vq_nentries;
	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (vtpci_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes
	 * QEMU stop the corresponding ioeventfds and reset the status of
	 * the device.
	 * For modern devices, setting queue desc, avail and used addresses in
	 * the PCI bar to 0 triggers no further behavior in QEMU.
	 *
	 * Here we just care about what information to deliver to vhost-user
	 * or vhost-kernel, so we just close the ioeventfds for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);
}

static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t buf = 1;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (hw->cvq && (hw->cvq->vq == vq)) {
		if (vtpci_packed_queue(vq->hw))
			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
		else
			virtio_user_handle_cq(dev, vq->vq_queue_index);
		return;
	}

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
};

static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM         "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC            "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH           "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE    "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF      "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER       "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ      "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED          "speed"
	VIRTIO_USER_ARG_SPEED,
#define VIRTIO_USER_ARG_VECTORIZED     "vectorized"
	VIRTIO_USER_ARG_VECTORIZED,
	NULL
};

#define VIRTIO_USER_DEF_CQ_EN		0
#define VIRTIO_USER_DEF_Q_NUM		1
#define VIRTIO_USER_DEF_Q_SZ		256
#define VIRTIO_USER_DEF_SERVER_MODE	0

static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	uint64_t integer = 0;
	if (!value || !extra_args)
		return -EINVAL;
	errno = 0;
	integer = strtoull(value, NULL, 0);
	/* extra_args keeps default value, it should be replaced
	 * only in case of successful parsing of the 'value' arg
	 */
	if (errno == 0)
		*(uint64_t *)extra_args = integer;
	return -errno;
}

static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	hw = eth_dev->data->dev_private;

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev) {
		PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
		rte_eth_dev_release_port(eth_dev);
		return NULL;
	}

	hw->port_id = data->port_id;
	dev->port_id = data->port_id;
	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
	/*
	 * MSIX is required to enable LSC (see virtio_init_device).
	 * Here just pretend that we support msix.
	 */
	hw->use_msix = 1;
	hw->modern = 0;
	hw->use_vec_rx = 0;
	hw->use_vec_tx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;
	hw->virtio_user_dev = dev;
	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct virtio_hw *hw = data->dev_private;

	rte_free(hw->virtio_user_dev);
	rte_eth_dev_release_port(eth_dev);
}

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	uint64_t vectorized = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(dev);
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (is_vhost_user_by_type(path)) {
			PMD_INIT_LOG(ERR,
				"arg %s applies only to vhost-kernel backend",
				VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
%s", 654 VIRTIO_USER_ARG_MAC); 655 goto end; 656 } 657 } 658 659 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) { 660 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE, 661 &get_integer_arg, &queue_size) < 0) { 662 PMD_INIT_LOG(ERR, "error to parse %s", 663 VIRTIO_USER_ARG_QUEUE_SIZE); 664 goto end; 665 } 666 } 667 668 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) { 669 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM, 670 &get_integer_arg, &queues) < 0) { 671 PMD_INIT_LOG(ERR, "error to parse %s", 672 VIRTIO_USER_ARG_QUEUES_NUM); 673 goto end; 674 } 675 } 676 677 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) { 678 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE, 679 &get_integer_arg, &server_mode) < 0) { 680 PMD_INIT_LOG(ERR, "error to parse %s", 681 VIRTIO_USER_ARG_SERVER_MODE); 682 goto end; 683 } 684 } 685 686 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) { 687 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM, 688 &get_integer_arg, &cq) < 0) { 689 PMD_INIT_LOG(ERR, "error to parse %s", 690 VIRTIO_USER_ARG_CQ_NUM); 691 goto end; 692 } 693 } else if (queues > 1) { 694 cq = 1; 695 } 696 697 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) { 698 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ, 699 &get_integer_arg, &packed_vq) < 0) { 700 PMD_INIT_LOG(ERR, "error to parse %s", 701 VIRTIO_USER_ARG_PACKED_VQ); 702 goto end; 703 } 704 } 705 706 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) { 707 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED, 708 &get_integer_arg, &vectorized) < 0) { 709 PMD_INIT_LOG(ERR, "error to parse %s", 710 VIRTIO_USER_ARG_VECTORIZED); 711 goto end; 712 } 713 } 714 715 if (queues > 1 && cq == 0) { 716 PMD_INIT_LOG(ERR, "multi-q requires ctrl-q"); 717 goto end; 718 } 719 720 if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) { 721 PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u", 722 VIRTIO_USER_ARG_QUEUES_NUM, queues, 723 VIRTIO_MAX_VIRTQUEUE_PAIRS); 724 goto end; 725 } 726 727 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) { 728 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF, 729 &get_integer_arg, &mrg_rxbuf) < 0) { 730 PMD_INIT_LOG(ERR, "error to parse %s", 731 VIRTIO_USER_ARG_MRG_RXBUF); 732 goto end; 733 } 734 } 735 736 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) { 737 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER, 738 &get_integer_arg, &in_order) < 0) { 739 PMD_INIT_LOG(ERR, "error to parse %s", 740 VIRTIO_USER_ARG_IN_ORDER); 741 goto end; 742 } 743 } 744 745 eth_dev = virtio_user_eth_dev_alloc(dev); 746 if (!eth_dev) { 747 PMD_INIT_LOG(ERR, "virtio_user fails to alloc device"); 748 goto end; 749 } 750 751 hw = eth_dev->data->dev_private; 752 if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq, 753 queue_size, mac_addr, &ifname, server_mode, 754 mrg_rxbuf, in_order, packed_vq) < 0) { 755 PMD_INIT_LOG(ERR, "virtio_user_dev_init fails"); 756 virtio_user_eth_dev_free(eth_dev); 757 goto end; 758 } 759 760 /* previously called by pci probing for physical dev */ 761 if (eth_virtio_dev_init(eth_dev) < 0) { 762 PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails"); 763 virtio_user_eth_dev_free(eth_dev); 764 goto end; 765 } 766 767 if (vectorized) { 768 if (packed_vq) { 769 #if defined(CC_AVX512_SUPPORT) 770 hw->use_vec_rx = 1; 771 hw->use_vec_tx = 1; 772 #else 773 PMD_INIT_LOG(INFO, 774 "building environment do not support packed ring vectorized"); 775 #endif 776 } else { 
			hw->use_vec_rx = 1;
		}
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (path)
		free(path);
	if (mac_addr)
		free(mac_addr);
	if (ifname)
		free(ifname);
	return ret;
}

static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	rte_eth_dev_close(eth_dev->data->port_id);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1> "
	"speed=<int> "
	"vectorized=<0|1>");
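
/*
 * Illustrative usage only (not part of the driver itself): given the devargs
 * registered above, a virtio-user port can be created from the EAL command
 * line. The vdev instance name and socket path below are hypothetical
 * examples, not values defined by this file:
 *
 *   --vdev=net_virtio_user0,path=/tmp/vhost-user.sock,queues=2,queue_size=1024,server=1
 *
 * With server=1 the PMD creates and listens on the socket, a vhost-user
 * backend connects to it, and virtio_user_server_reconnect() above handles
 * the backend reconnecting later.
 */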