1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <sys/types.h> 7 #include <unistd.h> 8 #include <fcntl.h> 9 #include <sys/socket.h> 10 11 #include <rte_malloc.h> 12 #include <rte_kvargs.h> 13 #include <rte_ethdev_vdev.h> 14 #include <rte_bus_vdev.h> 15 #include <rte_alarm.h> 16 17 #include "virtio_ethdev.h" 18 #include "virtio_logs.h" 19 #include "virtio_pci.h" 20 #include "virtqueue.h" 21 #include "virtio_rxtx.h" 22 #include "virtio_user/virtio_user_dev.h" 23 24 #define virtio_user_get_dev(hw) \ 25 ((struct virtio_user_dev *)(hw)->virtio_user_dev) 26 27 static int 28 virtio_user_server_reconnect(struct virtio_user_dev *dev) 29 { 30 int ret; 31 int flag; 32 int connectfd; 33 struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id]; 34 35 connectfd = accept(dev->listenfd, NULL, NULL); 36 if (connectfd < 0) 37 return -1; 38 39 dev->vhostfd = connectfd; 40 if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES, 41 &dev->device_features) < 0) { 42 PMD_INIT_LOG(ERR, "get_features failed: %s", 43 strerror(errno)); 44 return -1; 45 } 46 47 /* umask vhost-user unsupported features */ 48 dev->device_features &= ~(dev->unsupported_features); 49 50 dev->features &= dev->device_features; 51 52 flag = fcntl(connectfd, F_GETFD); 53 fcntl(connectfd, F_SETFL, flag | O_NONBLOCK); 54 55 ret = virtio_user_start_device(dev); 56 if (ret < 0) 57 return -1; 58 59 if (dev->queue_pairs > 1) { 60 ret = virtio_user_handle_mq(dev, dev->queue_pairs); 61 if (ret != 0) { 62 PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!"); 63 return -1; 64 } 65 } 66 if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { 67 if (rte_intr_disable(eth_dev->intr_handle) < 0) { 68 PMD_DRV_LOG(ERR, "interrupt disable failed"); 69 return -1; 70 } 71 rte_intr_callback_unregister(eth_dev->intr_handle, 72 virtio_interrupt_handler, 73 eth_dev); 74 eth_dev->intr_handle->fd = connectfd; 75 rte_intr_callback_register(eth_dev->intr_handle, 76 virtio_interrupt_handler, eth_dev); 77 78 if (rte_intr_enable(eth_dev->intr_handle) < 0) { 79 PMD_DRV_LOG(ERR, "interrupt enable failed"); 80 return -1; 81 } 82 } 83 PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!"); 84 return 0; 85 } 86 87 static void 88 virtio_user_delayed_handler(void *param) 89 { 90 struct virtio_hw *hw = (struct virtio_hw *)param; 91 struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id]; 92 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 93 94 if (rte_intr_disable(eth_dev->intr_handle) < 0) { 95 PMD_DRV_LOG(ERR, "interrupt disable failed"); 96 return; 97 } 98 rte_intr_callback_unregister(eth_dev->intr_handle, 99 virtio_interrupt_handler, eth_dev); 100 if (dev->is_server) { 101 if (dev->vhostfd >= 0) { 102 close(dev->vhostfd); 103 dev->vhostfd = -1; 104 } 105 eth_dev->intr_handle->fd = dev->listenfd; 106 rte_intr_callback_register(eth_dev->intr_handle, 107 virtio_interrupt_handler, eth_dev); 108 if (rte_intr_enable(eth_dev->intr_handle) < 0) { 109 PMD_DRV_LOG(ERR, "interrupt enable failed"); 110 return; 111 } 112 } 113 } 114 115 static void 116 virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset, 117 void *dst, int length) 118 { 119 int i; 120 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 121 122 if (offset == offsetof(struct virtio_net_config, mac) && 123 length == ETHER_ADDR_LEN) { 124 for (i = 0; i < ETHER_ADDR_LEN; ++i) 125 ((uint8_t *)dst)[i] = dev->mac_addr[i]; 126 return; 127 } 128 129 if (offset == offsetof(struct virtio_net_config, status)) { 130 char buf[128]; 131 132 if (dev->vhostfd >= 0) { 133 int r; 134 int flags; 135 136 flags = fcntl(dev->vhostfd, F_GETFL); 137 if (fcntl(dev->vhostfd, F_SETFL, 138 flags | O_NONBLOCK) == -1) { 139 PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag"); 140 return; 141 } 142 r = recv(dev->vhostfd, buf, 128, MSG_PEEK); 143 if (r == 0 || (r < 0 && errno != EAGAIN)) { 144 dev->status &= (~VIRTIO_NET_S_LINK_UP); 145 PMD_DRV_LOG(ERR, "virtio-user port %u is down", 146 hw->port_id); 147 148 /* This function could be called in the process 149 * of interrupt handling, callback cannot be 150 * unregistered here, set an alarm to do it. 151 */ 152 rte_eal_alarm_set(1, 153 virtio_user_delayed_handler, 154 (void *)hw); 155 } else { 156 dev->status |= VIRTIO_NET_S_LINK_UP; 157 } 158 if (fcntl(dev->vhostfd, F_SETFL, 159 flags & ~O_NONBLOCK) == -1) { 160 PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag"); 161 return; 162 } 163 } else if (dev->is_server) { 164 dev->status &= (~VIRTIO_NET_S_LINK_UP); 165 if (virtio_user_server_reconnect(dev) >= 0) 166 dev->status |= VIRTIO_NET_S_LINK_UP; 167 } 168 169 *(uint16_t *)dst = dev->status; 170 } 171 172 if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs)) 173 *(uint16_t *)dst = dev->max_queue_pairs; 174 } 175 176 static void 177 virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset, 178 const void *src, int length) 179 { 180 int i; 181 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 182 183 if ((offset == offsetof(struct virtio_net_config, mac)) && 184 (length == ETHER_ADDR_LEN)) 185 for (i = 0; i < ETHER_ADDR_LEN; ++i) 186 dev->mac_addr[i] = ((const uint8_t *)src)[i]; 187 else 188 PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d", 189 offset, length); 190 } 191 192 static void 193 virtio_user_reset(struct virtio_hw *hw) 194 { 195 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 196 197 if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK) 198 virtio_user_stop_device(dev); 199 } 200 201 static void 202 virtio_user_set_status(struct virtio_hw *hw, uint8_t status) 203 { 204 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 205 206 if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) 207 virtio_user_start_device(dev); 208 else if (status == VIRTIO_CONFIG_STATUS_RESET) 209 virtio_user_reset(hw); 210 dev->status = status; 211 } 212 213 static uint8_t 214 virtio_user_get_status(struct virtio_hw *hw) 215 { 216 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 217 218 return dev->status; 219 } 220 221 static uint64_t 222 virtio_user_get_features(struct virtio_hw *hw) 223 { 224 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 225 226 /* unmask feature bits defined in vhost user protocol */ 227 return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES; 228 } 229 230 static void 231 virtio_user_set_features(struct virtio_hw *hw, uint64_t features) 232 { 233 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 234 235 dev->features = features & dev->device_features; 236 } 237 238 static uint8_t 239 virtio_user_get_isr(struct virtio_hw *hw __rte_unused) 240 { 241 /* rxq interrupts and config interrupt are separated in virtio-user, 242 * here we only report config change. 243 */ 244 return VIRTIO_PCI_ISR_CONFIG; 245 } 246 247 static uint16_t 248 virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused, 249 uint16_t vec __rte_unused) 250 { 251 return 0; 252 } 253 254 static uint16_t 255 virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused, 256 struct virtqueue *vq __rte_unused, 257 uint16_t vec) 258 { 259 /* pretend we have done that */ 260 return vec; 261 } 262 263 /* This function is to get the queue size, aka, number of descs, of a specified 264 * queue. Different with the VHOST_USER_GET_QUEUE_NUM, which is used to get the 265 * max supported queues. 266 */ 267 static uint16_t 268 virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused) 269 { 270 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 271 272 /* Currently, each queue has same queue size */ 273 return dev->queue_size; 274 } 275 276 static int 277 virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) 278 { 279 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 280 uint16_t queue_idx = vq->vq_queue_index; 281 uint64_t desc_addr, avail_addr, used_addr; 282 283 desc_addr = (uintptr_t)vq->vq_ring_virt_mem; 284 avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); 285 used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, 286 ring[vq->vq_nentries]), 287 VIRTIO_PCI_VRING_ALIGN); 288 289 dev->vrings[queue_idx].num = vq->vq_nentries; 290 dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr; 291 dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr; 292 dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr; 293 294 return 0; 295 } 296 297 static void 298 virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq) 299 { 300 /* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU 301 * correspondingly stops the ioeventfds, and reset the status of 302 * the device. 303 * For modern devices, set queue desc, avail, used in PCI bar to 0, 304 * not see any more behavior in QEMU. 305 * 306 * Here we just care about what information to deliver to vhost-user 307 * or vhost-kernel. So we just close ioeventfd for now. 308 */ 309 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 310 311 close(dev->callfds[vq->vq_queue_index]); 312 close(dev->kickfds[vq->vq_queue_index]); 313 } 314 315 static void 316 virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq) 317 { 318 uint64_t buf = 1; 319 struct virtio_user_dev *dev = virtio_user_get_dev(hw); 320 321 if (hw->cvq && (hw->cvq->vq == vq)) { 322 virtio_user_handle_cq(dev, vq->vq_queue_index); 323 return; 324 } 325 326 if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0) 327 PMD_DRV_LOG(ERR, "failed to kick backend: %s", 328 strerror(errno)); 329 } 330 331 const struct virtio_pci_ops virtio_user_ops = { 332 .read_dev_cfg = virtio_user_read_dev_config, 333 .write_dev_cfg = virtio_user_write_dev_config, 334 .reset = virtio_user_reset, 335 .get_status = virtio_user_get_status, 336 .set_status = virtio_user_set_status, 337 .get_features = virtio_user_get_features, 338 .set_features = virtio_user_set_features, 339 .get_isr = virtio_user_get_isr, 340 .set_config_irq = virtio_user_set_config_irq, 341 .set_queue_irq = virtio_user_set_queue_irq, 342 .get_queue_num = virtio_user_get_queue_num, 343 .setup_queue = virtio_user_setup_queue, 344 .del_queue = virtio_user_del_queue, 345 .notify_queue = virtio_user_notify_queue, 346 }; 347 348 static const char *valid_args[] = { 349 #define VIRTIO_USER_ARG_QUEUES_NUM "queues" 350 VIRTIO_USER_ARG_QUEUES_NUM, 351 #define VIRTIO_USER_ARG_CQ_NUM "cq" 352 VIRTIO_USER_ARG_CQ_NUM, 353 #define VIRTIO_USER_ARG_MAC "mac" 354 VIRTIO_USER_ARG_MAC, 355 #define VIRTIO_USER_ARG_PATH "path" 356 VIRTIO_USER_ARG_PATH, 357 #define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size" 358 VIRTIO_USER_ARG_QUEUE_SIZE, 359 #define VIRTIO_USER_ARG_INTERFACE_NAME "iface" 360 VIRTIO_USER_ARG_INTERFACE_NAME, 361 #define VIRTIO_USER_ARG_SERVER_MODE "server" 362 VIRTIO_USER_ARG_SERVER_MODE, 363 #define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf" 364 VIRTIO_USER_ARG_MRG_RXBUF, 365 #define VIRTIO_USER_ARG_IN_ORDER "in_order" 366 VIRTIO_USER_ARG_IN_ORDER, 367 NULL 368 }; 369 370 #define VIRTIO_USER_DEF_CQ_EN 0 371 #define VIRTIO_USER_DEF_Q_NUM 1 372 #define VIRTIO_USER_DEF_Q_SZ 256 373 #define VIRTIO_USER_DEF_SERVER_MODE 0 374 375 static int 376 get_string_arg(const char *key __rte_unused, 377 const char *value, void *extra_args) 378 { 379 if (!value || !extra_args) 380 return -EINVAL; 381 382 *(char **)extra_args = strdup(value); 383 384 if (!*(char **)extra_args) 385 return -ENOMEM; 386 387 return 0; 388 } 389 390 static int 391 get_integer_arg(const char *key __rte_unused, 392 const char *value, void *extra_args) 393 { 394 if (!value || !extra_args) 395 return -EINVAL; 396 397 *(uint64_t *)extra_args = strtoull(value, NULL, 0); 398 399 return 0; 400 } 401 402 static struct rte_vdev_driver virtio_user_driver; 403 404 static struct rte_eth_dev * 405 virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev) 406 { 407 struct rte_eth_dev *eth_dev; 408 struct rte_eth_dev_data *data; 409 struct virtio_hw *hw; 410 struct virtio_user_dev *dev; 411 412 eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw)); 413 if (!eth_dev) { 414 PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev"); 415 return NULL; 416 } 417 418 data = eth_dev->data; 419 hw = eth_dev->data->dev_private; 420 421 dev = rte_zmalloc(NULL, sizeof(*dev), 0); 422 if (!dev) { 423 PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed"); 424 rte_eth_dev_release_port(eth_dev); 425 rte_free(hw); 426 return NULL; 427 } 428 429 hw->port_id = data->port_id; 430 dev->port_id = data->port_id; 431 virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops; 432 /* 433 * MSIX is required to enable LSC (see virtio_init_device). 434 * Here just pretend that we support msix. 435 */ 436 hw->use_msix = 1; 437 hw->modern = 0; 438 hw->use_simple_rx = 0; 439 hw->use_inorder_rx = 0; 440 hw->use_inorder_tx = 0; 441 hw->virtio_user_dev = dev; 442 return eth_dev; 443 } 444 445 static void 446 virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev) 447 { 448 struct rte_eth_dev_data *data = eth_dev->data; 449 struct virtio_hw *hw = data->dev_private; 450 451 rte_free(hw->virtio_user_dev); 452 rte_free(hw); 453 rte_eth_dev_release_port(eth_dev); 454 } 455 456 /* Dev initialization routine. Invoked once for each virtio vdev at 457 * EAL init time, see rte_bus_probe(). 458 * Returns 0 on success. 459 */ 460 static int 461 virtio_user_pmd_probe(struct rte_vdev_device *dev) 462 { 463 struct rte_kvargs *kvlist = NULL; 464 struct rte_eth_dev *eth_dev; 465 struct virtio_hw *hw; 466 uint64_t queues = VIRTIO_USER_DEF_Q_NUM; 467 uint64_t cq = VIRTIO_USER_DEF_CQ_EN; 468 uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ; 469 uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE; 470 uint64_t mrg_rxbuf = 1; 471 uint64_t in_order = 1; 472 char *path = NULL; 473 char *ifname = NULL; 474 char *mac_addr = NULL; 475 int ret = -1; 476 477 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args); 478 if (!kvlist) { 479 PMD_INIT_LOG(ERR, "error when parsing param"); 480 goto end; 481 } 482 483 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) { 484 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH, 485 &get_string_arg, &path) < 0) { 486 PMD_INIT_LOG(ERR, "error to parse %s", 487 VIRTIO_USER_ARG_PATH); 488 goto end; 489 } 490 } else { 491 PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user", 492 VIRTIO_USER_ARG_QUEUE_SIZE); 493 goto end; 494 } 495 496 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) { 497 if (is_vhost_user_by_type(path)) { 498 PMD_INIT_LOG(ERR, 499 "arg %s applies only to vhost-kernel backend", 500 VIRTIO_USER_ARG_INTERFACE_NAME); 501 goto end; 502 } 503 504 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME, 505 &get_string_arg, &ifname) < 0) { 506 PMD_INIT_LOG(ERR, "error to parse %s", 507 VIRTIO_USER_ARG_INTERFACE_NAME); 508 goto end; 509 } 510 } 511 512 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) { 513 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC, 514 &get_string_arg, &mac_addr) < 0) { 515 PMD_INIT_LOG(ERR, "error to parse %s", 516 VIRTIO_USER_ARG_MAC); 517 goto end; 518 } 519 } 520 521 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) { 522 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE, 523 &get_integer_arg, &queue_size) < 0) { 524 PMD_INIT_LOG(ERR, "error to parse %s", 525 VIRTIO_USER_ARG_QUEUE_SIZE); 526 goto end; 527 } 528 } 529 530 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) { 531 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM, 532 &get_integer_arg, &queues) < 0) { 533 PMD_INIT_LOG(ERR, "error to parse %s", 534 VIRTIO_USER_ARG_QUEUES_NUM); 535 goto end; 536 } 537 } 538 539 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) { 540 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE, 541 &get_integer_arg, &server_mode) < 0) { 542 PMD_INIT_LOG(ERR, "error to parse %s", 543 VIRTIO_USER_ARG_SERVER_MODE); 544 goto end; 545 } 546 } 547 548 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) { 549 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM, 550 &get_integer_arg, &cq) < 0) { 551 PMD_INIT_LOG(ERR, "error to parse %s", 552 VIRTIO_USER_ARG_CQ_NUM); 553 goto end; 554 } 555 } else if (queues > 1) { 556 cq = 1; 557 } 558 559 if (queues > 1 && cq == 0) { 560 PMD_INIT_LOG(ERR, "multi-q requires ctrl-q"); 561 goto end; 562 } 563 564 if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) { 565 PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u", 566 VIRTIO_USER_ARG_QUEUES_NUM, queues, 567 VIRTIO_MAX_VIRTQUEUE_PAIRS); 568 goto end; 569 } 570 571 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) { 572 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF, 573 &get_integer_arg, &mrg_rxbuf) < 0) { 574 PMD_INIT_LOG(ERR, "error to parse %s", 575 VIRTIO_USER_ARG_MRG_RXBUF); 576 goto end; 577 } 578 } 579 580 if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) { 581 if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER, 582 &get_integer_arg, &in_order) < 0) { 583 PMD_INIT_LOG(ERR, "error to parse %s", 584 VIRTIO_USER_ARG_IN_ORDER); 585 goto end; 586 } 587 } 588 589 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 590 struct virtio_user_dev *vu_dev; 591 592 eth_dev = virtio_user_eth_dev_alloc(dev); 593 if (!eth_dev) { 594 PMD_INIT_LOG(ERR, "virtio_user fails to alloc device"); 595 goto end; 596 } 597 598 hw = eth_dev->data->dev_private; 599 vu_dev = virtio_user_get_dev(hw); 600 if (server_mode == 1) 601 vu_dev->is_server = true; 602 else 603 vu_dev->is_server = false; 604 if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq, 605 queue_size, mac_addr, &ifname, mrg_rxbuf, 606 in_order) < 0) { 607 PMD_INIT_LOG(ERR, "virtio_user_dev_init fails"); 608 virtio_user_eth_dev_free(eth_dev); 609 goto end; 610 } 611 612 } else { 613 eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev)); 614 if (!eth_dev) 615 goto end; 616 } 617 618 /* previously called by rte_pci_probe() for physical dev */ 619 if (eth_virtio_dev_init(eth_dev) < 0) { 620 PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails"); 621 virtio_user_eth_dev_free(eth_dev); 622 goto end; 623 } 624 625 rte_eth_dev_probing_finish(eth_dev); 626 ret = 0; 627 628 end: 629 if (kvlist) 630 rte_kvargs_free(kvlist); 631 if (path) 632 free(path); 633 if (mac_addr) 634 free(mac_addr); 635 if (ifname) 636 free(ifname); 637 return ret; 638 } 639 640 /** Called by rte_eth_dev_detach() */ 641 static int 642 virtio_user_pmd_remove(struct rte_vdev_device *vdev) 643 { 644 const char *name; 645 struct rte_eth_dev *eth_dev; 646 struct virtio_hw *hw; 647 struct virtio_user_dev *dev; 648 649 if (!vdev) 650 return -EINVAL; 651 652 name = rte_vdev_device_name(vdev); 653 PMD_DRV_LOG(INFO, "Un-Initializing %s", name); 654 eth_dev = rte_eth_dev_allocated(name); 655 if (!eth_dev) 656 return -ENODEV; 657 658 /* make sure the device is stopped, queues freed */ 659 rte_eth_dev_close(eth_dev->data->port_id); 660 661 hw = eth_dev->data->dev_private; 662 dev = hw->virtio_user_dev; 663 virtio_user_dev_uninit(dev); 664 665 rte_free(eth_dev->data->dev_private); 666 rte_eth_dev_release_port(eth_dev); 667 668 return 0; 669 } 670 671 static struct rte_vdev_driver virtio_user_driver = { 672 .probe = virtio_user_pmd_probe, 673 .remove = virtio_user_pmd_remove, 674 }; 675 676 RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver); 677 RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user); 678 RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user, 679 "path=<path> " 680 "mac=<mac addr> " 681 "cq=<int> " 682 "queue_size=<int> " 683 "queues=<int> " 684 "iface=<string> " 685 "server=<0|1> " 686 "mrg_rxbuf=<0|1> " 687 "in_order=<0|1>"); 688