/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/major.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <ethdev_vdev.h>
#include <bus_vdev_driver.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_io.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"

#define virtio_user_get_dev(hwp) container_of(hwp, struct virtio_user_dev, hw)

static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
			    void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		virtio_user_dev_update_link_state(dev);

		*(uint16_t *)dst = dev->net_status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;

	if (offset >= offsetof(struct virtio_net_config, rss_max_key_size))
		virtio_user_dev_get_rss_config(dev, dst, offset, length);
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
			     const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN)) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
		virtio_user_dev_set_mac(dev);
		virtio_user_dev_get_mac(dev);
	} else {
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
	}
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
	uint8_t old_status = dev->status;

	if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK &&
			~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK)
		virtio_user_dev_set_features(dev);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) {
		if (virtio_user_start_device(dev)) {
			virtio_user_dev_update_status(dev);
			return;
		}
	} else if (status == VIRTIO_CONFIG_STATUS_RESET) {
		virtio_user_reset(hw);
	}

	virtio_user_dev_set_status(dev, status);
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_update_status(dev);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* unmask feature bits defined in vhost user protocol */
	return (dev->device_features | dev->frontend_features) &
		VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

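/* For illustration only (feature bit and values are assumptions, not taken
 * from this driver): a feature bit is offered to the virtio frontend only if
 * it is present in the backend/frontend feature sets AND allowed by the PMD
 * mask. E.g. assuming the backend advertises VIRTIO_NET_F_MRG_RXBUF (bit 15)
 * and VIRTIO_PMD_SUPPORTED_GUEST_FEATURES allows it:
 *
 *   offered = (device_features | frontend_features)
 *             & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
 *   offered & (1ULL << VIRTIO_NET_F_MRG_RXBUF)  -> non-zero, bit is offered
 *
 * A bit missing from either side is silently dropped from the negotiation.
 */
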
static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & (dev->device_features | dev->frontend_features);
}

static int
virtio_user_features_ok(struct virtio_hw *hw __rte_unused)
{
	return 0;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user,
	 * here we only report config change.
	 */
	return VIRTIO_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
			   uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* pretend we have done that */
	return vec;
}

/* This function is to get the queue size, aka, number of descs, of a specified
 * queue. Different from VHOST_USER_GET_QUEUE_NUM, which is used to get the
 * max supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, every queue has the same queue size */
	return dev->queue_size;
}

static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->vrings.packed[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
				   sizeof(struct vring_packed_desc_event),
				   VIRTIO_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc_iova = vq->vq_ring_mem;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}

static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_VRING_ALIGN);

	dev->vrings.split[queue_idx].num = vq->vq_nentries;
	dev->vrings.split[queue_idx].desc_iova = vq->vq_ring_mem;
	dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
}

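/* Worked example of the layout computed above (numbers are illustrative, not
 * taken from this driver): for a split ring with vq_nentries = 256,
 * sizeof(struct vring_desc) = 16 and VIRTIO_VRING_ALIGN = 4096,
 *
 *   desc_addr  = base
 *   avail_addr = base + 256 * 16                                  = base + 4096
 *   used_addr  = RTE_ALIGN_CEIL(base + 4096 + (4 + 2 * 256), 4096) = base + 8192
 *
 * i.e. the used ring starts on the next 4 KiB boundary after the avail ring.
 */
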
static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (virtio_with_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	if (dev->notify_area)
		vq->notify_addr = dev->notify_area[vq->vq_queue_index];

	if (dev->hw_cvq && hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq))
		return virtio_user_dev_create_shadow_cvq(dev, vq);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU
	 * correspondingly stops the ioeventfds, and resets the status of
	 * the device.
	 * For modern devices, the queue desc, avail and used addresses in the
	 * PCI bar are set to 0; no further behavior is observed in QEMU.
	 *
	 * Here we just care about what information to deliver to vhost-user
	 * or vhost-kernel. So we just close ioeventfd for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);

	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq))
		virtio_user_dev_destroy_shadow_cvq(dev);
}

static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
	uint64_t notify_data = 1;

	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq)) {
		virtio_user_handle_cq(dev, vq->vq_queue_index);

		return;
	}

	if (!dev->notify_area) {
		if (write(dev->kickfds[vq->vq_queue_index], &notify_data,
			  sizeof(notify_data)) < 0)
			PMD_DRV_LOG(ERR, "failed to kick backend: %s",
				    strerror(errno));
		return;
	} else if (!virtio_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
		rte_write16(vq->vq_queue_index, vq->notify_addr);
		return;
	}

	if (virtio_with_packed_queue(hw)) {
		/* Bit[0:15]: vq queue index
		 * Bit[16:30]: avail index
		 * Bit[31]: avail wrap counter
		 */
		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
				VRING_PACKED_DESC_F_AVAIL)) << 31) |
				((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	} else {
		/* Bit[0:15]: vq queue index
		 * Bit[16:31]: avail index
		 */
		notify_data = ((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	}
	rte_write32(notify_data, vq->notify_addr);
}

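/* Illustrative encoding of the notification data written above (example
 * values only): for a packed queue with vq_queue_index = 3,
 * vq_avail_idx = 0x10 and the avail wrap counter set,
 *
 *   notify_data = (1u << 31) | (0x10 << 16) | 3 = 0x80100003
 *
 * With a split queue the top bit stays clear and only the avail index and
 * queue index are encoded.
 */
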
static int
virtio_user_dev_close(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_uninit(dev);

	return 0;
}

const struct virtio_ops virtio_user_ops = {
	.read_dev_cfg = virtio_user_read_dev_config,
	.write_dev_cfg = virtio_user_write_dev_config,
	.get_status = virtio_user_get_status,
	.set_status = virtio_user_set_status,
	.get_features = virtio_user_get_features,
	.set_features = virtio_user_set_features,
	.features_ok = virtio_user_features_ok,
	.get_isr = virtio_user_get_isr,
	.set_config_irq = virtio_user_set_config_irq,
	.set_queue_irq = virtio_user_set_queue_irq,
	.get_queue_num = virtio_user_get_queue_num,
	.setup_queue = virtio_user_setup_queue,
	.del_queue = virtio_user_del_queue,
	.notify_queue = virtio_user_notify_queue,
	.dev_close = virtio_user_dev_close,
};

static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED "speed"
	VIRTIO_USER_ARG_SPEED,
#define VIRTIO_USER_ARG_VECTORIZED "vectorized"
	VIRTIO_USER_ARG_VECTORIZED,
	NULL
};

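/* Example of how these devargs are typically supplied on the EAL command
 * line (socket path and values are illustrative, not driver defaults):
 *
 *   --vdev=net_virtio_user0,path=/tmp/vhost-user.sock,queues=2,queue_size=1024,server=1
 *
 * Keys not listed in valid_args make rte_kvargs_parse() fail below in probe.
 */
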
#define VIRTIO_USER_DEF_CQ_EN	0
#define VIRTIO_USER_DEF_Q_NUM	1
#define VIRTIO_USER_DEF_Q_SZ	256
#define VIRTIO_USER_DEF_SERVER_MODE	0

static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	uint64_t integer = 0;
	if (!value || !extra_args)
		return -EINVAL;
	errno = 0;
	integer = strtoull(value, NULL, 0);
	/* extra_args keeps default value, it should be replaced
	 * only in case of successful parsing of the 'value' arg
	 */
	if (errno == 0)
		*(uint64_t *)extra_args = integer;
	return -errno;
}

static uint32_t
vdpa_dynamic_major_num(void)
{
	FILE *fp;
	char *line = NULL;
	size_t size = 0;
	char name[11];
	bool found = false;
	uint32_t num;

	fp = fopen("/proc/devices", "r");
	if (fp == NULL) {
		PMD_INIT_LOG(ERR, "Cannot open /proc/devices: %s",
			     strerror(errno));
		return UNNAMED_MAJOR;
	}

	while (getline(&line, &size, fp) > 0) {
		char *stripped = line + strspn(line, " ");
		if ((sscanf(stripped, "%u %10s", &num, name) == 2) &&
		    (strncmp(name, "vhost-vdpa", 10) == 0)) {
			found = true;
			break;
		}
	}
	free(line);
	fclose(fp);
	return found ? num : UNNAMED_MAJOR;
}

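/* For reference, the lines matched above look like this excerpt from
 * /proc/devices (the vhost-vdpa major number is dynamically assigned, so
 * 239 is only an example value):
 *
 *   Character devices:
 *    10 misc
 *   239 vhost-vdpa
 */
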
static enum virtio_user_backend_type
virtio_user_backend_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1) {
		if (errno == ENOENT)
			return VIRTIO_USER_BACKEND_VHOST_USER;

		PMD_INIT_LOG(ERR, "Stat fails: %s (%s)", path,
			     strerror(errno));
		return VIRTIO_USER_BACKEND_UNKNOWN;
	}

	if (S_ISSOCK(sb.st_mode)) {
		return VIRTIO_USER_BACKEND_VHOST_USER;
	} else if (S_ISCHR(sb.st_mode)) {
		if (major(sb.st_rdev) == MISC_MAJOR)
			return VIRTIO_USER_BACKEND_VHOST_KERNEL;
		if (major(sb.st_rdev) == vdpa_dynamic_major_num())
			return VIRTIO_USER_BACKEND_VHOST_VDPA;
	}
	return VIRTIO_USER_BACKEND_UNKNOWN;
}

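/* Typical outcomes of the detection above (the paths are examples only):
 *   path=/tmp/vhost-user.sock -> unix socket              -> vhost-user
 *   path=/dev/vhost-net       -> char dev, misc major     -> vhost-kernel
 *   path=/dev/vhost-vdpa-0    -> char dev, vdpa major     -> vhost-vdpa
 * A path that does not exist yet is assumed to be a vhost-user socket that
 * will be created later (e.g. in server mode).
 */
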
static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*dev));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	dev = eth_dev->data->dev_private;
	hw = &dev->hw;

	hw->port_id = data->port_id;
	VIRTIO_OPS(hw) = &virtio_user_ops;

	hw->intr_lsc = 1;
	hw->use_vec_rx = 0;
	hw->use_vec_tx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;

	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	rte_eth_dev_release_port(eth_dev);
}

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;
	enum virtio_user_backend_type backend_type = VIRTIO_USER_BACKEND_UNKNOWN;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	uint64_t vectorized = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	RTE_BUILD_BUG_ON(offsetof(struct virtio_user_dev, hw) != 0);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(vdev);
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		dev = eth_dev->data->dev_private;
		hw = &dev->hw;
		VIRTIO_OPS(hw) = &virtio_user_ops;

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &vdev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	backend_type = virtio_user_backend_type(path);
	if (backend_type == VIRTIO_USER_BACKEND_UNKNOWN) {
		PMD_INIT_LOG(ERR,
			     "unable to determine backend type for path %s",
			     path);
		goto end;
	}
	PMD_INIT_LOG(INFO, "Backend type detected: %s",
		     virtio_user_backend_strings[backend_type]);

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (backend_type != VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			PMD_INIT_LOG(ERR,
				"arg %s applies only to vhost-kernel backend",
				VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED,
				       &get_integer_arg, &vectorized) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_VECTORIZED);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(vdev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
		goto end;
	}

	dev = eth_dev->data->dev_private;
	hw = &dev->hw;
	if (virtio_user_dev_init(dev, path, (uint16_t)queues, cq,
			 queue_size, mac_addr, &ifname, server_mode,
			 mrg_rxbuf, in_order, packed_vq, backend_type) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/*
	 * Virtio-user requires using virtual addresses for the descriptor
	 * buffers, regardless of what other devices require.
	 */
	if (backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
		/* vDPA backend requires using IOVA for the buffers
		 * to make it work in IOVA as PA mode also.
		 */
		hw->use_va = false;
	else
		hw->use_va = true;

	/* previously called by pci probing for physical dev */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
		virtio_user_dev_uninit(dev);
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	if (vectorized) {
		if (packed_vq) {
#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
			hw->use_vec_rx = 1;
			hw->use_vec_tx = 1;
#else
			PMD_INIT_LOG(INFO,
				"building environment do not support packed ring vectorized");
#endif
		} else {
			hw->use_vec_rx = 1;
		}
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	rte_kvargs_free(kvlist);
	free(path);
	free(mac_addr);
	free(ifname);
	return ret;
}

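/* Summary of the vectorized path selection done at the end of probe above
 * (devarg combinations shown for illustration):
 *   vectorized=1, packed_vq=0             -> split-ring vectorized Rx only
 *   vectorized=1, packed_vq=1, AVX512/ARM -> packed-ring vectorized Rx and Tx
 *   vectorized=1, packed_vq=1, otherwise  -> falls back to the scalar path
 */
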
static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	return rte_eth_dev_close(eth_dev->data->port_id);
}

static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_map)
		return dev->ops->dma_map(dev, addr, iova, len);

	return 0;
}

static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_unmap)
		return dev->ops->dma_unmap(dev, addr, iova, len);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
	.dma_map = virtio_user_pmd_dma_map,
	.dma_unmap = virtio_user_pmd_dma_unmap,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1> "
	"speed=<int> "
	"vectorized=<0|1>");