/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/major.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <ethdev_vdev.h>
#include <bus_vdev_driver.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_io.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"

#define virtio_user_get_dev(hwp) container_of(hwp, struct virtio_user_dev, hw)

static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
			    void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		virtio_user_dev_update_link_state(dev);

		*(uint16_t *)dst = dev->net_status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;

	if (offset >= offsetof(struct virtio_net_config, rss_max_key_size))
		virtio_user_dev_get_rss_config(dev, dst, offset, length);
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
			     const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN)) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
		virtio_user_dev_set_mac(dev);
		virtio_user_dev_get_mac(dev);
	} else {
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
	}
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
	uint8_t old_status = dev->status;

	if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK &&
			~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK)
		virtio_user_dev_set_features(dev);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) {
		if (virtio_user_start_device(dev)) {
			virtio_user_dev_update_status(dev);
			return;
		}
	} else if (status == VIRTIO_CONFIG_STATUS_RESET) {
		virtio_user_reset(hw);
	}

	virtio_user_dev_set_status(dev, status);
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_update_status(dev);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* unmask feature bits defined in vhost user protocol */
	return (dev->device_features | dev->frontend_features) &
	       VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

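/* The features offered above are the union of what the backend reports
 * (device_features) and what virtio-user emulates itself (frontend_features),
 * masked by what this PMD supports. The setter below keeps only bits within
 * that union, so the driver cannot ack a feature that nobody implements.
 */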
static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & (dev->device_features | dev->frontend_features);
}

static int
virtio_user_features_ok(struct virtio_hw *hw __rte_unused)
{
	return 0;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user,
	 * here we only report config change.
	 */
	return VIRTIO_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
			   uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* pretend we have done that */
	return vec;
}

/* This function returns the queue size, i.e. the number of descriptors, of a
 * specified queue. It differs from VHOST_USER_GET_QUEUE_NUM, which is used to
 * get the maximum number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, every queue has the same queue size */
	return dev->queue_size;
}

static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->vrings.packed[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			sizeof(struct vring_packed_desc_event),
			VIRTIO_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc_iova = vq->vq_ring_mem;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;
	dev->packed_queues[queue_idx].used_idx = 0;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}

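/* Split-ring layout inside the contiguous ring memory allocated by the common
 * virtio layer: the descriptor table comes first, the avail ring follows it,
 * and the used ring starts at the next VIRTIO_VRING_ALIGN boundary. For
 * example, with the default 256 entries and 4096-byte alignment: descriptors
 * take 256 * 16 = 4096 bytes, the avail ring ends at 4096 + 4 + 2 * 256 = 4612,
 * so the used ring is placed at offset 8192.
 */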
static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
					ring[vq->vq_nentries]),
				   VIRTIO_VRING_ALIGN);

	dev->vrings.split[queue_idx].num = vq->vq_nentries;
	dev->vrings.split[queue_idx].desc_iova = vq->vq_ring_mem;
	dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (virtio_with_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	if (dev->notify_area)
		vq->notify_addr = dev->notify_area[vq->vq_queue_index];

	if (dev->hw_cvq && hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq))
		return virtio_user_dev_create_shadow_cvq(dev, vq);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes
	 * QEMU stop the corresponding ioeventfds and reset the status of the
	 * device.
	 * For modern devices, the queue desc, avail and used addresses in the
	 * PCI bar are set to 0; no further behavior is expected from QEMU.
	 *
	 * Here we only care about what information to deliver to vhost-user
	 * or vhost-kernel, so we just close the ioeventfds for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);

	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq))
		virtio_user_dev_destroy_shadow_cvq(dev);
}

static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
	uint64_t notify_data = 1;

	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq)) {
		virtio_user_handle_cq(dev, vq->vq_queue_index);

		return;
	}

	if (!dev->notify_area) {
		if (write(dev->kickfds[vq->vq_queue_index], &notify_data,
			  sizeof(notify_data)) < 0)
			PMD_DRV_LOG(ERR, "failed to kick backend: %s",
				    strerror(errno));
		return;
	} else if (!virtio_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
		rte_write16(vq->vq_queue_index, vq->notify_addr);
		return;
	}

	if (virtio_with_packed_queue(hw)) {
		/* Bit[0:15]: vq queue index
		 * Bit[16:30]: avail index
		 * Bit[31]: avail wrap counter
		 */
		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
				VRING_PACKED_DESC_F_AVAIL)) << 31) |
				((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	} else {
		/* Bit[0:15]: vq queue index
		 * Bit[16:31]: avail index
		 */
		notify_data = ((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	}
	rte_write32(notify_data, vq->notify_addr);
}

static int
virtio_user_dev_close(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_uninit(dev);

	return 0;
}

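/* Device-access callbacks plugged into the common virtio ethdev layer;
 * VIRTIO_OPS(hw) resolves to this table for virtio-user ports (see
 * virtio_user_eth_dev_alloc() below).
 */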
const struct virtio_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.features_ok	= virtio_user_features_ok,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
	.dev_close	= virtio_user_dev_close,
};

static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED "speed"
	VIRTIO_USER_ARG_SPEED,
#define VIRTIO_USER_ARG_VECTORIZED "vectorized"
	VIRTIO_USER_ARG_VECTORIZED,
	NULL
};

#define VIRTIO_USER_DEF_CQ_EN	0
#define VIRTIO_USER_DEF_Q_NUM	1
#define VIRTIO_USER_DEF_Q_SZ	256
#define VIRTIO_USER_DEF_SERVER_MODE	0

static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	uint64_t integer = 0;

	if (!value || !extra_args)
		return -EINVAL;
	errno = 0;
	integer = strtoull(value, NULL, 0);
	/* extra_args keeps its default value; it is only replaced when the
	 * 'value' arg is parsed successfully.
	 */
	if (errno == 0)
		*(uint64_t *)extra_args = integer;
	return -errno;
}

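/* vhost-vdpa character devices use a dynamically allocated major number, so it
 * has to be looked up in /proc/devices at runtime. A matching entry looks
 * roughly like the following (the actual number varies between systems):
 *     239 vhost-vdpa
 */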
static uint32_t
vdpa_dynamic_major_num(void)
{
	FILE *fp;
	char *line = NULL;
	size_t size = 0;
	char name[11];
	bool found = false;
	uint32_t num;

	fp = fopen("/proc/devices", "r");
	if (fp == NULL) {
		PMD_INIT_LOG(ERR, "Cannot open /proc/devices: %s",
			     strerror(errno));
		return UNNAMED_MAJOR;
	}

	while (getline(&line, &size, fp) > 0) {
		char *stripped = line + strspn(line, " ");
		if ((sscanf(stripped, "%u %10s", &num, name) == 2) &&
		    (strncmp(name, "vhost-vdpa", 10) == 0)) {
			found = true;
			break;
		}
	}
	free(line);
	fclose(fp);
	return found ? num : UNNAMED_MAJOR;
}

static enum virtio_user_backend_type
virtio_user_backend_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1) {
		if (errno == ENOENT)
			return VIRTIO_USER_BACKEND_VHOST_USER;

		PMD_INIT_LOG(ERR, "Stat fails: %s (%s)", path,
			     strerror(errno));
		return VIRTIO_USER_BACKEND_UNKNOWN;
	}

	if (S_ISSOCK(sb.st_mode)) {
		return VIRTIO_USER_BACKEND_VHOST_USER;
	} else if (S_ISCHR(sb.st_mode)) {
		if (major(sb.st_rdev) == MISC_MAJOR)
			return VIRTIO_USER_BACKEND_VHOST_KERNEL;
		if (major(sb.st_rdev) == vdpa_dynamic_major_num())
			return VIRTIO_USER_BACKEND_VHOST_VDPA;
	}
	return VIRTIO_USER_BACKEND_UNKNOWN;
}

static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*dev));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	dev = eth_dev->data->dev_private;
	hw = &dev->hw;

	hw->port_id = data->port_id;
	VIRTIO_OPS(hw) = &virtio_user_ops;

	hw->intr_lsc = 1;
	hw->use_vec_rx = 0;
	hw->use_vec_tx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;

	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	rte_eth_dev_release_port(eth_dev);
}

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;
	enum virtio_user_backend_type backend_type = VIRTIO_USER_BACKEND_UNKNOWN;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	uint64_t vectorized = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	RTE_BUILD_BUG_ON(offsetof(struct virtio_user_dev, hw) != 0);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(vdev);
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		dev = eth_dev->data->dev_private;
		hw = &dev->hw;
		VIRTIO_OPS(hw) = &virtio_user_ops;

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &vdev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

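	/* The backend type is inferred from the path: a UNIX socket selects
	 * vhost-user, a misc character device (e.g. /dev/vhost-net) selects
	 * vhost-kernel, and a character device with the vhost-vdpa major
	 * selects vhost-vdpa (see virtio_user_backend_type() above).
	 */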
	backend_type = virtio_user_backend_type(path);
	if (backend_type == VIRTIO_USER_BACKEND_UNKNOWN) {
		PMD_INIT_LOG(ERR,
			     "unable to determine backend type for path %s",
			     path);
		goto end;
	}
	PMD_INIT_LOG(INFO, "Backend type detected: %s",
		     virtio_user_backend_strings[backend_type]);

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (backend_type != VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			PMD_INIT_LOG(ERR,
				     "arg %s applies only to vhost-kernel backend",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED,
				       &get_integer_arg, &vectorized) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_VECTORIZED);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(vdev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
		goto end;
	}

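	/* Hand the parsed devargs to the virtio-user core: this opens the
	 * backend (vhost-user socket, vhost-kernel or vhost-vdpa device) and
	 * prepares the emulated device before the generic virtio ethdev
	 * initialization below.
	 */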
	dev = eth_dev->data->dev_private;
	hw = &dev->hw;
	if (virtio_user_dev_init(dev, path, (uint16_t)queues, cq,
				 queue_size, mac_addr, &ifname, server_mode,
				 mrg_rxbuf, in_order, packed_vq, backend_type) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/*
	 * Virtio-user requires using virtual addresses for the descriptors
	 * buffers, regardless of what other devices require.
	 */
	if (backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
		/* The vDPA backend requires using IOVA for the buffers
		 * so that it also works in IOVA-as-PA mode.
		 */
		hw->use_va = false;
	else
		hw->use_va = true;

	/* previously called by pci probing for physical dev */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
		virtio_user_dev_uninit(dev);
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	if (vectorized) {
		if (packed_vq) {
#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
			hw->use_vec_rx = 1;
			hw->use_vec_tx = 1;
#else
			PMD_INIT_LOG(INFO,
				"building environment do not support packed ring vectorized");
#endif
		} else {
			hw->use_vec_rx = 1;
		}
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	rte_kvargs_free(kvlist);
	free(path);
	free(mac_addr);
	free(ifname);
	return ret;
}

static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	return rte_eth_dev_close(eth_dev->data->port_id);
}

static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_map)
		return dev->ops->dma_map(dev, addr, iova, len);

	return 0;
}

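/* Like dma_map above, dma_unmap forwards the request to the backend-specific
 * hook when one exists; backends that do not need explicit DMA (un)mapping,
 * such as vhost-user, simply leave these ops unset.
 */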
static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_unmap)
		return dev->ops->dma_unmap(dev, addr, iova, len);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
	.dma_map = virtio_user_pmd_dma_map,
	.dma_unmap = virtio_user_pmd_dma_unmap,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1> "
	"speed=<int> "
	"vectorized=<0|1>");
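
/* Example usage of the parameter string above (paths are illustrative and
 * should be adapted to the actual setup):
 *
 *   vhost-user backend:
 *     --vdev=net_virtio_user0,path=/tmp/vhost-user.sock,queues=2,queue_size=1024
 *
 *   vhost-kernel backend:
 *     --vdev=net_virtio_user0,path=/dev/vhost-net,iface=tap0
 */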