/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/major.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <ethdev_vdev.h>
#include <bus_vdev_driver.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_io.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"

#define virtio_user_get_dev(hwp) container_of(hwp, struct virtio_user_dev, hw)

static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
			    void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		virtio_user_dev_update_link_state(dev);

		*(uint16_t *)dst = dev->net_status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;

	if (offset >= offsetof(struct virtio_net_config, rss_max_key_size))
		virtio_user_dev_get_rss_config(dev, dst, offset, length);
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
			     const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN)) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
		virtio_user_dev_set_mac(dev);
		virtio_user_dev_get_mac(dev);
	} else {
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
	}
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
	uint8_t old_status = dev->status;

	if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK &&
			~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK)
		virtio_user_dev_set_features(dev);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) {
		if (virtio_user_start_device(dev)) {
			virtio_user_dev_update_status(dev);
			return;
		}
	} else if (status == VIRTIO_CONFIG_STATUS_RESET) {
		virtio_user_reset(hw);
	}

	virtio_user_dev_set_status(dev, status);
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_update_status(dev);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* unmask feature bits defined in vhost user protocol */
	return (dev->device_features | dev->frontend_features) &
		VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}
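/*
 * Note: only feature bits offered by the backend device or emulated by the
 * frontend can be kept below; anything else requested by the caller is
 * masked off.
 */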
static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & (dev->device_features | dev->frontend_features);
}

static int
virtio_user_features_ok(struct virtio_hw *hw __rte_unused)
{
	return 0;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user,
	 * here we only report config change.
	 */
	return VIRTIO_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
			   uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* pretend we have done that */
	return vec;
}

/* This function returns the queue size, i.e. the number of descriptors, of a
 * specified queue. It differs from VHOST_USER_GET_QUEUE_NUM, which is used to
 * get the maximum number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, each queue has the same queue size */
	return dev->queue_size;
}

static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->vrings.packed[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
				   sizeof(struct vring_packed_desc_event),
				   VIRTIO_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}

static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_VRING_ALIGN);

	dev->vrings.split[queue_idx].num = vq->vq_nentries;
	dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (virtio_with_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	if (dev->notify_area)
		vq->notify_addr = dev->notify_area[vq->vq_queue_index];

	if (dev->hw_cvq && hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq))
		return virtio_user_dev_create_shadow_cvq(dev, vq);

	return 0;
}
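/*
 * Note: the split and packed helpers above carve the descriptor, avail and
 * used areas out of the single contiguous vq_ring_virt_mem allocation and
 * store process-virtual addresses in dev->vrings for the backend to map.
 */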
static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, write 0 to the VIRTIO_PCI_QUEUE_PFN port; QEMU
	 * correspondingly stops the ioeventfds and resets the status of
	 * the device.
	 * For modern devices, set queue desc, avail, used in the PCI bar to 0;
	 * no further behavior is observed in QEMU.
	 *
	 * Here we just care about what information to deliver to vhost-user
	 * or vhost-kernel. So we just close ioeventfd for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);

	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq))
		virtio_user_dev_destroy_shadow_cvq(dev);
}

static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
	uint64_t notify_data = 1;

	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq)) {
		virtio_user_handle_cq(dev, vq->vq_queue_index);

		return;
	}

	if (!dev->notify_area) {
		if (write(dev->kickfds[vq->vq_queue_index], &notify_data,
			  sizeof(notify_data)) < 0)
			PMD_DRV_LOG(ERR, "failed to kick backend: %s",
				    strerror(errno));
		return;
	} else if (!virtio_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
		rte_write16(vq->vq_queue_index, vq->notify_addr);
		return;
	}

	if (virtio_with_packed_queue(hw)) {
		/* Bit[0:15]: vq queue index
		 * Bit[16:30]: avail index
		 * Bit[31]: avail wrap counter
		 */
		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
				VRING_PACKED_DESC_F_AVAIL)) << 31) |
				((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	} else {
		/* Bit[0:15]: vq queue index
		 * Bit[16:31]: avail index
		 */
		notify_data = ((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	}
	rte_write32(notify_data, vq->notify_addr);
}

static int
virtio_user_dev_close(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_dev_uninit(dev);

	return 0;
}

const struct virtio_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.features_ok	= virtio_user_features_ok,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
	.dev_close	= virtio_user_dev_close,
};
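/*
 * Devargs accepted on the --vdev command line. A purely illustrative example
 * with a vhost-user backend could look like:
 *   --vdev=net_virtio_user0,path=/tmp/vhost-user.sock,queues=2,queue_size=1024
 * Numeric options that are not supplied fall back to the VIRTIO_USER_DEF_*
 * values defined after the list.
 */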
static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM         "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC            "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH           "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE    "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF      "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER       "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ      "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED          "speed"
	VIRTIO_USER_ARG_SPEED,
#define VIRTIO_USER_ARG_VECTORIZED     "vectorized"
	VIRTIO_USER_ARG_VECTORIZED,
	NULL
};

#define VIRTIO_USER_DEF_CQ_EN		0
#define VIRTIO_USER_DEF_Q_NUM		1
#define VIRTIO_USER_DEF_Q_SZ		256
#define VIRTIO_USER_DEF_SERVER_MODE	0

static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	uint64_t integer = 0;

	if (!value || !extra_args)
		return -EINVAL;
	errno = 0;
	integer = strtoull(value, NULL, 0);
	/* extra_args keeps the default value; it should be replaced
	 * only in case of successful parsing of the 'value' arg.
	 */
	if (errno == 0)
		*(uint64_t *)extra_args = integer;
	return -errno;
}
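/*
 * vhost-vdpa character devices use a dynamically assigned major number.
 * Look it up in /proc/devices so that the backend type detection below can
 * distinguish them from vhost-kernel (MISC_MAJOR) devices.
 */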
static uint32_t
vdpa_dynamic_major_num(void)
{
	FILE *fp;
	char *line = NULL;
	size_t size = 0;
	char name[11];
	bool found = false;
	uint32_t num;

	fp = fopen("/proc/devices", "r");
	if (fp == NULL) {
		PMD_INIT_LOG(ERR, "Cannot open /proc/devices: %s",
			     strerror(errno));
		return UNNAMED_MAJOR;
	}

	while (getline(&line, &size, fp) > 0) {
		char *stripped = line + strspn(line, " ");

		if ((sscanf(stripped, "%u %10s", &num, name) == 2) &&
		    (strncmp(name, "vhost-vdpa", 10) == 0)) {
			found = true;
			break;
		}
	}
	free(line);
	fclose(fp);
	return found ? num : UNNAMED_MAJOR;
}

static enum virtio_user_backend_type
virtio_user_backend_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1) {
		if (errno == ENOENT)
			return VIRTIO_USER_BACKEND_VHOST_USER;

		PMD_INIT_LOG(ERR, "Stat fails: %s (%s)", path,
			     strerror(errno));
		return VIRTIO_USER_BACKEND_UNKNOWN;
	}

	if (S_ISSOCK(sb.st_mode)) {
		return VIRTIO_USER_BACKEND_VHOST_USER;
	} else if (S_ISCHR(sb.st_mode)) {
		if (major(sb.st_rdev) == MISC_MAJOR)
			return VIRTIO_USER_BACKEND_VHOST_KERNEL;
		if (major(sb.st_rdev) == vdpa_dynamic_major_num())
			return VIRTIO_USER_BACKEND_VHOST_VDPA;
	}
	return VIRTIO_USER_BACKEND_UNKNOWN;
}

static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*dev));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	dev = eth_dev->data->dev_private;
	hw = &dev->hw;

	hw->port_id = data->port_id;
	VIRTIO_OPS(hw) = &virtio_user_ops;

	hw->intr_lsc = 1;
	hw->use_vec_rx = 0;
	hw->use_vec_tx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;

	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	rte_eth_dev_release_port(eth_dev);
}

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;
	enum virtio_user_backend_type backend_type = VIRTIO_USER_BACKEND_UNKNOWN;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	uint64_t vectorized = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	RTE_BUILD_BUG_ON(offsetof(struct virtio_user_dev, hw) != 0);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(vdev);

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		dev = eth_dev->data->dev_private;
		hw = &dev->hw;
		VIRTIO_OPS(hw) = &virtio_user_ops;

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &vdev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	backend_type = virtio_user_backend_type(path);
	if (backend_type == VIRTIO_USER_BACKEND_UNKNOWN) {
		PMD_INIT_LOG(ERR,
			     "unable to determine backend type for path %s",
			     path);
		goto end;
	}
	PMD_INIT_LOG(INFO, "Backend type detected: %s",
		     virtio_user_backend_strings[backend_type]);

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (backend_type != VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			PMD_INIT_LOG(ERR,
				"arg %s applies only to vhost-kernel backend",
				VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED,
				       &get_integer_arg, &vectorized) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_VECTORIZED);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(vdev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
		goto end;
	}

	dev = eth_dev->data->dev_private;
	hw = &dev->hw;
	if (virtio_user_dev_init(dev, path, (uint16_t)queues, cq,
			 queue_size, mac_addr, &ifname, server_mode,
			 mrg_rxbuf, in_order, packed_vq, backend_type) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/*
	 * Virtio-user requires using virtual addresses for the descriptor
	 * buffers, regardless of what other devices require.
	 */
	hw->use_va = true;

	/* previously called by PCI probing for physical devices */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
		virtio_user_dev_uninit(dev);
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	if (vectorized) {
		if (packed_vq) {
#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
			hw->use_vec_rx = 1;
			hw->use_vec_tx = 1;
#else
			PMD_INIT_LOG(INFO,
				"building environment do not support packed ring vectorized");
#endif
		} else {
			hw->use_vec_rx = 1;
		}
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	rte_kvargs_free(kvlist);
	free(path);
	free(mac_addr);
	free(ifname);
	return ret;
}

static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	return rte_eth_dev_close(eth_dev->data->port_id);
}
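/*
 * The vdev bus dma_map/dma_unmap callbacks below just forward the request to
 * the backend when it provides an implementation (dev->ops->dma_map/unmap);
 * backends without one treat the call as a successful no-op.
 */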
static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_map)
		return dev->ops->dma_map(dev, addr, iova, len);

	return 0;
}

static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	dev = eth_dev->data->dev_private;

	if (dev->ops->dma_unmap)
		return dev->ops->dma_unmap(dev, addr, iova, len);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
	.dma_map = virtio_user_pmd_dma_map,
	.dma_unmap = virtio_user_pmd_dma_unmap,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1> "
	"speed=<int> "
	"vectorized=<0|1>");
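/*
 * Illustrative invocation (assuming a vhost-kernel backend and a tap
 * interface name of the user's choosing):
 *   dpdk-testpmd --no-pci \
 *       --vdev=net_virtio_user0,path=/dev/vhost-net,queues=1,iface=tap0 -- -i
 */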