/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 RehiveTech. All rights reserved.
 */

#include <string.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_eal.h>
#include <dev_driver.h>
#include <bus_driver.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "bus_vdev_driver.h"
#include "vdev_logs.h"
#include "vdev_private.h"

#define VDEV_MP_KEY	"bus_vdev_mp"

/* Forward declare to access virtual bus name */
static struct rte_bus rte_vdev_bus;

static TAILQ_HEAD(, rte_vdev_device) vdev_device_list =
	TAILQ_HEAD_INITIALIZER(vdev_device_list);
/* The lock needs to be recursive because a vdev can manage another vdev. */
static rte_spinlock_recursive_t vdev_device_list_lock =
	RTE_SPINLOCK_RECURSIVE_INITIALIZER;

static TAILQ_HEAD(, rte_vdev_driver) vdev_driver_list =
	TAILQ_HEAD_INITIALIZER(vdev_driver_list);

struct vdev_custom_scan {
	TAILQ_ENTRY(vdev_custom_scan) next;
	rte_vdev_scan_callback callback;
	void *user_arg;
};
TAILQ_HEAD(vdev_custom_scans, vdev_custom_scan);
static struct vdev_custom_scans vdev_custom_scans =
	TAILQ_HEAD_INITIALIZER(vdev_custom_scans);
static rte_spinlock_t vdev_custom_scan_lock = RTE_SPINLOCK_INITIALIZER;

/* register a driver */
void
rte_vdev_register(struct rte_vdev_driver *driver)
{
	TAILQ_INSERT_TAIL(&vdev_driver_list, driver, next);
}

/* unregister a driver */
void
rte_vdev_unregister(struct rte_vdev_driver *driver)
{
	TAILQ_REMOVE(&vdev_driver_list, driver, next);
}

int
rte_vdev_add_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
{
	struct vdev_custom_scan *custom_scan;

	rte_spinlock_lock(&vdev_custom_scan_lock);

	/* check if already registered */
	TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
		if (custom_scan->callback == callback &&
				custom_scan->user_arg == user_arg)
			break;
	}

	if (custom_scan == NULL) {
		custom_scan = malloc(sizeof(struct vdev_custom_scan));
		if (custom_scan != NULL) {
			custom_scan->callback = callback;
			custom_scan->user_arg = user_arg;
			TAILQ_INSERT_TAIL(&vdev_custom_scans, custom_scan, next);
		}
	}

	rte_spinlock_unlock(&vdev_custom_scan_lock);

	return (custom_scan == NULL) ? -1 : 0;
}
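/*
 * Illustrative sketch (not part of the original file): a custom scan
 * callback registered through rte_vdev_add_custom_scan() is expected to
 * populate the devargs list, mirroring the note inside vdev_scan() below.
 * The callback and the device name "net_example0" are hypothetical.
 *
 *	static void
 *	example_scan_cb(void *user_arg)
 *	{
 *		struct rte_devargs *devargs;
 *
 *		RTE_SET_USED(user_arg);
 *		devargs = calloc(1, sizeof(*devargs));
 *		if (devargs == NULL)
 *			return;
 *		devargs->bus = rte_bus_find_by_name("vdev");
 *		devargs->type = RTE_DEVTYPE_VIRTUAL;
 *		devargs->policy = RTE_DEV_ALLOWED;
 *		devargs->data = strdup("");
 *		devargs->args = devargs->data;
 *		strlcpy(devargs->name, "net_example0", sizeof(devargs->name));
 *		if (rte_devargs_insert(&devargs) < 0) {
 *			rte_devargs_reset(devargs);
 *			free(devargs);
 *		}
 *	}
 *
 *	rte_vdev_add_custom_scan(example_scan_cb, NULL);
 */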
int
rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
{
	struct vdev_custom_scan *custom_scan, *tmp_scan;

	rte_spinlock_lock(&vdev_custom_scan_lock);
	RTE_TAILQ_FOREACH_SAFE(custom_scan, &vdev_custom_scans, next,
				tmp_scan) {
		if (custom_scan->callback != callback ||
				(custom_scan->user_arg != (void *)-1 &&
				custom_scan->user_arg != user_arg))
			continue;
		TAILQ_REMOVE(&vdev_custom_scans, custom_scan, next);
		free(custom_scan);
	}
	rte_spinlock_unlock(&vdev_custom_scan_lock);

	return 0;
}

static int
vdev_parse(const char *name, void *addr)
{
	struct rte_vdev_driver **out = addr;
	struct rte_vdev_driver *driver = NULL;

	TAILQ_FOREACH(driver, &vdev_driver_list, next) {
		if (strncmp(driver->driver.name, name,
				strlen(driver->driver.name)) == 0)
			break;
		if (driver->driver.alias &&
				strncmp(driver->driver.alias, name,
					strlen(driver->driver.alias)) == 0)
			break;
	}
	if (driver != NULL &&
	    addr != NULL)
		*out = driver;
	return driver == NULL;
}

static int
vdev_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_vdev_device *vdev = RTE_DEV_TO_VDEV(dev);
	const struct rte_vdev_driver *driver;

	if (!vdev) {
		rte_errno = EINVAL;
		return -1;
	}

	if (!vdev->device.driver) {
		VDEV_LOG(DEBUG, "no driver attached to device %s", dev->name);
		return 1;
	}

	driver = container_of(vdev->device.driver, const struct rte_vdev_driver,
			driver);

	if (driver->dma_map)
		return driver->dma_map(vdev, addr, iova, len);

	return 0;
}

static int
vdev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_vdev_device *vdev = RTE_DEV_TO_VDEV(dev);
	const struct rte_vdev_driver *driver;

	if (!vdev) {
		rte_errno = EINVAL;
		return -1;
	}

	if (!vdev->device.driver) {
		VDEV_LOG(DEBUG, "no driver attached to device %s", dev->name);
		return 1;
	}

	driver = container_of(vdev->device.driver, const struct rte_vdev_driver,
			driver);

	if (driver->dma_unmap)
		return driver->dma_unmap(vdev, addr, iova, len);

	return 0;
}

static int
vdev_probe_all_drivers(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_vdev_driver *driver;
	enum rte_iova_mode iova_mode;
	int ret;

	if (rte_dev_is_probed(&dev->device))
		return -EEXIST;

	name = rte_vdev_device_name(dev);
	VDEV_LOG(DEBUG, "Search driver to probe device %s", name);

	if (vdev_parse(name, &driver))
		return -1;

	iova_mode = rte_eal_iova_mode();
	if ((driver->drv_flags & RTE_VDEV_DRV_NEED_IOVA_AS_VA) && (iova_mode == RTE_IOVA_PA)) {
		VDEV_LOG(ERR, "%s requires VA IOVA mode but current mode is PA, not initializing",
				name);
		return -1;
	}

	ret = driver->probe(dev);
	if (ret == 0)
		dev->device.driver = &driver->driver;
	return ret;
}
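/*
 * Illustrative sketch (not part of the original file): the drivers walked by
 * vdev_parse()/vdev_probe_all_drivers() above are typically defined and
 * registered as below. The driver name "net_example", the alias
 * "eth_example" and both callbacks are hypothetical; the registration
 * macros are the ones exposed by bus_vdev_driver.h.
 *
 *	static int
 *	example_vdev_probe(struct rte_vdev_device *dev)
 *	{
 *		VDEV_LOG(INFO, "probing %s", rte_vdev_device_name(dev));
 *		return 0;	// allocate ports/resources here
 *	}
 *
 *	static int
 *	example_vdev_remove(struct rte_vdev_device *dev)
 *	{
 *		VDEV_LOG(INFO, "removing %s", rte_vdev_device_name(dev));
 *		return 0;
 *	}
 *
 *	static struct rte_vdev_driver example_vdev_drv = {
 *		.probe = example_vdev_probe,
 *		.remove = example_vdev_remove,
 *		// set RTE_VDEV_DRV_NEED_IOVA_AS_VA to require VA IOVA mode
 *		.drv_flags = 0,
 *	};
 *
 *	RTE_PMD_REGISTER_VDEV(net_example, example_vdev_drv);
 *	RTE_PMD_REGISTER_ALIAS(net_example, eth_example);
 */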
/* The caller shall be responsible for thread safety */
static struct rte_vdev_device *
find_vdev(const char *name)
{
	struct rte_vdev_device *dev;

	if (!name)
		return NULL;

	TAILQ_FOREACH(dev, &vdev_device_list, next) {
		const char *devname = rte_vdev_device_name(dev);

		if (!strcmp(devname, name))
			return dev;
	}

	return NULL;
}

static struct rte_devargs *
alloc_devargs(const char *name, const char *args)
{
	struct rte_devargs *devargs;
	int ret;

	devargs = calloc(1, sizeof(*devargs));
	if (!devargs)
		return NULL;

	devargs->bus = &rte_vdev_bus;
	if (args)
		devargs->data = strdup(args);
	else
		devargs->data = strdup("");
	devargs->args = devargs->data;

	ret = strlcpy(devargs->name, name, sizeof(devargs->name));
	if (ret < 0 || ret >= (int)sizeof(devargs->name)) {
		rte_devargs_reset(devargs);
		free(devargs);
		return NULL;
	}

	return devargs;
}

static int
insert_vdev(const char *name, const char *args,
		struct rte_vdev_device **p_dev,
		bool init)
{
	struct rte_vdev_device *dev;
	struct rte_devargs *devargs;
	int ret;

	if (name == NULL)
		return -EINVAL;

	devargs = alloc_devargs(name, args);
	if (!devargs)
		return -ENOMEM;

	dev = calloc(1, sizeof(*dev));
	if (!dev) {
		ret = -ENOMEM;
		goto fail;
	}

	dev->device.bus = &rte_vdev_bus;
	dev->device.numa_node = SOCKET_ID_ANY;
	dev->device.name = devargs->name;

	if (find_vdev(name)) {
		/*
		 * A vdev is expected to have only one port.
		 * So there is no reason to try probing again,
		 * even with new arguments.
		 */
		ret = -EEXIST;
		goto fail;
	}

	if (init)
		rte_devargs_insert(&devargs);
	dev->device.devargs = devargs;
	TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);

	if (p_dev)
		*p_dev = dev;

	return 0;
fail:
	rte_devargs_reset(devargs);
	free(devargs);
	free(dev);
	return ret;
}

int
rte_vdev_init(const char *name, const char *args)
{
	struct rte_vdev_device *dev;
	int ret;

	rte_spinlock_recursive_lock(&vdev_device_list_lock);
	ret = insert_vdev(name, args, &dev, true);
	if (ret == 0) {
		ret = vdev_probe_all_drivers(dev);
		if (ret) {
			if (ret > 0)
				VDEV_LOG(ERR, "no driver found for %s", name);
			/* If probe fails, remove it from the vdev list */
			TAILQ_REMOVE(&vdev_device_list, dev, next);
			rte_devargs_remove(dev->device.devargs);
			free(dev);
		}
	}
	rte_spinlock_recursive_unlock(&vdev_device_list_lock);
	return ret;
}

static int
vdev_remove_driver(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	const struct rte_vdev_driver *driver;

	if (!dev->device.driver) {
		VDEV_LOG(DEBUG, "no driver attached to device %s", name);
		return 1;
	}

	driver = container_of(dev->device.driver, const struct rte_vdev_driver,
		driver);
	return driver->remove(dev);
}

int
rte_vdev_uninit(const char *name)
{
	struct rte_vdev_device *dev;
	int ret;

	if (name == NULL)
		return -EINVAL;

	rte_spinlock_recursive_lock(&vdev_device_list_lock);

	dev = find_vdev(name);
	if (!dev) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = vdev_remove_driver(dev);
	if (ret)
		goto unlock;

	TAILQ_REMOVE(&vdev_device_list, dev, next);
	rte_devargs_remove(dev->device.devargs);
	free(dev);

unlock:
	rte_spinlock_recursive_unlock(&vdev_device_list_lock);
	return ret;
}
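/*
 * Illustrative sketch (not part of the original file): rte_vdev_init() and
 * rte_vdev_uninit() above give applications runtime hotplug of virtual
 * devices. The device name and argument string below are hypothetical and
 * depend on the PMD being instantiated.
 *
 *	if (rte_vdev_init("net_example0", "key=value") != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot create net_example0\n");
 *	...
 *	rte_vdev_uninit("net_example0");
 */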
struct vdev_param {
#define VDEV_SCAN_REQ	1
#define VDEV_SCAN_ONE	2
#define VDEV_SCAN_REP	3
	int type;
	int num;
	char name[RTE_DEV_NAME_MAX_LEN];
};

static int vdev_plug(struct rte_device *dev);

/**
 * This function works as the action for both the primary and secondary
 * processes for static vdev discovery when a secondary process is booting.
 *
 * step 1, the secondary process sends a sync request to ask for vdevs in
 *         the primary;
 * step 2, the primary process receives the request, and sends the vdevs
 *         one by one;
 * step 3, the primary process sends back a reply, which indicates how many
 *         vdevs were sent.
 */
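/*
 * For clarity (added, not part of the original file), the exchange
 * implemented by vdev_action() below and by the secondary branch of
 * vdev_scan() looks like:
 *
 *	secondary                       primary
 *	---------                       -------
 *	VDEV_SCAN_REQ  --------------->
 *	               <---------------  VDEV_SCAN_ONE (one rte_mp_sendmsg()
 *	                                 per named vdev)
 *	               <---------------  VDEV_SCAN_REP (rte_mp_reply(),
 *	                                 num = number of vdevs sent)
 */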
static int
vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_vdev_device *dev;
	struct rte_mp_msg mp_resp;
	struct vdev_param *ou = (struct vdev_param *)&mp_resp.param;
	const struct vdev_param *in = (const struct vdev_param *)mp_msg->param;
	const char *devname;
	int num;
	int ret;

	strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name));
	mp_resp.len_param = sizeof(*ou);
	mp_resp.num_fds = 0;

	switch (in->type) {
	case VDEV_SCAN_REQ:
		ou->type = VDEV_SCAN_ONE;
		ou->num = 1;
		num = 0;

		rte_spinlock_recursive_lock(&vdev_device_list_lock);
		TAILQ_FOREACH(dev, &vdev_device_list, next) {
			devname = rte_vdev_device_name(dev);
			if (strlen(devname) == 0) {
				VDEV_LOG(INFO, "vdev with no name is not sent");
				continue;
			}
			VDEV_LOG(INFO, "send vdev, %s", devname);
			strlcpy(ou->name, devname, RTE_DEV_NAME_MAX_LEN);
			if (rte_mp_sendmsg(&mp_resp) < 0)
				VDEV_LOG(ERR, "send vdev, %s, failed, %s",
					devname, strerror(rte_errno));
			num++;
		}
		rte_spinlock_recursive_unlock(&vdev_device_list_lock);

		ou->type = VDEV_SCAN_REP;
		ou->num = num;
		if (rte_mp_reply(&mp_resp, peer) < 0)
			VDEV_LOG(ERR, "Failed to reply to a scan request");
		break;
	case VDEV_SCAN_ONE:
		VDEV_LOG(INFO, "receive vdev, %s", in->name);
		ret = insert_vdev(in->name, NULL, NULL, false);
		if (ret == -EEXIST)
			VDEV_LOG(DEBUG, "device already exists, %s", in->name);
		else if (ret < 0)
			VDEV_LOG(ERR, "failed to add vdev, %s", in->name);
		break;
	default:
		VDEV_LOG(ERR, "vdev cannot recognize this message");
	}

	return 0;
}

static int
vdev_scan(void)
{
	struct rte_vdev_device *dev;
	struct rte_devargs *devargs;
	struct vdev_custom_scan *custom_scan;

	if (rte_mp_action_register(VDEV_MP_KEY, vdev_action) < 0 &&
	    rte_errno != EEXIST) {
		/* for primary, unsupported IPC is not an error */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
		    rte_errno == ENOTSUP)
			goto scan;
		VDEV_LOG(ERR, "Failed to add vdev mp action");
		return -1;
	}

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct rte_mp_msg mp_req, *mp_rep;
		struct rte_mp_reply mp_reply;
		struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
		struct vdev_param *req = (struct vdev_param *)mp_req.param;
		struct vdev_param *resp;

		strlcpy(mp_req.name, VDEV_MP_KEY, sizeof(mp_req.name));
		mp_req.len_param = sizeof(*req);
		mp_req.num_fds = 0;
		req->type = VDEV_SCAN_REQ;
		if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
		    mp_reply.nb_received == 1) {
			mp_rep = &mp_reply.msgs[0];
			resp = (struct vdev_param *)mp_rep->param;
			VDEV_LOG(INFO, "Received %d vdevs", resp->num);
			free(mp_reply.msgs);
		} else
			VDEV_LOG(ERR, "Failed to request vdev from primary");

		/* Fall through to allow private vdevs in secondary process */
	}

scan:
	/* call custom scan callbacks if any */
	rte_spinlock_lock(&vdev_custom_scan_lock);
	TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
		if (custom_scan->callback != NULL)
			/*
			 * the callback should update the devargs list
			 * by calling rte_devargs_insert() with
			 *   devargs.bus = rte_bus_find_by_name("vdev");
			 *   devargs.type = RTE_DEVTYPE_VIRTUAL;
			 *   devargs.policy = RTE_DEV_ALLOWED;
			 */
			custom_scan->callback(custom_scan->user_arg);
	}
	rte_spinlock_unlock(&vdev_custom_scan_lock);

	/* for virtual devices we scan the devargs_list populated via cmdline */
	RTE_EAL_DEVARGS_FOREACH("vdev", devargs) {

		dev = calloc(1, sizeof(*dev));
		if (!dev)
			return -1;

		rte_spinlock_recursive_lock(&vdev_device_list_lock);

		if (find_vdev(devargs->name)) {
			rte_spinlock_recursive_unlock(&vdev_device_list_lock);
			free(dev);
			continue;
		}

		dev->device.bus = &rte_vdev_bus;
		dev->device.devargs = devargs;
		dev->device.numa_node = SOCKET_ID_ANY;
		dev->device.name = devargs->name;

		TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);

		rte_spinlock_recursive_unlock(&vdev_device_list_lock);
	}

	return 0;
}

static int
vdev_probe(void)
{
	struct rte_vdev_device *dev;
	int r, ret = 0;

	/* call the init function for each virtual device */
	TAILQ_FOREACH(dev, &vdev_device_list, next) {
		/* we don't use the vdev lock here, as it's only used in DPDK
		 * initialization; and we don't want to hold such a lock when
		 * we call each driver probe.
		 */

		r = vdev_probe_all_drivers(dev);
		if (r != 0) {
			if (r == -EEXIST)
				continue;
			VDEV_LOG(ERR, "failed to initialize %s device",
				rte_vdev_device_name(dev));
			ret = -1;
		}
	}

	return ret;
}

static int
vdev_cleanup(void)
{
	struct rte_vdev_device *dev, *tmp_dev;
	int error = 0;

	RTE_TAILQ_FOREACH_SAFE(dev, &vdev_device_list, next, tmp_dev) {
		const struct rte_vdev_driver *drv;
		int ret = 0;

		if (dev->device.driver == NULL)
			goto free;

		drv = container_of(dev->device.driver, const struct rte_vdev_driver, driver);

		if (drv->remove == NULL)
			goto free;

		ret = drv->remove(dev);
		if (ret < 0)
			error = -1;

		dev->device.driver = NULL;
free:
		free(dev);
	}

	return error;
}
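/*
 * Illustrative note (not part of the original file): the devargs walked by
 * RTE_EAL_DEVARGS_FOREACH("vdev", ...) in vdev_scan() above are typically
 * populated from the EAL command line. The second vdev and its arguments
 * below are only an example of the general "--vdev=<name>[,key=value,...]"
 * form:
 *
 *	dpdk-testpmd --vdev=net_null0 --vdev=net_example1,key=value -- -i
 */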
struct rte_device *
rte_vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		     const void *data)
{
	const struct rte_vdev_device *vstart;
	struct rte_vdev_device *dev;

	rte_spinlock_recursive_lock(&vdev_device_list_lock);
	if (start != NULL) {
		vstart = RTE_DEV_TO_VDEV_CONST(start);
		dev = TAILQ_NEXT(vstart, next);
	} else {
		dev = TAILQ_FIRST(&vdev_device_list);
	}
	while (dev != NULL) {
		if (cmp(&dev->device, data) == 0)
			break;
		dev = TAILQ_NEXT(dev, next);
	}
	rte_spinlock_recursive_unlock(&vdev_device_list_lock);

	return dev ? &dev->device : NULL;
}

static int
vdev_plug(struct rte_device *dev)
{
	return vdev_probe_all_drivers(RTE_DEV_TO_VDEV(dev));
}

static int
vdev_unplug(struct rte_device *dev)
{
	return rte_vdev_uninit(dev->name);
}

static enum rte_iova_mode
vdev_get_iommu_class(void)
{
	const char *name;
	struct rte_vdev_device *dev;
	struct rte_vdev_driver *driver;

	TAILQ_FOREACH(dev, &vdev_device_list, next) {
		name = rte_vdev_device_name(dev);
		if (vdev_parse(name, &driver))
			continue;

		if (driver->drv_flags & RTE_VDEV_DRV_NEED_IOVA_AS_VA)
			return RTE_IOVA_VA;
	}

	return RTE_IOVA_DC;
}

static struct rte_bus rte_vdev_bus = {
	.scan = vdev_scan,
	.probe = vdev_probe,
	.cleanup = vdev_cleanup,
	.find_device = rte_vdev_find_device,
	.plug = vdev_plug,
	.unplug = vdev_unplug,
	.parse = vdev_parse,
	.dma_map = vdev_dma_map,
	.dma_unmap = vdev_dma_unmap,
	.get_iommu_class = vdev_get_iommu_class,
	.dev_iterate = rte_vdev_dev_iterate,
};

RTE_REGISTER_BUS(vdev, rte_vdev_bus);
RTE_LOG_REGISTER_DEFAULT(vdev_logtype_bus, NOTICE);
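/*
 * Illustrative sketch (not part of the original file): applications can walk
 * the devices attached to this bus through the generic device iterator,
 * which resolves to rte_vdev_dev_iterate()/rte_vdev_find_device() above.
 *
 *	#include <rte_dev.h>
 *
 *	struct rte_dev_iterator it;
 *	struct rte_device *dev;
 *
 *	RTE_DEV_FOREACH(dev, "bus=vdev", &it)
 *		printf("vdev: %s\n", dev->name);
 */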