1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation. 3 * Copyright 2013-2014 6WIND S.A. 4 */ 5 6 #include <string.h> 7 #include <inttypes.h> 8 #include <stdint.h> 9 #include <stdbool.h> 10 #include <stdlib.h> 11 #include <stdio.h> 12 #include <sys/queue.h> 13 #include <rte_errno.h> 14 #include <rte_interrupts.h> 15 #include <rte_log.h> 16 #include <bus_driver.h> 17 #include <rte_pci.h> 18 #include <rte_bus_pci.h> 19 #include <rte_lcore.h> 20 #include <rte_per_lcore.h> 21 #include <rte_memory.h> 22 #include <rte_eal.h> 23 #include <rte_eal_paging.h> 24 #include <rte_string_fns.h> 25 #include <rte_common.h> 26 #include <rte_devargs.h> 27 #include <rte_vfio.h> 28 #include <rte_tailq.h> 29 30 #include "private.h" 31 32 33 #define SYSFS_PCI_DEVICES "/sys/bus/pci/devices" 34 35 const char *rte_pci_get_sysfs_path(void) 36 { 37 const char *path = NULL; 38 39 #ifdef RTE_EXEC_ENV_LINUX 40 path = getenv("SYSFS_PCI_DEVICES"); 41 if (path == NULL) 42 return SYSFS_PCI_DEVICES; 43 #endif 44 45 return path; 46 } 47 48 #ifdef RTE_EXEC_ENV_WINDOWS 49 #define asprintf pci_asprintf 50 51 static int 52 __rte_format_printf(2, 3) 53 pci_asprintf(char **buffer, const char *format, ...) 54 { 55 int size, ret; 56 va_list arg; 57 58 va_start(arg, format); 59 size = vsnprintf(NULL, 0, format, arg); 60 va_end(arg); 61 if (size < 0) 62 return -1; 63 size++; 64 65 *buffer = malloc(size); 66 if (*buffer == NULL) 67 return -1; 68 69 va_start(arg, format); 70 ret = vsnprintf(*buffer, size, format, arg); 71 va_end(arg); 72 if (ret != size - 1) { 73 free(*buffer); 74 return -1; 75 } 76 return ret; 77 } 78 #endif /* RTE_EXEC_ENV_WINDOWS */ 79 80 static struct rte_devargs * 81 pci_devargs_lookup(const struct rte_pci_addr *pci_addr) 82 { 83 struct rte_devargs *devargs; 84 struct rte_pci_addr addr; 85 86 RTE_EAL_DEVARGS_FOREACH("pci", devargs) { 87 devargs->bus->parse(devargs->name, &addr); 88 if (!rte_pci_addr_cmp(pci_addr, &addr)) 89 return devargs; 90 } 91 return NULL; 92 } 93 94 void 95 pci_common_set(struct rte_pci_device *dev) 96 { 97 struct rte_devargs *devargs; 98 99 /* Each device has its internal, canonical name set. */ 100 rte_pci_device_name(&dev->addr, 101 dev->name, sizeof(dev->name)); 102 devargs = pci_devargs_lookup(&dev->addr); 103 dev->device.devargs = devargs; 104 105 /* When using a blocklist, only blocked devices will have 106 * an rte_devargs. Allowed devices won't have one. 107 */ 108 if (devargs != NULL) 109 /* If an rte_devargs exists, the generic rte_device uses the 110 * given name as its name. 111 */ 112 dev->device.name = dev->device.devargs->name; 113 else 114 /* Otherwise, it uses the internal, canonical form. */ 115 dev->device.name = dev->name; 116 117 if (dev->bus_info != NULL || 118 asprintf(&dev->bus_info, "vendor_id=%"PRIx16", device_id=%"PRIx16, 119 dev->id.vendor_id, dev->id.device_id) != -1) 120 dev->device.bus_info = dev->bus_info; 121 } 122 123 void 124 pci_free(struct rte_pci_device *dev) 125 { 126 if (dev == NULL) 127 return; 128 free(dev->bus_info); 129 free(dev); 130 } 131 132 /* map a particular resource from a file */ 133 void * 134 pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size, 135 int additional_flags) 136 { 137 void *mapaddr; 138 139 /* Map the PCI memory resource of device */ 140 mapaddr = rte_mem_map(requested_addr, size, 141 RTE_PROT_READ | RTE_PROT_WRITE, 142 RTE_MAP_SHARED | additional_flags, fd, offset); 143 if (mapaddr == NULL) { 144 RTE_LOG(ERR, EAL, 145 "%s(): cannot map resource(%d, %p, 0x%zx, 0x%llx): %s (%p)\n", 146 __func__, fd, requested_addr, size, 147 (unsigned long long)offset, 148 rte_strerror(rte_errno), mapaddr); 149 } else 150 RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr); 151 152 return mapaddr; 153 } 154 155 /* unmap a particular resource */ 156 void 157 pci_unmap_resource(void *requested_addr, size_t size) 158 { 159 if (requested_addr == NULL) 160 return; 161 162 /* Unmap the PCI memory resource of device */ 163 if (rte_mem_unmap(requested_addr, size)) { 164 RTE_LOG(ERR, EAL, "%s(): cannot mem unmap(%p, %#zx): %s\n", 165 __func__, requested_addr, size, 166 rte_strerror(rte_errno)); 167 } else 168 RTE_LOG(DEBUG, EAL, " PCI memory unmapped at %p\n", 169 requested_addr); 170 } 171 /* 172 * Match the PCI Driver and Device using the ID Table 173 */ 174 int 175 rte_pci_match(const struct rte_pci_driver *pci_drv, 176 const struct rte_pci_device *pci_dev) 177 { 178 const struct rte_pci_id *id_table; 179 180 for (id_table = pci_drv->id_table; id_table->vendor_id != 0; 181 id_table++) { 182 /* check if device's identifiers match the driver's ones */ 183 if (id_table->vendor_id != pci_dev->id.vendor_id && 184 id_table->vendor_id != RTE_PCI_ANY_ID) 185 continue; 186 if (id_table->device_id != pci_dev->id.device_id && 187 id_table->device_id != RTE_PCI_ANY_ID) 188 continue; 189 if (id_table->subsystem_vendor_id != 190 pci_dev->id.subsystem_vendor_id && 191 id_table->subsystem_vendor_id != RTE_PCI_ANY_ID) 192 continue; 193 if (id_table->subsystem_device_id != 194 pci_dev->id.subsystem_device_id && 195 id_table->subsystem_device_id != RTE_PCI_ANY_ID) 196 continue; 197 if (id_table->class_id != pci_dev->id.class_id && 198 id_table->class_id != RTE_CLASS_ANY_ID) 199 continue; 200 201 return 1; 202 } 203 204 return 0; 205 } 206 207 /* 208 * If vendor/device ID match, call the probe() function of the 209 * driver. 210 */ 211 static int 212 rte_pci_probe_one_driver(struct rte_pci_driver *dr, 213 struct rte_pci_device *dev) 214 { 215 int ret; 216 bool already_probed; 217 struct rte_pci_addr *loc; 218 219 if ((dr == NULL) || (dev == NULL)) 220 return -EINVAL; 221 222 loc = &dev->addr; 223 224 /* The device is not blocked; Check if driver supports it */ 225 if (!rte_pci_match(dr, dev)) 226 /* Match of device and driver failed */ 227 return 1; 228 229 RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n", 230 loc->domain, loc->bus, loc->devid, loc->function, 231 dev->device.numa_node); 232 233 /* no initialization when marked as blocked, return without error */ 234 if (dev->device.devargs != NULL && 235 dev->device.devargs->policy == RTE_DEV_BLOCKED) { 236 RTE_LOG(INFO, EAL, " Device is blocked, not initializing\n"); 237 return 1; 238 } 239 240 if (dev->device.numa_node < 0 && rte_socket_count() > 1) 241 RTE_LOG(INFO, EAL, "Device %s is not NUMA-aware\n", dev->name); 242 243 already_probed = rte_dev_is_probed(&dev->device); 244 if (already_probed && !(dr->drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) { 245 RTE_LOG(DEBUG, EAL, "Device %s is already probed\n", 246 dev->device.name); 247 return -EEXIST; 248 } 249 250 RTE_LOG(DEBUG, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id, 251 dev->id.device_id, dr->driver.name); 252 253 if (!already_probed) { 254 enum rte_iova_mode dev_iova_mode; 255 enum rte_iova_mode iova_mode; 256 257 dev_iova_mode = pci_device_iova_mode(dr, dev); 258 iova_mode = rte_eal_iova_mode(); 259 if (dev_iova_mode != RTE_IOVA_DC && 260 dev_iova_mode != iova_mode) { 261 RTE_LOG(ERR, EAL, " Expecting '%s' IOVA mode but current mode is '%s', not initializing\n", 262 dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA", 263 iova_mode == RTE_IOVA_PA ? "PA" : "VA"); 264 return -EINVAL; 265 } 266 267 /* Allocate interrupt instance for pci device */ 268 dev->intr_handle = 269 rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE); 270 if (dev->intr_handle == NULL) { 271 RTE_LOG(ERR, EAL, 272 "Failed to create interrupt instance for %s\n", 273 dev->device.name); 274 return -ENOMEM; 275 } 276 277 dev->vfio_req_intr_handle = 278 rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE); 279 if (dev->vfio_req_intr_handle == NULL) { 280 rte_intr_instance_free(dev->intr_handle); 281 dev->intr_handle = NULL; 282 RTE_LOG(ERR, EAL, 283 "Failed to create vfio req interrupt instance for %s\n", 284 dev->device.name); 285 return -ENOMEM; 286 } 287 288 /* 289 * Reference driver structure. 290 * This needs to be before rte_pci_map_device(), as it enables 291 * to use driver flags for adjusting configuration. 292 */ 293 dev->driver = dr; 294 if (dev->driver->drv_flags & RTE_PCI_DRV_NEED_MAPPING) { 295 ret = rte_pci_map_device(dev); 296 if (ret != 0) { 297 dev->driver = NULL; 298 rte_intr_instance_free(dev->vfio_req_intr_handle); 299 dev->vfio_req_intr_handle = NULL; 300 rte_intr_instance_free(dev->intr_handle); 301 dev->intr_handle = NULL; 302 return ret; 303 } 304 } 305 } 306 307 RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n", 308 dr->driver.name, dev->id.vendor_id, dev->id.device_id, 309 loc->domain, loc->bus, loc->devid, loc->function, 310 dev->device.numa_node); 311 /* call the driver probe() function */ 312 ret = dr->probe(dr, dev); 313 if (already_probed) 314 return ret; /* no rollback if already succeeded earlier */ 315 if (ret) { 316 dev->driver = NULL; 317 if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) && 318 /* Don't unmap if device is unsupported and 319 * driver needs mapped resources. 320 */ 321 !(ret > 0 && 322 (dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES))) 323 rte_pci_unmap_device(dev); 324 rte_intr_instance_free(dev->vfio_req_intr_handle); 325 dev->vfio_req_intr_handle = NULL; 326 rte_intr_instance_free(dev->intr_handle); 327 dev->intr_handle = NULL; 328 } else { 329 dev->device.driver = &dr->driver; 330 } 331 332 return ret; 333 } 334 335 /* 336 * If vendor/device ID match, call the remove() function of the 337 * driver. 338 */ 339 static int 340 rte_pci_detach_dev(struct rte_pci_device *dev) 341 { 342 struct rte_pci_addr *loc; 343 struct rte_pci_driver *dr; 344 int ret = 0; 345 346 if (dev == NULL) 347 return -EINVAL; 348 349 dr = dev->driver; 350 loc = &dev->addr; 351 352 RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n", 353 loc->domain, loc->bus, loc->devid, 354 loc->function, dev->device.numa_node); 355 356 RTE_LOG(DEBUG, EAL, " remove driver: %x:%x %s\n", dev->id.vendor_id, 357 dev->id.device_id, dr->driver.name); 358 359 if (dr->remove) { 360 ret = dr->remove(dev); 361 if (ret < 0) 362 return ret; 363 } 364 365 /* clear driver structure */ 366 dev->driver = NULL; 367 dev->device.driver = NULL; 368 369 if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) 370 /* unmap resources for devices that use igb_uio */ 371 rte_pci_unmap_device(dev); 372 373 rte_intr_instance_free(dev->intr_handle); 374 dev->intr_handle = NULL; 375 rte_intr_instance_free(dev->vfio_req_intr_handle); 376 dev->vfio_req_intr_handle = NULL; 377 378 return 0; 379 } 380 381 /* 382 * If vendor/device ID match, call the probe() function of all 383 * registered driver for the given device. Return < 0 if initialization 384 * failed, return 1 if no driver is found for this device. 385 */ 386 static int 387 pci_probe_all_drivers(struct rte_pci_device *dev) 388 { 389 struct rte_pci_driver *dr = NULL; 390 int rc = 0; 391 392 if (dev == NULL) 393 return -EINVAL; 394 395 FOREACH_DRIVER_ON_PCIBUS(dr) { 396 rc = rte_pci_probe_one_driver(dr, dev); 397 if (rc < 0) 398 /* negative value is an error */ 399 return rc; 400 if (rc > 0) 401 /* positive value means driver doesn't support it */ 402 continue; 403 return 0; 404 } 405 return 1; 406 } 407 408 /* 409 * Scan the content of the PCI bus, and call the probe() function for 410 * all registered drivers that have a matching entry in its id_table 411 * for discovered devices. 412 */ 413 static int 414 pci_probe(void) 415 { 416 struct rte_pci_device *dev = NULL; 417 size_t probed = 0, failed = 0; 418 int ret = 0; 419 420 FOREACH_DEVICE_ON_PCIBUS(dev) { 421 probed++; 422 423 ret = pci_probe_all_drivers(dev); 424 if (ret < 0) { 425 if (ret != -EEXIST) { 426 RTE_LOG(ERR, EAL, "Requested device " 427 PCI_PRI_FMT " cannot be used\n", 428 dev->addr.domain, dev->addr.bus, 429 dev->addr.devid, dev->addr.function); 430 rte_errno = errno; 431 failed++; 432 } 433 ret = 0; 434 } 435 } 436 437 return (probed && probed == failed) ? -1 : 0; 438 } 439 440 static int 441 pci_cleanup(void) 442 { 443 struct rte_pci_device *dev, *tmp_dev; 444 int error = 0; 445 446 RTE_TAILQ_FOREACH_SAFE(dev, &rte_pci_bus.device_list, next, tmp_dev) { 447 struct rte_pci_driver *drv = dev->driver; 448 int ret = 0; 449 450 if (drv == NULL || drv->remove == NULL) 451 continue; 452 453 ret = drv->remove(dev); 454 if (ret < 0) { 455 rte_errno = errno; 456 error = -1; 457 } 458 dev->driver = NULL; 459 dev->device.driver = NULL; 460 461 /* free interrupt handles */ 462 rte_intr_instance_free(dev->intr_handle); 463 dev->intr_handle = NULL; 464 rte_intr_instance_free(dev->vfio_req_intr_handle); 465 dev->vfio_req_intr_handle = NULL; 466 467 pci_free(dev); 468 } 469 470 return error; 471 } 472 473 /* dump one device */ 474 static int 475 pci_dump_one_device(FILE *f, struct rte_pci_device *dev) 476 { 477 int i; 478 479 fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus, 480 dev->addr.devid, dev->addr.function); 481 fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id, 482 dev->id.device_id); 483 484 for (i = 0; i != sizeof(dev->mem_resource) / 485 sizeof(dev->mem_resource[0]); i++) { 486 fprintf(f, " %16.16"PRIx64" %16.16"PRIx64"\n", 487 dev->mem_resource[i].phys_addr, 488 dev->mem_resource[i].len); 489 } 490 return 0; 491 } 492 493 /* dump devices on the bus */ 494 void 495 rte_pci_dump(FILE *f) 496 { 497 struct rte_pci_device *dev = NULL; 498 499 FOREACH_DEVICE_ON_PCIBUS(dev) { 500 pci_dump_one_device(f, dev); 501 } 502 } 503 504 static int 505 pci_parse(const char *name, void *addr) 506 { 507 struct rte_pci_addr *out = addr; 508 struct rte_pci_addr pci_addr; 509 bool parse; 510 511 parse = (rte_pci_addr_parse(name, &pci_addr) == 0); 512 if (parse && addr != NULL) 513 *out = pci_addr; 514 return parse == false; 515 } 516 517 /* register a driver */ 518 void 519 rte_pci_register(struct rte_pci_driver *driver) 520 { 521 TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next); 522 } 523 524 /* unregister a driver */ 525 void 526 rte_pci_unregister(struct rte_pci_driver *driver) 527 { 528 TAILQ_REMOVE(&rte_pci_bus.driver_list, driver, next); 529 } 530 531 /* Add a device to PCI bus */ 532 void 533 rte_pci_add_device(struct rte_pci_device *pci_dev) 534 { 535 TAILQ_INSERT_TAIL(&rte_pci_bus.device_list, pci_dev, next); 536 } 537 538 /* Insert a device into a predefined position in PCI bus */ 539 void 540 rte_pci_insert_device(struct rte_pci_device *exist_pci_dev, 541 struct rte_pci_device *new_pci_dev) 542 { 543 TAILQ_INSERT_BEFORE(exist_pci_dev, new_pci_dev, next); 544 } 545 546 /* Remove a device from PCI bus */ 547 static void 548 rte_pci_remove_device(struct rte_pci_device *pci_dev) 549 { 550 TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next); 551 } 552 553 static struct rte_device * 554 pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp, 555 const void *data) 556 { 557 const struct rte_pci_device *pstart; 558 struct rte_pci_device *pdev; 559 560 if (start != NULL) { 561 pstart = RTE_DEV_TO_PCI_CONST(start); 562 pdev = TAILQ_NEXT(pstart, next); 563 } else { 564 pdev = TAILQ_FIRST(&rte_pci_bus.device_list); 565 } 566 while (pdev != NULL) { 567 if (cmp(&pdev->device, data) == 0) 568 return &pdev->device; 569 pdev = TAILQ_NEXT(pdev, next); 570 } 571 return NULL; 572 } 573 574 /* 575 * find the device which encounter the failure, by iterate over all device on 576 * PCI bus to check if the memory failure address is located in the range 577 * of the BARs of the device. 578 */ 579 static struct rte_pci_device * 580 pci_find_device_by_addr(const void *failure_addr) 581 { 582 struct rte_pci_device *pdev = NULL; 583 uint64_t check_point, start, end, len; 584 int i; 585 586 check_point = (uint64_t)(uintptr_t)failure_addr; 587 588 FOREACH_DEVICE_ON_PCIBUS(pdev) { 589 for (i = 0; i != RTE_DIM(pdev->mem_resource); i++) { 590 start = (uint64_t)(uintptr_t)pdev->mem_resource[i].addr; 591 len = pdev->mem_resource[i].len; 592 end = start + len; 593 if (check_point >= start && check_point < end) { 594 RTE_LOG(DEBUG, EAL, "Failure address %16.16" 595 PRIx64" belongs to device %s!\n", 596 check_point, pdev->device.name); 597 return pdev; 598 } 599 } 600 } 601 return NULL; 602 } 603 604 static int 605 pci_hot_unplug_handler(struct rte_device *dev) 606 { 607 struct rte_pci_device *pdev = NULL; 608 int ret = 0; 609 610 pdev = RTE_DEV_TO_PCI(dev); 611 if (!pdev) 612 return -1; 613 614 switch (pdev->kdrv) { 615 #ifdef HAVE_VFIO_DEV_REQ_INTERFACE 616 case RTE_PCI_KDRV_VFIO: 617 /* 618 * vfio kernel module guaranty the pci device would not be 619 * deleted until the user space release the resource, so no 620 * need to remap BARs resource here, just directly notify 621 * the req event to the user space to handle it. 622 */ 623 rte_dev_event_callback_process(dev->name, 624 RTE_DEV_EVENT_REMOVE); 625 break; 626 #endif 627 case RTE_PCI_KDRV_IGB_UIO: 628 case RTE_PCI_KDRV_UIO_GENERIC: 629 case RTE_PCI_KDRV_NIC_UIO: 630 /* BARs resource is invalid, remap it to be safe. */ 631 ret = pci_uio_remap_resource(pdev); 632 break; 633 default: 634 RTE_LOG(DEBUG, EAL, 635 "Not managed by a supported kernel driver, skipped\n"); 636 ret = -1; 637 break; 638 } 639 640 return ret; 641 } 642 643 static int 644 pci_sigbus_handler(const void *failure_addr) 645 { 646 struct rte_pci_device *pdev = NULL; 647 int ret = 0; 648 649 pdev = pci_find_device_by_addr(failure_addr); 650 if (!pdev) { 651 /* It is a generic sigbus error, no bus would handle it. */ 652 ret = 1; 653 } else { 654 /* The sigbus error is caused of hot-unplug. */ 655 ret = pci_hot_unplug_handler(&pdev->device); 656 if (ret) { 657 RTE_LOG(ERR, EAL, 658 "Failed to handle hot-unplug for device %s", 659 pdev->name); 660 ret = -1; 661 } 662 } 663 return ret; 664 } 665 666 static int 667 pci_plug(struct rte_device *dev) 668 { 669 return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev)); 670 } 671 672 static int 673 pci_unplug(struct rte_device *dev) 674 { 675 struct rte_pci_device *pdev; 676 int ret; 677 678 pdev = RTE_DEV_TO_PCI(dev); 679 ret = rte_pci_detach_dev(pdev); 680 if (ret == 0) { 681 rte_pci_remove_device(pdev); 682 rte_devargs_remove(dev->devargs); 683 pci_free(pdev); 684 } 685 return ret; 686 } 687 688 static int 689 pci_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len) 690 { 691 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev); 692 693 if (!pdev || !pdev->driver) { 694 rte_errno = EINVAL; 695 return -1; 696 } 697 if (pdev->driver->dma_map) 698 return pdev->driver->dma_map(pdev, addr, iova, len); 699 /** 700 * In case driver don't provides any specific mapping 701 * try fallback to VFIO. 702 */ 703 if (pdev->kdrv == RTE_PCI_KDRV_VFIO) 704 return rte_vfio_container_dma_map 705 (RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr, 706 iova, len); 707 rte_errno = ENOTSUP; 708 return -1; 709 } 710 711 static int 712 pci_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len) 713 { 714 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev); 715 716 if (!pdev || !pdev->driver) { 717 rte_errno = EINVAL; 718 return -1; 719 } 720 if (pdev->driver->dma_unmap) 721 return pdev->driver->dma_unmap(pdev, addr, iova, len); 722 /** 723 * In case driver don't provides any specific mapping 724 * try fallback to VFIO. 725 */ 726 if (pdev->kdrv == RTE_PCI_KDRV_VFIO) 727 return rte_vfio_container_dma_unmap 728 (RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr, 729 iova, len); 730 rte_errno = ENOTSUP; 731 return -1; 732 } 733 734 bool 735 rte_pci_ignore_device(const struct rte_pci_addr *pci_addr) 736 { 737 struct rte_devargs *devargs = pci_devargs_lookup(pci_addr); 738 739 switch (rte_pci_bus.bus.conf.scan_mode) { 740 case RTE_BUS_SCAN_ALLOWLIST: 741 if (devargs && devargs->policy == RTE_DEV_ALLOWED) 742 return false; 743 break; 744 case RTE_BUS_SCAN_UNDEFINED: 745 case RTE_BUS_SCAN_BLOCKLIST: 746 if (devargs == NULL || devargs->policy != RTE_DEV_BLOCKED) 747 return false; 748 break; 749 } 750 return true; 751 } 752 753 enum rte_iova_mode 754 rte_pci_get_iommu_class(void) 755 { 756 enum rte_iova_mode iova_mode = RTE_IOVA_DC; 757 const struct rte_pci_device *dev; 758 const struct rte_pci_driver *drv; 759 bool devices_want_va = false; 760 bool devices_want_pa = false; 761 int iommu_no_va = -1; 762 763 FOREACH_DEVICE_ON_PCIBUS(dev) { 764 /* 765 * We can check this only once, because the IOMMU hardware is 766 * the same for all of them. 767 */ 768 if (iommu_no_va == -1) 769 iommu_no_va = pci_device_iommu_support_va(dev) 770 ? 0 : 1; 771 772 if (dev->kdrv == RTE_PCI_KDRV_UNKNOWN || 773 dev->kdrv == RTE_PCI_KDRV_NONE) 774 continue; 775 FOREACH_DRIVER_ON_PCIBUS(drv) { 776 enum rte_iova_mode dev_iova_mode; 777 778 if (!rte_pci_match(drv, dev)) 779 continue; 780 781 dev_iova_mode = pci_device_iova_mode(drv, dev); 782 RTE_LOG(DEBUG, EAL, "PCI driver %s for device " 783 PCI_PRI_FMT " wants IOVA as '%s'\n", 784 drv->driver.name, 785 dev->addr.domain, dev->addr.bus, 786 dev->addr.devid, dev->addr.function, 787 dev_iova_mode == RTE_IOVA_DC ? "DC" : 788 (dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA")); 789 if (dev_iova_mode == RTE_IOVA_PA) 790 devices_want_pa = true; 791 else if (dev_iova_mode == RTE_IOVA_VA) 792 devices_want_va = true; 793 } 794 } 795 if (iommu_no_va == 1) { 796 iova_mode = RTE_IOVA_PA; 797 if (devices_want_va) { 798 RTE_LOG(WARNING, EAL, "Some devices want 'VA' but IOMMU does not support 'VA'.\n"); 799 RTE_LOG(WARNING, EAL, "The devices that want 'VA' won't initialize.\n"); 800 } 801 } else if (devices_want_va && !devices_want_pa) { 802 iova_mode = RTE_IOVA_VA; 803 } else if (devices_want_pa && !devices_want_va) { 804 iova_mode = RTE_IOVA_PA; 805 } else { 806 iova_mode = RTE_IOVA_DC; 807 if (devices_want_va) { 808 RTE_LOG(WARNING, EAL, "Some devices want 'VA' but forcing 'DC' because other devices want 'PA'.\n"); 809 RTE_LOG(WARNING, EAL, "Depending on the final decision by the EAL, not all devices may be able to initialize.\n"); 810 } 811 } 812 return iova_mode; 813 } 814 815 off_t 816 rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap) 817 { 818 off_t offset = RTE_PCI_CFG_SPACE_SIZE; 819 uint32_t header; 820 int ttl; 821 822 /* minimum 8 bytes per capability */ 823 ttl = (RTE_PCI_CFG_SPACE_EXP_SIZE - RTE_PCI_CFG_SPACE_SIZE) / 8; 824 825 if (rte_pci_read_config(dev, &header, 4, offset) < 0) { 826 RTE_LOG(ERR, EAL, "error in reading extended capabilities\n"); 827 return -1; 828 } 829 830 /* 831 * If we have no capabilities, this is indicated by cap ID, 832 * cap version and next pointer all being 0. 833 */ 834 if (header == 0) 835 return 0; 836 837 while (ttl != 0) { 838 if (RTE_PCI_EXT_CAP_ID(header) == cap) 839 return offset; 840 841 offset = RTE_PCI_EXT_CAP_NEXT(header); 842 843 if (offset < RTE_PCI_CFG_SPACE_SIZE) 844 break; 845 846 if (rte_pci_read_config(dev, &header, 4, offset) < 0) { 847 RTE_LOG(ERR, EAL, 848 "error in reading extended capabilities\n"); 849 return -1; 850 } 851 852 ttl--; 853 } 854 855 return 0; 856 } 857 858 int 859 rte_pci_set_bus_master(struct rte_pci_device *dev, bool enable) 860 { 861 uint16_t old_cmd, cmd; 862 863 if (rte_pci_read_config(dev, &old_cmd, sizeof(old_cmd), 864 RTE_PCI_COMMAND) < 0) { 865 RTE_LOG(ERR, EAL, "error in reading PCI command register\n"); 866 return -1; 867 } 868 869 if (enable) 870 cmd = old_cmd | RTE_PCI_COMMAND_MASTER; 871 else 872 cmd = old_cmd & ~RTE_PCI_COMMAND_MASTER; 873 874 if (cmd == old_cmd) 875 return 0; 876 877 if (rte_pci_write_config(dev, &cmd, sizeof(cmd), 878 RTE_PCI_COMMAND) < 0) { 879 RTE_LOG(ERR, EAL, "error in writing PCI command register\n"); 880 return -1; 881 } 882 883 return 0; 884 } 885 886 struct rte_pci_bus rte_pci_bus = { 887 .bus = { 888 .scan = rte_pci_scan, 889 .probe = pci_probe, 890 .cleanup = pci_cleanup, 891 .find_device = pci_find_device, 892 .plug = pci_plug, 893 .unplug = pci_unplug, 894 .parse = pci_parse, 895 .devargs_parse = rte_pci_devargs_parse, 896 .dma_map = pci_dma_map, 897 .dma_unmap = pci_dma_unmap, 898 .get_iommu_class = rte_pci_get_iommu_class, 899 .dev_iterate = rte_pci_dev_iterate, 900 .hot_unplug_handler = pci_hot_unplug_handler, 901 .sigbus_handler = pci_sigbus_handler, 902 }, 903 .device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list), 904 .driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list), 905 }; 906 907 RTE_REGISTER_BUS(pci, rte_pci_bus.bus); 908