/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <string.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/queue.h>
#include <rte_errno.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_paging.h>
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_vfio.h>

#include "private.h"


#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"

const char *rte_pci_get_sysfs_path(void)
{
	const char *path = NULL;

#ifdef RTE_EXEC_ENV_LINUX
	path = getenv("SYSFS_PCI_DEVICES");
	if (path == NULL)
		return SYSFS_PCI_DEVICES;
#endif

	return path;
}
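/*
 * Illustrative sketch (not part of the bus implementation): on Linux the
 * sysfs root returned above can be redirected through the SYSFS_PCI_DEVICES
 * environment variable, e.g. by a test harness before EAL initialization.
 * The path below is a made-up example value.
 *
 *	setenv("SYSFS_PCI_DEVICES", "/tmp/fake-sysfs/bus/pci/devices", 1);
 *	// ... rte_eal_init(argc, argv); ...
 *	printf("PCI sysfs root: %s\n", rte_pci_get_sysfs_path());
 */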
static struct rte_devargs *
pci_devargs_lookup(const struct rte_pci_addr *pci_addr)
{
	struct rte_devargs *devargs;
	struct rte_pci_addr addr;

	RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
		devargs->bus->parse(devargs->name, &addr);
		if (!rte_pci_addr_cmp(pci_addr, &addr))
			return devargs;
	}
	return NULL;
}

void
pci_name_set(struct rte_pci_device *dev)
{
	struct rte_devargs *devargs;

	/* Each device has its internal, canonical name set. */
	rte_pci_device_name(&dev->addr,
			dev->name, sizeof(dev->name));
	devargs = pci_devargs_lookup(&dev->addr);
	dev->device.devargs = devargs;

	/* When using a blocklist, only blocked devices will have
	 * an rte_devargs. Allowed devices won't have one.
	 */
	if (devargs != NULL)
		/* If an rte_devargs exists, the generic rte_device uses the
		 * given name as its name.
		 */
		dev->device.name = dev->device.devargs->name;
	else
		/* Otherwise, it uses the internal, canonical form. */
		dev->device.name = dev->name;
}

/* map a particular resource from a file */
void *
pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
		int additional_flags)
{
	void *mapaddr;

	/* Map the PCI memory resource of device */
	mapaddr = rte_mem_map(requested_addr, size,
		RTE_PROT_READ | RTE_PROT_WRITE,
		RTE_MAP_SHARED | additional_flags, fd, offset);
	if (mapaddr == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot map resource(%d, %p, 0x%zx, 0x%llx): %s (%p)\n",
			__func__, fd, requested_addr, size,
			(unsigned long long)offset,
			rte_strerror(rte_errno), mapaddr);
	} else
		RTE_LOG(DEBUG, EAL, "  PCI memory mapped at %p\n", mapaddr);

	return mapaddr;
}

/* unmap a particular resource */
void
pci_unmap_resource(void *requested_addr, size_t size)
{
	if (requested_addr == NULL)
		return;

	/* Unmap the PCI memory resource of device */
	if (rte_mem_unmap(requested_addr, size)) {
		RTE_LOG(ERR, EAL, "%s(): cannot mem unmap(%p, %#zx): %s\n",
			__func__, requested_addr, size,
			rte_strerror(rte_errno));
	} else
		RTE_LOG(DEBUG, EAL, "  PCI memory unmapped at %p\n",
				requested_addr);
}
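/*
 * Illustrative sketch (assumptions: an already opened resource file
 * descriptor "fd" and a BAR length "bar_len" and page-aligned offset
 * "bar_off" obtained elsewhere): this is roughly how the UIO/VFIO helpers
 * pair the two functions above.
 *
 *	void *bar = pci_map_resource(NULL, fd, bar_off, bar_len, 0);
 *	if (bar == NULL)
 *		return -1;
 *	// ... access the mapped BAR ...
 *	pci_unmap_resource(bar, bar_len);
 */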
/*
 * Match the PCI Driver and Device using the ID Table
 */
int
rte_pci_match(const struct rte_pci_driver *pci_drv,
		const struct rte_pci_device *pci_dev)
{
	const struct rte_pci_id *id_table;

	for (id_table = pci_drv->id_table; id_table->vendor_id != 0;
	     id_table++) {
		/* check if device's identifiers match the driver's ones */
		if (id_table->vendor_id != pci_dev->id.vendor_id &&
				id_table->vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->device_id != pci_dev->id.device_id &&
				id_table->device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_vendor_id !=
		    pci_dev->id.subsystem_vendor_id &&
		    id_table->subsystem_vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_device_id !=
		    pci_dev->id.subsystem_device_id &&
		    id_table->subsystem_device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->class_id != pci_dev->id.class_id &&
				id_table->class_id != RTE_CLASS_ANY_ID)
			continue;

		return 1;
	}

	return 0;
}

/*
 * If vendor/device ID match, call the probe() function of the
 * driver.
 */
static int
rte_pci_probe_one_driver(struct rte_pci_driver *dr,
			 struct rte_pci_device *dev)
{
	int ret;
	bool already_probed;
	struct rte_pci_addr *loc;

	if ((dr == NULL) || (dev == NULL))
		return -EINVAL;

	loc = &dev->addr;

	/* The device is not blocked; check if the driver supports it */
	if (!rte_pci_match(dr, dev))
		/* Match of device and driver failed */
		return 1;

	RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
			loc->domain, loc->bus, loc->devid, loc->function,
			dev->device.numa_node);

	/* no initialization when marked as blocked, return without error */
	if (dev->device.devargs != NULL &&
		dev->device.devargs->policy == RTE_DEV_BLOCKED) {
		RTE_LOG(INFO, EAL, "  Device is blocked, not initializing\n");
		return 1;
	}

	if (dev->device.numa_node < 0) {
		if (rte_socket_count() > 1)
			RTE_LOG(INFO, EAL, "Device %s is not NUMA-aware, defaulting socket to 0\n",
					dev->name);
		dev->device.numa_node = 0;
	}

	already_probed = rte_dev_is_probed(&dev->device);
	if (already_probed && !(dr->drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
		RTE_LOG(DEBUG, EAL, "Device %s is already probed\n",
				dev->device.name);
		return -EEXIST;
	}

	RTE_LOG(DEBUG, EAL, "  probe driver: %x:%x %s\n", dev->id.vendor_id,
		dev->id.device_id, dr->driver.name);

	/*
	 * Reference the driver structure.
	 * This needs to happen before rte_pci_map_device(), as it allows the
	 * driver flags to be used for adjusting configuration.
	 */
	if (!already_probed) {
		enum rte_iova_mode dev_iova_mode;
		enum rte_iova_mode iova_mode;

		dev_iova_mode = pci_device_iova_mode(dr, dev);
		iova_mode = rte_eal_iova_mode();
		if (dev_iova_mode != RTE_IOVA_DC &&
		    dev_iova_mode != iova_mode) {
			RTE_LOG(ERR, EAL, "  Expecting '%s' IOVA mode but current mode is '%s', not initializing\n",
				dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA",
				iova_mode == RTE_IOVA_PA ? "PA" : "VA");
			return -EINVAL;
		}

		/* Allocate interrupt instance for pci device */
		dev->intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (dev->intr_handle == NULL) {
			RTE_LOG(ERR, EAL,
				"Failed to create interrupt instance for %s\n",
				dev->device.name);
			return -ENOMEM;
		}

		dev->vfio_req_intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (dev->vfio_req_intr_handle == NULL) {
			rte_intr_instance_free(dev->intr_handle);
			dev->intr_handle = NULL;
			RTE_LOG(ERR, EAL,
				"Failed to create vfio req interrupt instance for %s\n",
				dev->device.name);
			return -ENOMEM;
		}

		if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
			ret = rte_pci_map_device(dev);
			if (ret != 0) {
				rte_intr_instance_free(dev->vfio_req_intr_handle);
				dev->vfio_req_intr_handle = NULL;
				rte_intr_instance_free(dev->intr_handle);
				dev->intr_handle = NULL;
				return ret;
			}
		}

		dev->driver = dr;
	}

	RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n",
			dr->driver.name, dev->id.vendor_id, dev->id.device_id,
			loc->domain, loc->bus, loc->devid, loc->function,
			dev->device.numa_node);
	/* call the driver probe() function */
	ret = dr->probe(dr, dev);
	if (already_probed)
		return ret; /* no rollback if already succeeded earlier */
	if (ret) {
		dev->driver = NULL;
		if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
			/* Don't unmap if device is unsupported and
			 * driver needs mapped resources.
			 */
			!(ret > 0 &&
				(dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
			rte_pci_unmap_device(dev);
		rte_intr_instance_free(dev->vfio_req_intr_handle);
		dev->vfio_req_intr_handle = NULL;
		rte_intr_instance_free(dev->intr_handle);
		dev->intr_handle = NULL;
	} else {
		dev->device.driver = &dr->driver;
	}

	return ret;
}

/*
 * If vendor/device ID match, call the remove() function of the
 * driver.
 */
static int
rte_pci_detach_dev(struct rte_pci_device *dev)
{
	struct rte_pci_addr *loc;
	struct rte_pci_driver *dr;
	int ret = 0;

	if (dev == NULL)
		return -EINVAL;

	dr = dev->driver;
	loc = &dev->addr;

	RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
			loc->domain, loc->bus, loc->devid,
			loc->function, dev->device.numa_node);

	RTE_LOG(DEBUG, EAL, "  remove driver: %x:%x %s\n", dev->id.vendor_id,
			dev->id.device_id, dr->driver.name);

	if (dr->remove) {
		ret = dr->remove(dev);
		if (ret < 0)
			return ret;
	}

	/* clear driver structure */
	dev->driver = NULL;
	dev->device.driver = NULL;

	if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
		/* unmap resources for devices that use igb_uio */
		rte_pci_unmap_device(dev);

	rte_intr_instance_free(dev->intr_handle);
	dev->intr_handle = NULL;
	rte_intr_instance_free(dev->vfio_req_intr_handle);
	dev->vfio_req_intr_handle = NULL;

	return 0;
}

/*
 * If vendor/device ID match, call the probe() function of all
 * registered drivers for the given device. Return < 0 if initialization
 * failed, return 1 if no driver is found for this device.
 */
static int
pci_probe_all_drivers(struct rte_pci_device *dev)
{
	struct rte_pci_driver *dr = NULL;
	int rc = 0;

	if (dev == NULL)
		return -EINVAL;

	FOREACH_DRIVER_ON_PCIBUS(dr) {
		rc = rte_pci_probe_one_driver(dr, dev);
		if (rc < 0)
			/* negative value is an error */
			return rc;
		if (rc > 0)
			/* positive value means driver doesn't support it */
			continue;
		return 0;
	}
	return 1;
}

/*
 * Scan the content of the PCI bus, and call the probe() function for
 * all registered drivers that have a matching entry in their id_table
 * for discovered devices.
 */
static int
pci_probe(void)
{
	struct rte_pci_device *dev = NULL;
	size_t probed = 0, failed = 0;
	int ret = 0;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		probed++;

		ret = pci_probe_all_drivers(dev);
		if (ret < 0) {
			if (ret != -EEXIST) {
				RTE_LOG(ERR, EAL, "Requested device "
					PCI_PRI_FMT " cannot be used\n",
					dev->addr.domain, dev->addr.bus,
					dev->addr.devid, dev->addr.function);
				rte_errno = errno;
				failed++;
			}
			ret = 0;
		}
	}

	return (probed && probed == failed) ? -1 : 0;
}

/* dump one device */
static int
pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
{
	int i;

	fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
		dev->addr.devid, dev->addr.function);
	fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
		dev->id.device_id);

	for (i = 0; i != sizeof(dev->mem_resource) /
		sizeof(dev->mem_resource[0]); i++) {
		fprintf(f, "   %16.16"PRIx64" %16.16"PRIx64"\n",
			dev->mem_resource[i].phys_addr,
			dev->mem_resource[i].len);
	}
	return 0;
}

/* dump devices on the bus */
void
rte_pci_dump(FILE *f)
{
	struct rte_pci_device *dev = NULL;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		pci_dump_one_device(f, dev);
	}
}

static int
pci_parse(const char *name, void *addr)
{
	struct rte_pci_addr *out = addr;
	struct rte_pci_addr pci_addr;
	bool parse;

	parse = (rte_pci_addr_parse(name, &pci_addr) == 0);
	if (parse && addr != NULL)
		*out = pci_addr;
	return parse == false;
}

/* register a driver */
void
rte_pci_register(struct rte_pci_driver *driver)
{
	TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next);
	driver->bus = &rte_pci_bus;
}

/* unregister a driver */
void
rte_pci_unregister(struct rte_pci_driver *driver)
{
	TAILQ_REMOVE(&rte_pci_bus.driver_list, driver, next);
	driver->bus = NULL;
}
/* Add a device to PCI bus */
void
rte_pci_add_device(struct rte_pci_device *pci_dev)
{
	TAILQ_INSERT_TAIL(&rte_pci_bus.device_list, pci_dev, next);
}

/* Insert a device into a predefined position in PCI bus */
void
rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
		      struct rte_pci_device *new_pci_dev)
{
	TAILQ_INSERT_BEFORE(exist_pci_dev, new_pci_dev, next);
}

/* Remove a device from PCI bus */
static void
rte_pci_remove_device(struct rte_pci_device *pci_dev)
{
	TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
}

static struct rte_device *
pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		const void *data)
{
	const struct rte_pci_device *pstart;
	struct rte_pci_device *pdev;

	if (start != NULL) {
		pstart = RTE_DEV_TO_PCI_CONST(start);
		pdev = TAILQ_NEXT(pstart, next);
	} else {
		pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
	}
	while (pdev != NULL) {
		if (cmp(&pdev->device, data) == 0)
			return &pdev->device;
		pdev = TAILQ_NEXT(pdev, next);
	}
	return NULL;
}

/*
 * Find the device that encountered the failure by iterating over all
 * devices on the PCI bus and checking whether the memory failure address
 * falls within the range of one of the device's BARs.
 */
static struct rte_pci_device *
pci_find_device_by_addr(const void *failure_addr)
{
	struct rte_pci_device *pdev = NULL;
	uint64_t check_point, start, end, len;
	int i;

	check_point = (uint64_t)(uintptr_t)failure_addr;

	FOREACH_DEVICE_ON_PCIBUS(pdev) {
		for (i = 0; i != RTE_DIM(pdev->mem_resource); i++) {
			start = (uint64_t)(uintptr_t)pdev->mem_resource[i].addr;
			len = pdev->mem_resource[i].len;
			end = start + len;
			if (check_point >= start && check_point < end) {
				RTE_LOG(DEBUG, EAL, "Failure address %16.16"
					PRIx64" belongs to device %s!\n",
					check_point, pdev->device.name);
				return pdev;
			}
		}
	}
	return NULL;
}

static int
pci_hot_unplug_handler(struct rte_device *dev)
{
	struct rte_pci_device *pdev = NULL;
	int ret = 0;

	pdev = RTE_DEV_TO_PCI(dev);
	if (!pdev)
		return -1;

	switch (pdev->kdrv) {
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_PCI_KDRV_VFIO:
		/*
		 * The vfio kernel module guarantees that the PCI device will
		 * not be deleted until user space releases the resource, so
		 * there is no need to remap the BAR resources here; just
		 * notify the request event to user space so it can handle it.
		 */
		rte_dev_event_callback_process(dev->name,
					       RTE_DEV_EVENT_REMOVE);
		break;
#endif
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
	case RTE_PCI_KDRV_NIC_UIO:
		/* The BAR resources are invalid, remap them to be safe. */
		ret = pci_uio_remap_resource(pdev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"Not managed by a supported kernel driver, skipped\n");
		ret = -1;
		break;
	}

	return ret;
}

static int
pci_sigbus_handler(const void *failure_addr)
{
	struct rte_pci_device *pdev = NULL;
	int ret = 0;

	pdev = pci_find_device_by_addr(failure_addr);
	if (!pdev) {
		/* It is a generic sigbus error, no bus would handle it. */
		ret = 1;
	} else {
		/* The sigbus error was caused by a hot-unplug. */
		ret = pci_hot_unplug_handler(&pdev->device);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"Failed to handle hot-unplug for device %s\n",
				pdev->name);
			ret = -1;
		}
	}
	return ret;
}

static int
pci_plug(struct rte_device *dev)
{
	return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev));
}

static int
pci_unplug(struct rte_device *dev)
{
	struct rte_pci_device *pdev;
	int ret;

	pdev = RTE_DEV_TO_PCI(dev);
	ret = rte_pci_detach_dev(pdev);
	if (ret == 0) {
		rte_pci_remove_device(pdev);
		rte_devargs_remove(dev->devargs);
		free(pdev);
	}
	return ret;
}

static int
pci_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {
		rte_errno = EINVAL;
		return -1;
	}
	if (pdev->driver->dma_map)
		return pdev->driver->dma_map(pdev, addr, iova, len);
	/*
	 * If the driver does not provide a specific mapping,
	 * try to fall back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_map
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
				 iova, len);
	rte_errno = ENOTSUP;
	return -1;
}

static int
pci_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {
		rte_errno = EINVAL;
		return -1;
	}
	if (pdev->driver->dma_unmap)
		return pdev->driver->dma_unmap(pdev, addr, iova, len);
	/*
	 * If the driver does not provide a specific unmapping,
	 * try to fall back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_unmap
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
				 iova, len);
	rte_errno = ENOTSUP;
	return -1;
}

bool
rte_pci_ignore_device(const struct rte_pci_addr *pci_addr)
{
	struct rte_devargs *devargs = pci_devargs_lookup(pci_addr);

	switch (rte_pci_bus.bus.conf.scan_mode) {
	case RTE_BUS_SCAN_ALLOWLIST:
		if (devargs && devargs->policy == RTE_DEV_ALLOWED)
			return false;
		break;
	case RTE_BUS_SCAN_UNDEFINED:
	case RTE_BUS_SCAN_BLOCKLIST:
		if (devargs == NULL || devargs->policy != RTE_DEV_BLOCKED)
			return false;
		break;
	}
	return true;
}

enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	enum rte_iova_mode iova_mode = RTE_IOVA_DC;
	const struct rte_pci_device *dev;
	const struct rte_pci_driver *drv;
	bool devices_want_va = false;
	bool devices_want_pa = false;
	int iommu_no_va = -1;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		/*
		 * We can check this only once, because the IOMMU hardware is
		 * the same for all of the devices.
		 */
		if (iommu_no_va == -1)
			iommu_no_va = pci_device_iommu_support_va(dev)
					? 0 : 1;

		if (dev->kdrv == RTE_PCI_KDRV_UNKNOWN ||
		    dev->kdrv == RTE_PCI_KDRV_NONE)
			continue;
		FOREACH_DRIVER_ON_PCIBUS(drv) {
			enum rte_iova_mode dev_iova_mode;

			if (!rte_pci_match(drv, dev))
				continue;

			dev_iova_mode = pci_device_iova_mode(drv, dev);
			RTE_LOG(DEBUG, EAL, "PCI driver %s for device "
				PCI_PRI_FMT " wants IOVA as '%s'\n",
				drv->driver.name,
				dev->addr.domain, dev->addr.bus,
				dev->addr.devid, dev->addr.function,
				dev_iova_mode == RTE_IOVA_DC ? "DC" :
				(dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA"));
			if (dev_iova_mode == RTE_IOVA_PA)
				devices_want_pa = true;
			else if (dev_iova_mode == RTE_IOVA_VA)
				devices_want_va = true;
		}
	}
	if (iommu_no_va == 1) {
		iova_mode = RTE_IOVA_PA;
		if (devices_want_va) {
			RTE_LOG(WARNING, EAL, "Some devices want 'VA' but IOMMU does not support 'VA'.\n");
			RTE_LOG(WARNING, EAL, "The devices that want 'VA' won't initialize.\n");
		}
	} else if (devices_want_va && !devices_want_pa) {
		iova_mode = RTE_IOVA_VA;
	} else if (devices_want_pa && !devices_want_va) {
		iova_mode = RTE_IOVA_PA;
	} else {
		iova_mode = RTE_IOVA_DC;
		if (devices_want_va) {
			RTE_LOG(WARNING, EAL, "Some devices want 'VA' but forcing 'DC' because other devices want 'PA'.\n");
			RTE_LOG(WARNING, EAL, "Depending on the final decision by the EAL, not all devices may be able to initialize.\n");
		}
	}
	return iova_mode;
}

off_t
rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap)
{
	off_t offset = RTE_PCI_CFG_SPACE_SIZE;
	uint32_t header;
	int ttl;

	/* minimum 8 bytes per capability */
	ttl = (RTE_PCI_CFG_SPACE_EXP_SIZE - RTE_PCI_CFG_SPACE_SIZE) / 8;

	if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
		RTE_LOG(ERR, EAL, "error in reading extended capabilities\n");
		return -1;
	}

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl != 0) {
		if (RTE_PCI_EXT_CAP_ID(header) == cap)
			return offset;

		offset = RTE_PCI_EXT_CAP_NEXT(header);

		if (offset < RTE_PCI_CFG_SPACE_SIZE)
			break;

		if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
			RTE_LOG(ERR, EAL,
				"error in reading extended capabilities\n");
			return -1;
		}

		ttl--;
	}

	return 0;
}
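/*
 * Illustrative sketch of rte_pci_find_ext_capability(): look up a PCI
 * Express extended capability by ID and read from it. The capability ID
 * used here (0x03, Device Serial Number per the PCIe spec) and the "dev"
 * pointer are assumptions for the example; the function returns the
 * capability offset, 0 if not found, or -1 on read error.
 *
 *	off_t off = rte_pci_find_ext_capability(dev, 0x03);
 *	if (off > 0) {
 *		uint32_t dsn_lo;
 *		rte_pci_read_config(dev, &dsn_lo, 4, off + 4);
 *	}
 */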
int
rte_pci_set_bus_master(struct rte_pci_device *dev, bool enable)
{
	uint16_t old_cmd, cmd;

	if (rte_pci_read_config(dev, &old_cmd, sizeof(old_cmd),
				RTE_PCI_COMMAND) < 0) {
		RTE_LOG(ERR, EAL, "error in reading PCI command register\n");
		return -1;
	}

	if (enable)
		cmd = old_cmd | RTE_PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~RTE_PCI_COMMAND_MASTER;

	if (cmd == old_cmd)
		return 0;

	if (rte_pci_write_config(dev, &cmd, sizeof(cmd),
				 RTE_PCI_COMMAND) < 0) {
		RTE_LOG(ERR, EAL, "error in writing PCI command register\n");
		return -1;
	}

	return 0;
}
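/*
 * Illustrative sketch of rte_pci_set_bus_master(): a driver probe path
 * would typically enable bus mastering before starting DMA and disable it
 * again on remove. The error code chosen below is an assumption for the
 * example.
 *
 *	if (rte_pci_set_bus_master(dev, true) != 0)
 *		return -EIO;
 *	// ... start DMA, run, stop DMA ...
 *	rte_pci_set_bus_master(dev, false);
 */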
struct rte_pci_bus rte_pci_bus = {
	.bus = {
		.scan = rte_pci_scan,
		.probe = pci_probe,
		.find_device = pci_find_device,
		.plug = pci_plug,
		.unplug = pci_unplug,
		.parse = pci_parse,
		.devargs_parse = rte_pci_devargs_parse,
		.dma_map = pci_dma_map,
		.dma_unmap = pci_dma_unmap,
		.get_iommu_class = rte_pci_get_iommu_class,
		.dev_iterate = rte_pci_dev_iterate,
		.hot_unplug_handler = pci_hot_unplug_handler,
		.sigbus_handler = pci_sigbus_handler,
	},
	.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
};

RTE_REGISTER_BUS(pci, rte_pci_bus.bus);