/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <string.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/queue.h>
#include <rte_errno.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_paging.h>
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_vfio.h>

#include "private.h"


#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"

const char *rte_pci_get_sysfs_path(void)
{
	const char *path = NULL;

#ifdef RTE_EXEC_ENV_LINUX
	path = getenv("SYSFS_PCI_DEVICES");
	if (path == NULL)
		return SYSFS_PCI_DEVICES;
#endif

	return path;
}

static struct rte_devargs *
pci_devargs_lookup(const struct rte_pci_addr *pci_addr)
{
	struct rte_devargs *devargs;
	struct rte_pci_addr addr;

	RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
		devargs->bus->parse(devargs->name, &addr);
		if (!rte_pci_addr_cmp(pci_addr, &addr))
			return devargs;
	}
	return NULL;
}

void
pci_name_set(struct rte_pci_device *dev)
{
	struct rte_devargs *devargs;

	/* Each device has its internal, canonical name set. */
	rte_pci_device_name(&dev->addr,
			dev->name, sizeof(dev->name));
	devargs = pci_devargs_lookup(&dev->addr);
	dev->device.devargs = devargs;

	/* When using a blocklist, only blocked devices will have
	 * an rte_devargs. Allowed devices won't have one.
	 */
	if (devargs != NULL)
		/* If an rte_devargs exists, the generic rte_device uses the
		 * given name as its name.
		 */
		dev->device.name = dev->device.devargs->name;
	else
		/* Otherwise, it uses the internal, canonical form. */
		dev->device.name = dev->name;
}
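/*
 * Note: rte_pci_device_name() is expected to produce the canonical DomBDF
 * form of the address (PCI_PRI_FMT), e.g. "0000:3b:00.0" for domain 0,
 * bus 0x3b, devid 0x00, function 0; this is also the notation that
 * pci_parse() accepts further below.
 */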
/* map a particular resource from a file */
void *
pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
		 int additional_flags)
{
	void *mapaddr;

	/* Map the PCI memory resource of device */
	mapaddr = rte_mem_map(requested_addr, size,
		RTE_PROT_READ | RTE_PROT_WRITE,
		RTE_MAP_SHARED | additional_flags, fd, offset);
	if (mapaddr == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot map resource(%d, %p, 0x%zx, 0x%llx): %s (%p)\n",
			__func__, fd, requested_addr, size,
			(unsigned long long)offset,
			rte_strerror(rte_errno), mapaddr);
	} else
		RTE_LOG(DEBUG, EAL, "  PCI memory mapped at %p\n", mapaddr);

	return mapaddr;
}

/* unmap a particular resource */
void
pci_unmap_resource(void *requested_addr, size_t size)
{
	if (requested_addr == NULL)
		return;

	/* Unmap the PCI memory resource of device */
	if (rte_mem_unmap(requested_addr, size)) {
		RTE_LOG(ERR, EAL, "%s(): cannot mem unmap(%p, %#zx): %s\n",
			__func__, requested_addr, size,
			rte_strerror(rte_errno));
	} else
		RTE_LOG(DEBUG, EAL, "  PCI memory unmapped at %p\n",
				requested_addr);
}

/*
 * Match the PCI Driver and Device using the ID Table
 */
int
rte_pci_match(const struct rte_pci_driver *pci_drv,
	      const struct rte_pci_device *pci_dev)
{
	const struct rte_pci_id *id_table;

	for (id_table = pci_drv->id_table; id_table->vendor_id != 0;
	     id_table++) {
		/* check if device's identifiers match the driver's ones */
		if (id_table->vendor_id != pci_dev->id.vendor_id &&
				id_table->vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->device_id != pci_dev->id.device_id &&
				id_table->device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_vendor_id !=
		    pci_dev->id.subsystem_vendor_id &&
		    id_table->subsystem_vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_device_id !=
		    pci_dev->id.subsystem_device_id &&
		    id_table->subsystem_device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->class_id != pci_dev->id.class_id &&
				id_table->class_id != RTE_CLASS_ANY_ID)
			continue;

		return 1;
	}

	return 0;
}
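/*
 * Illustrative sketch of the kind of table rte_pci_match() walks: a PMD
 * terminates its id_table with a zeroed entry, which is what the
 * "vendor_id != 0" loop condition above relies on. The names and IDs below
 * are hypothetical, for illustration only:
 *
 *	static const struct rte_pci_id example_pci_id_map[] = {
 *		{ RTE_PCI_DEVICE(0x8086, 0x10fb) },
 *		{ .vendor_id = 0 },	// sentinel entry, ends the match loop
 *	};
 */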
/*
 * If vendor/device ID match, call the probe() function of the
 * driver.
 */
static int
rte_pci_probe_one_driver(struct rte_pci_driver *dr,
			 struct rte_pci_device *dev)
{
	int ret;
	bool already_probed;
	struct rte_pci_addr *loc;

	if ((dr == NULL) || (dev == NULL))
		return -EINVAL;

	loc = &dev->addr;

	/* The device is not blocked; check if the driver supports it */
	if (!rte_pci_match(dr, dev))
		/* Match of device and driver failed */
		return 1;

	RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
			loc->domain, loc->bus, loc->devid, loc->function,
			dev->device.numa_node);

	/* no initialization when marked as blocked, return without error */
	if (dev->device.devargs != NULL &&
		dev->device.devargs->policy == RTE_DEV_BLOCKED) {
		RTE_LOG(INFO, EAL, "  Device is blocked, not initializing\n");
		return 1;
	}

	if (dev->device.numa_node < 0) {
		RTE_LOG(WARNING, EAL, "  Invalid NUMA socket, default to 0\n");
		dev->device.numa_node = 0;
	}

	already_probed = rte_dev_is_probed(&dev->device);
	if (already_probed && !(dr->drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
		RTE_LOG(DEBUG, EAL, "Device %s is already probed\n",
				dev->device.name);
		return -EEXIST;
	}

	RTE_LOG(DEBUG, EAL, "  probe driver: %x:%x %s\n", dev->id.vendor_id,
		dev->id.device_id, dr->driver.name);

	/*
	 * Reference the driver structure.
	 * This needs to happen before rte_pci_map_device(), as it allows
	 * the driver flags to be used when adjusting the configuration.
	 */
	if (!already_probed) {
		enum rte_iova_mode dev_iova_mode;
		enum rte_iova_mode iova_mode;

		dev_iova_mode = pci_device_iova_mode(dr, dev);
		iova_mode = rte_eal_iova_mode();
		if (dev_iova_mode != RTE_IOVA_DC &&
		    dev_iova_mode != iova_mode) {
			RTE_LOG(ERR, EAL, "  Expecting '%s' IOVA mode but current mode is '%s', not initializing\n",
				dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA",
				iova_mode == RTE_IOVA_PA ? "PA" : "VA");
			return -EINVAL;
		}

		dev->driver = dr;
	}

	if (!already_probed && (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)) {
		/* map resources for devices that use igb_uio */
		ret = rte_pci_map_device(dev);
		if (ret != 0) {
			dev->driver = NULL;
			return ret;
		}
	}

	RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n",
			dr->driver.name, dev->id.vendor_id, dev->id.device_id,
			loc->domain, loc->bus, loc->devid, loc->function,
			dev->device.numa_node);
	/* call the driver probe() function */
	ret = dr->probe(dr, dev);
	if (already_probed)
		return ret; /* no rollback if already succeeded earlier */
	if (ret) {
		dev->driver = NULL;
		if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
			/* Don't unmap if device is unsupported and
			 * driver needs mapped resources.
			 */
			!(ret > 0 &&
				(dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
			rte_pci_unmap_device(dev);
	} else {
		dev->device.driver = &dr->driver;
	}

	return ret;
}
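/*
 * Return convention of rte_pci_probe_one_driver() above: a negative value is
 * an error (e.g. -EINVAL, -EEXIST), 1 means the driver does not support the
 * device or the device is blocked, and 0 means the driver's probe() accepted
 * the device.
 */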
/*
 * If vendor/device ID match, call the remove() function of the
 * driver.
 */
static int
rte_pci_detach_dev(struct rte_pci_device *dev)
{
	struct rte_pci_addr *loc;
	struct rte_pci_driver *dr;
	int ret = 0;

	if (dev == NULL)
		return -EINVAL;

	dr = dev->driver;
	loc = &dev->addr;

	RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
			loc->domain, loc->bus, loc->devid,
			loc->function, dev->device.numa_node);

	RTE_LOG(DEBUG, EAL, "  remove driver: %x:%x %s\n", dev->id.vendor_id,
			dev->id.device_id, dr->driver.name);

	if (dr->remove) {
		ret = dr->remove(dev);
		if (ret < 0)
			return ret;
	}

	/* clear driver structure */
	dev->driver = NULL;
	dev->device.driver = NULL;

	if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
		/* unmap resources for devices that use igb_uio */
		rte_pci_unmap_device(dev);

	return 0;
}

/*
 * If vendor/device ID match, call the probe() function of all
 * registered drivers for the given device. Return < 0 if initialization
 * failed, return 1 if no driver is found for this device.
 */
static int
pci_probe_all_drivers(struct rte_pci_device *dev)
{
	struct rte_pci_driver *dr = NULL;
	int rc = 0;

	if (dev == NULL)
		return -EINVAL;

	FOREACH_DRIVER_ON_PCIBUS(dr) {
		rc = rte_pci_probe_one_driver(dr, dev);
		if (rc < 0)
			/* negative value is an error */
			return rc;
		if (rc > 0)
			/* positive value means driver doesn't support it */
			continue;
		return 0;
	}
	return 1;
}

/*
 * Scan the content of the PCI bus, and call the probe() function for
 * all registered drivers that have a matching entry in their id_table
 * for the discovered devices.
 */
static int
pci_probe(void)
{
	struct rte_pci_device *dev = NULL;
	size_t probed = 0, failed = 0;
	int ret = 0;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		probed++;

		ret = pci_probe_all_drivers(dev);
		if (ret < 0) {
			if (ret != -EEXIST) {
				RTE_LOG(ERR, EAL, "Requested device "
					PCI_PRI_FMT " cannot be used\n",
					dev->addr.domain, dev->addr.bus,
					dev->addr.devid, dev->addr.function);
				rte_errno = errno;
				failed++;
			}
			ret = 0;
		}
	}

	return (probed && probed == failed) ? -1 : 0;
}
/* dump one device */
static int
pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
{
	int i;

	fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
	       dev->addr.devid, dev->addr.function);
	fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
	       dev->id.device_id);

	for (i = 0; i != sizeof(dev->mem_resource) /
		sizeof(dev->mem_resource[0]); i++) {
		fprintf(f, "   %16.16"PRIx64" %16.16"PRIx64"\n",
			dev->mem_resource[i].phys_addr,
			dev->mem_resource[i].len);
	}
	return 0;
}

/* dump devices on the bus */
void
rte_pci_dump(FILE *f)
{
	struct rte_pci_device *dev = NULL;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		pci_dump_one_device(f, dev);
	}
}

static int
pci_parse(const char *name, void *addr)
{
	struct rte_pci_addr *out = addr;
	struct rte_pci_addr pci_addr;
	bool parse;

	parse = (rte_pci_addr_parse(name, &pci_addr) == 0);
	if (parse && addr != NULL)
		*out = pci_addr;
	return parse == false;
}
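/*
 * pci_parse() defers to rte_pci_addr_parse(), which understands both the
 * full DomBDF and the short BDF notations. For illustration:
 *
 *	struct rte_pci_addr a;
 *	rte_pci_addr_parse("0000:3b:00.0", &a);	// DomBDF
 *	rte_pci_addr_parse("3b:00.0", &a);	// BDF, domain defaults to 0
 */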
/* register a driver */
void
rte_pci_register(struct rte_pci_driver *driver)
{
	TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next);
	driver->bus = &rte_pci_bus;
}

/* unregister a driver */
void
rte_pci_unregister(struct rte_pci_driver *driver)
{
	TAILQ_REMOVE(&rte_pci_bus.driver_list, driver, next);
	driver->bus = NULL;
}

/* Add a device to PCI bus */
void
rte_pci_add_device(struct rte_pci_device *pci_dev)
{
	TAILQ_INSERT_TAIL(&rte_pci_bus.device_list, pci_dev, next);
}

/* Insert a device into a predefined position in PCI bus */
void
rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
		      struct rte_pci_device *new_pci_dev)
{
	TAILQ_INSERT_BEFORE(exist_pci_dev, new_pci_dev, next);
}

/* Remove a device from PCI bus */
static void
rte_pci_remove_device(struct rte_pci_device *pci_dev)
{
	TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
}

static struct rte_device *
pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		const void *data)
{
	const struct rte_pci_device *pstart;
	struct rte_pci_device *pdev;

	if (start != NULL) {
		pstart = RTE_DEV_TO_PCI_CONST(start);
		pdev = TAILQ_NEXT(pstart, next);
	} else {
		pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
	}
	while (pdev != NULL) {
		if (cmp(&pdev->device, data) == 0)
			return &pdev->device;
		pdev = TAILQ_NEXT(pdev, next);
	}
	return NULL;
}

/*
 * Find the device that encountered the failure by iterating over all devices
 * on the PCI bus and checking whether the memory failure address falls within
 * the range of one of the device's BARs.
 */
static struct rte_pci_device *
pci_find_device_by_addr(const void *failure_addr)
{
	struct rte_pci_device *pdev = NULL;
	uint64_t check_point, start, end, len;
	int i;

	check_point = (uint64_t)(uintptr_t)failure_addr;

	FOREACH_DEVICE_ON_PCIBUS(pdev) {
		for (i = 0; i != RTE_DIM(pdev->mem_resource); i++) {
			start = (uint64_t)(uintptr_t)pdev->mem_resource[i].addr;
			len = pdev->mem_resource[i].len;
			end = start + len;
			if (check_point >= start && check_point < end) {
				RTE_LOG(DEBUG, EAL, "Failure address %16.16"
					PRIx64" belongs to device %s!\n",
					check_point, pdev->device.name);
				return pdev;
			}
		}
	}
	return NULL;
}

static int
pci_hot_unplug_handler(struct rte_device *dev)
{
	struct rte_pci_device *pdev = NULL;
	int ret = 0;

	pdev = RTE_DEV_TO_PCI(dev);
	if (!pdev)
		return -1;

	switch (pdev->kdrv) {
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_PCI_KDRV_VFIO:
		/*
		 * The VFIO kernel module guarantees that the PCI device will
		 * not be deleted until user space releases its resources, so
		 * there is no need to remap the BAR resources here; just
		 * forward the request event to user space and let it handle
		 * the removal.
		 */
		rte_dev_event_callback_process(dev->name,
					       RTE_DEV_EVENT_REMOVE);
		break;
#endif
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
	case RTE_PCI_KDRV_NIC_UIO:
		/* The BAR resources are invalid, remap them to be safe. */
		ret = pci_uio_remap_resource(pdev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"Not managed by a supported kernel driver, skipped\n");
		ret = -1;
		break;
	}

	return ret;
}

static int
pci_sigbus_handler(const void *failure_addr)
{
	struct rte_pci_device *pdev = NULL;
	int ret = 0;

	pdev = pci_find_device_by_addr(failure_addr);
	if (!pdev) {
		/* It is a generic SIGBUS error; no bus will handle it. */
		ret = 1;
	} else {
		/* The SIGBUS error was caused by a hot-unplug. */
		ret = pci_hot_unplug_handler(&pdev->device);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"Failed to handle hot-unplug for device %s\n",
				pdev->name);
			ret = -1;
		}
	}
	return ret;
}

static int
pci_plug(struct rte_device *dev)
{
	return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev));
}

static int
pci_unplug(struct rte_device *dev)
{
	struct rte_pci_device *pdev;
	int ret;

	pdev = RTE_DEV_TO_PCI(dev);
	ret = rte_pci_detach_dev(pdev);
	if (ret == 0) {
		rte_pci_remove_device(pdev);
		rte_devargs_remove(dev->devargs);
		free(pdev);
	}
	return ret;
}
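/*
 * The bus dma_map/dma_unmap hooks below first defer to the driver's own
 * callbacks and only fall back to the default VFIO container when the device
 * is bound to vfio-pci. A hypothetical external buffer registered through the
 * generic device API would reach them via rte_dev_dma_map(), for example:
 *
 *	rte_dev_dma_map(&pdev->device, buf_va, buf_iova, buf_len);
 */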
static int
pci_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {
		rte_errno = EINVAL;
		return -1;
	}
	if (pdev->driver->dma_map)
		return pdev->driver->dma_map(pdev, addr, iova, len);
	/*
	 * If the driver does not provide a specific mapping,
	 * try to fall back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_map
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
				 iova, len);
	rte_errno = ENOTSUP;
	return -1;
}

static int
pci_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {
		rte_errno = EINVAL;
		return -1;
	}
	if (pdev->driver->dma_unmap)
		return pdev->driver->dma_unmap(pdev, addr, iova, len);
	/*
	 * If the driver does not provide a specific unmapping,
	 * try to fall back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_unmap
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
				 iova, len);
	rte_errno = ENOTSUP;
	return -1;
}

bool
rte_pci_ignore_device(const struct rte_pci_addr *pci_addr)
{
	struct rte_devargs *devargs = pci_devargs_lookup(pci_addr);

	switch (rte_pci_bus.bus.conf.scan_mode) {
	case RTE_BUS_SCAN_ALLOWLIST:
		if (devargs && devargs->policy == RTE_DEV_ALLOWED)
			return false;
		break;
	case RTE_BUS_SCAN_UNDEFINED:
	case RTE_BUS_SCAN_BLOCKLIST:
		if (devargs == NULL || devargs->policy != RTE_DEV_BLOCKED)
			return false;
		break;
	}
	return true;
}

enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	enum rte_iova_mode iova_mode = RTE_IOVA_DC;
	const struct rte_pci_device *dev;
	const struct rte_pci_driver *drv;
	bool devices_want_va = false;
	bool devices_want_pa = false;
	int iommu_no_va = -1;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		/*
		 * We can check this only once, because the IOMMU hardware is
		 * the same for all of them.
		 */
		if (iommu_no_va == -1)
			iommu_no_va = pci_device_iommu_support_va(dev)
					? 0 : 1;

		if (dev->kdrv == RTE_PCI_KDRV_UNKNOWN ||
		    dev->kdrv == RTE_PCI_KDRV_NONE)
			continue;
		FOREACH_DRIVER_ON_PCIBUS(drv) {
			enum rte_iova_mode dev_iova_mode;

			if (!rte_pci_match(drv, dev))
				continue;

			dev_iova_mode = pci_device_iova_mode(drv, dev);
			RTE_LOG(DEBUG, EAL, "PCI driver %s for device "
				PCI_PRI_FMT " wants IOVA as '%s'\n",
				drv->driver.name,
				dev->addr.domain, dev->addr.bus,
				dev->addr.devid, dev->addr.function,
				dev_iova_mode == RTE_IOVA_DC ? "DC" :
				(dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA"));
			if (dev_iova_mode == RTE_IOVA_PA)
				devices_want_pa = true;
			else if (dev_iova_mode == RTE_IOVA_VA)
				devices_want_va = true;
		}
	}
	if (iommu_no_va == 1) {
		iova_mode = RTE_IOVA_PA;
		if (devices_want_va) {
			RTE_LOG(WARNING, EAL, "Some devices want 'VA' but IOMMU does not support 'VA'.\n");
			RTE_LOG(WARNING, EAL, "The devices that want 'VA' won't initialize.\n");
		}
	} else if (devices_want_va && !devices_want_pa) {
		iova_mode = RTE_IOVA_VA;
	} else if (devices_want_pa && !devices_want_va) {
		iova_mode = RTE_IOVA_PA;
	} else {
		iova_mode = RTE_IOVA_DC;
		if (devices_want_va) {
			RTE_LOG(WARNING, EAL, "Some devices want 'VA' but forcing 'DC' because other devices want 'PA'.\n");
			RTE_LOG(WARNING, EAL, "Depending on the final decision by the EAL, not all devices may be able to initialize.\n");
		}
	}
	return iova_mode;
}

off_t
rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap)
{
	off_t offset = RTE_PCI_CFG_SPACE_SIZE;
	uint32_t header;
	int ttl;

	/* minimum 8 bytes per capability */
	ttl = (RTE_PCI_CFG_SPACE_EXP_SIZE - RTE_PCI_CFG_SPACE_SIZE) / 8;

	if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
		RTE_LOG(ERR, EAL, "error in reading extended capabilities\n");
		return -1;
	}

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl != 0) {
		if (RTE_PCI_EXT_CAP_ID(header) == cap)
			return offset;

		offset = RTE_PCI_EXT_CAP_NEXT(header);

		if (offset < RTE_PCI_CFG_SPACE_SIZE)
			break;

		if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
			RTE_LOG(ERR, EAL,
				"error in reading extended capabilities\n");
			return -1;
		}

		ttl--;
	}

	return 0;
}

int
rte_pci_set_bus_master(struct rte_pci_device *dev, bool enable)
{
	uint16_t old_cmd, cmd;

	if (rte_pci_read_config(dev, &old_cmd, sizeof(old_cmd),
				RTE_PCI_COMMAND) < 0) {
		RTE_LOG(ERR, EAL, "error in reading PCI command register\n");
		return -1;
	}

	if (enable)
		cmd = old_cmd | RTE_PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~RTE_PCI_COMMAND_MASTER;

	if (cmd == old_cmd)
		return 0;

	if (rte_pci_write_config(dev, &cmd, sizeof(cmd),
				 RTE_PCI_COMMAND) < 0) {
		RTE_LOG(ERR, EAL, "error in writing PCI command register\n");
		return -1;
	}

	return 0;
}

struct rte_pci_bus rte_pci_bus = {
	.bus = {
		.scan = rte_pci_scan,
		.probe = pci_probe,
		.find_device = pci_find_device,
		.plug = pci_plug,
		.unplug = pci_unplug,
		.parse = pci_parse,
		.dma_map = pci_dma_map,
		.dma_unmap = pci_dma_unmap,
		.get_iommu_class = rte_pci_get_iommu_class,
		.dev_iterate = rte_pci_dev_iterate,
		.hot_unplug_handler = pci_hot_unplug_handler,
		.sigbus_handler = pci_sigbus_handler,
	},
	.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
};

RTE_REGISTER_BUS(pci, rte_pci_bus.bus);