/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <dirent.h>

#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
#include <rte_vfio.h>

#include "eal_filesystem.h"

#include "private.h"
#include "pci_init.h"

/**
 * @file
 * PCI probing under linux
 *
 * This code is used to simulate a PCI probe by parsing information in sysfs.
 * When a registered device matches a driver, it is then initialized by the
 * kernel driver it is bound to (igb_uio, uio_pci_generic or vfio-pci), or
 * skipped if it is not bound to a supported driver.
 */

extern struct rte_pci_bus rte_pci_bus;

static int
pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
		size_t len)
{
	int count;
	char path[PATH_MAX];
	char *name;

	if (!filename || !dri_name)
		return -1;

	count = readlink(filename, path, PATH_MAX);
	if (count >= PATH_MAX)
		return -1;

	/* the device is not bound to any driver */
	if (count < 0)
		return 1;

	path[count] = '\0';

	name = strrchr(path, '/');
	if (name) {
		strlcpy(dri_name, name + 1, len);
		return 0;
	}

	return -1;
}

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			ret = pci_vfio_map_resource(dev);
#endif
		break;
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		if (rte_eal_using_phys_addrs()) {
			/* map resources for devices that use uio */
			ret = pci_uio_map_resource(dev);
		}
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			pci_vfio_unmap_resource(dev);
#endif
		break;
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

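/*
 * rte_memseg_list_walk() callback: remember the highest end address
 * (base_va + len) seen across all memory segment lists. pci_find_max_end_va()
 * below returns the first address past all DPDK memsegs, which the UIO
 * mapping code can use as a hint for where to place BAR mappings.
 */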
static int
find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
	size_t sz = msl->len;
	void *end_va = RTE_PTR_ADD(msl->base_va, sz);
	void **max_va = arg;

	if (*max_va < end_va)
		*max_va = end_va;
	return 0;
}

void *
pci_find_max_end_va(void)
{
	void *va = NULL;

	rte_memseg_list_walk(find_max_end_va, &va);
	return va;
}


/* parse one line of the "resource" sysfs file (note that the 'line'
 * string is modified)
 */
int
pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
	uint64_t *end_addr, uint64_t *flags)
{
	union pci_resource_info {
		struct {
			char *phys_addr;
			char *end_addr;
			char *flags;
		};
		char *ptrs[PCI_RESOURCE_FMT_NVAL];
	} res_info;

	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}
	errno = 0;
	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
	*end_addr = strtoull(res_info.end_addr, NULL, 16);
	*flags = strtoull(res_info.flags, NULL, 16);
	if (errno != 0) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}

	return 0;
}

/* parse the "resource" sysfs file */
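/*
 * Each of the first PCI_MAX_RESOURCE lines holds "<start> <end> <flags>" in
 * hexadecimal for one BAR; only memory resources (IORESOURCE_MEM) are
 * recorded in dev->mem_resource[].
 */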
static int
pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
{
	FILE *f;
	char buf[BUFSIZ];
	int i;
	uint64_t phys_addr, end_addr, flags;

	f = fopen(filename, "r");
	if (f == NULL) {
		RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
		return -1;
	}

	for (i = 0; i < PCI_MAX_RESOURCE; i++) {

		if (fgets(buf, sizeof(buf), f) == NULL) {
			RTE_LOG(ERR, EAL,
				"%s(): cannot read resource\n", __func__);
			goto error;
		}
		if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
				&end_addr, &flags) < 0)
			goto error;

		if (flags & IORESOURCE_MEM) {
			dev->mem_resource[i].phys_addr = phys_addr;
			dev->mem_resource[i].len = end_addr - phys_addr + 1;
			/* not mapped for now */
			dev->mem_resource[i].addr = NULL;
		}
	}
	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}

/* Scan one pci sysfs entry, and fill the devices list from it. */
static int
pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
{
	char filename[PATH_MAX];
	unsigned long tmp;
	struct rte_pci_device *dev;
	char driver[PATH_MAX];
	int ret;

	dev = malloc(sizeof(*dev));
	if (dev == NULL)
		return -1;

	memset(dev, 0, sizeof(*dev));
	dev->device.bus = &rte_pci_bus.bus;
	dev->addr = *addr;

	/* get vendor id */
	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.vendor_id = (uint16_t)tmp;

	/* get device id */
	snprintf(filename, sizeof(filename), "%s/device", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.device_id = (uint16_t)tmp;

	/* get subsystem_vendor id */
	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.subsystem_vendor_id = (uint16_t)tmp;

	/* get subsystem_device id */
	snprintf(filename, sizeof(filename), "%s/subsystem_device",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.subsystem_device_id = (uint16_t)tmp;

	/* get class_id */
	snprintf(filename, sizeof(filename), "%s/class",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	/* only the least significant 24 bits are valid: class, subclass, program interface */
	dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;

	/* get max_vfs */
	dev->max_vfs = 0;
	snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
	if (!access(filename, F_OK) &&
	    eal_parse_sysfs_value(filename, &tmp) == 0)
		dev->max_vfs = (uint16_t)tmp;
	else {
		/* for drivers other than igb_uio, sriov_numvfs needs kernel >= 3.8 */
		snprintf(filename, sizeof(filename),
			 "%s/sriov_numvfs", dirname);
		if (!access(filename, F_OK) &&
		    eal_parse_sysfs_value(filename, &tmp) == 0)
			dev->max_vfs = (uint16_t)tmp;
	}

	/* get numa node, default to 0 if not present */
	snprintf(filename, sizeof(filename), "%s/numa_node",
		 dirname);

	if (access(filename, F_OK) != -1) {
		if (eal_parse_sysfs_value(filename, &tmp) == 0)
			dev->device.numa_node = tmp;
		else
			dev->device.numa_node = -1;
	} else {
		dev->device.numa_node = 0;
	}

	pci_name_set(dev);

	/* parse resources */
	snprintf(filename, sizeof(filename), "%s/resource", dirname);
	if (pci_parse_sysfs_resource(filename, dev) < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
		free(dev);
		return -1;
	}

	/* parse driver */
	snprintf(filename, sizeof(filename), "%s/driver", dirname);
	ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Failed to get kernel driver\n");
		free(dev);
		return -1;
	}

	if (!ret) {
		if (!strcmp(driver, "vfio-pci"))
			dev->kdrv = RTE_KDRV_VFIO;
		else if (!strcmp(driver, "igb_uio"))
			dev->kdrv = RTE_KDRV_IGB_UIO;
		else if (!strcmp(driver, "uio_pci_generic"))
			dev->kdrv = RTE_KDRV_UIO_GENERIC;
		else
			dev->kdrv = RTE_KDRV_UNKNOWN;
	} else
		dev->kdrv = RTE_KDRV_NONE;

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;

			if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				if (!rte_dev_is_probed(&dev2->device)) {
					dev2->kdrv = dev->kdrv;
					dev2->max_vfs = dev->max_vfs;
					pci_name_set(dev2);
					memmove(dev2->mem_resource,
						dev->mem_resource,
						sizeof(dev->mem_resource));
				} else {
					/**
					 * If the device is already plugged and
					 * its driver is already probed (this
					 * happens when rte_dev_probe() scans
					 * all devices on the bus), nothing
					 * needs to be done here, unless...
					 **/
					if (dev2->kdrv != dev->kdrv ||
					    dev2->max_vfs != dev->max_vfs)
						/*
						 * This should not happen, but
						 * it is still possible if a
						 * device is unbound from vfio
						 * or uio before the hotplug
						 * remove and rebound with a
						 * different configuration.
						 * Just log the unexpected
						 * rescan as an alarm.
						 */
						RTE_LOG(ERR, EAL, "Unexpected device scan at %s!\n",
							filename);
				}
				free(dev);
			}
			return 0;
		}

		rte_pci_add_device(dev);
	}

	return 0;
}

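/*
 * Re-read the sysfs entry of a single device and insert or refresh the
 * corresponding entry in the PCI device list via pci_scan_one().
 */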
int
pci_update_device(const struct rte_pci_addr *addr)
{
	char filename[PATH_MAX];

	snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
		 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
		 addr->function);

	return pci_scan_one(filename, addr);
}

/*
 * split up a pci address into its constituent parts.
 */
static int
parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
{
	/* first split on ':' */
	union splitaddr {
		struct {
			char *domain;
			char *bus;
			char *devid;
			char *function;
		};
		char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
	} splitaddr;

	char *buf_copy = strndup(buf, bufsize);
	if (buf_copy == NULL)
		return -1;

	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
			!= PCI_FMT_NVAL - 1)
		goto error;
	/* final split is on '.' between devid and function */
	splitaddr.function = strchr(splitaddr.devid, '.');
	if (splitaddr.function == NULL)
		goto error;
	*splitaddr.function++ = '\0';

	/* now convert to int values */
	errno = 0;
	addr->domain = strtoul(splitaddr.domain, NULL, 16);
	addr->bus = strtoul(splitaddr.bus, NULL, 16);
	addr->devid = strtoul(splitaddr.devid, NULL, 16);
	addr->function = strtoul(splitaddr.function, NULL, 10);
	if (errno != 0)
		goto error;

	free(buf_copy); /* free the copy made with strndup */
	return 0;
error:
	free(buf_copy);
	return -1;
}

/*
 * Scan the content of the PCI bus, and add the devices to the devices list.
 */
int
rte_pci_scan(void)
{
	struct dirent *e;
	DIR *dir;
	char dirname[PATH_MAX];
	struct rte_pci_addr addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

#ifdef VFIO_PRESENT
	if (!pci_vfio_is_enabled())
		RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n");
#endif

	dir = opendir(rte_pci_get_sysfs_path());
	if (dir == NULL) {
		RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
			__func__, strerror(errno));
		return -1;
	}

	while ((e = readdir(dir)) != NULL) {
		if (e->d_name[0] == '.')
			continue;

		if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
			continue;

		snprintf(dirname, sizeof(dirname), "%s/%s",
				rte_pci_get_sysfs_path(), e->d_name);

		if (pci_scan_one(dirname, &addr) < 0)
			goto error;
	}
	closedir(dir);
	return 0;

error:
	closedir(dir);
	return -1;
}

/*
 * Is any pci device bound to a kernel driver?
 */
static inline int
pci_one_device_is_bound(void)
{
	struct rte_pci_device *dev = NULL;
	int ret = 0;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN ||
		    dev->kdrv == RTE_KDRV_NONE) {
			continue;
		} else {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * Is any of the devices bound to uio?
 */
static inline int
pci_one_device_bound_uio(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_devargs *devargs;
	int need_check;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		devargs = dev->device.devargs;

		need_check = 0;
		switch (rte_pci_bus.bus.conf.scan_mode) {
		case RTE_BUS_SCAN_WHITELIST:
			if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
				need_check = 1;
			break;
		case RTE_BUS_SCAN_UNDEFINED:
		case RTE_BUS_SCAN_BLACKLIST:
			if (devargs == NULL ||
			    devargs->policy != RTE_DEV_BLACKLISTED)
				need_check = 1;
			break;
		}

		if (!need_check)
			continue;

		if (dev->kdrv == RTE_KDRV_IGB_UIO ||
		    dev->kdrv == RTE_KDRV_UIO_GENERIC) {
			return 1;
		}
	}
	return 0;
}

/*
 * Does any of the devices request iova as va?
 */
static inline int
pci_one_device_has_iova_va(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_pci_driver *drv = NULL;

	FOREACH_DRIVER_ON_PCIBUS(drv) {
		if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) {
			FOREACH_DEVICE_ON_PCIBUS(dev) {
				if (dev->kdrv == RTE_KDRV_VFIO &&
				    rte_pci_match(drv, dev))
					return 1;
			}
		}
	}
	return 0;
}

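/*
 * Check whether the IOMMU serving a device can handle virtual addresses as
 * IOVA. On x86, read the VT-d capability register exposed in sysfs and
 * register the resulting MGAW as a DMA mask to be validated once memory is
 * initialized; on ppc64 VA is not supported, other architectures are assumed
 * to support it.
 */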
#if defined(RTE_ARCH_X86)
static bool
pci_one_device_iommu_support_va(struct rte_pci_device *dev)
{
#define VTD_CAP_MGAW_SHIFT	16
#define VTD_CAP_MGAW_MASK	(0x3fULL << VTD_CAP_MGAW_SHIFT)
#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */
	struct rte_pci_addr *addr = &dev->addr;
	char filename[PATH_MAX];
	FILE *fp;
	uint64_t mgaw, vtd_cap_reg = 0;

	snprintf(filename, sizeof(filename),
		 "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap",
		 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
		 addr->function);
	if (access(filename, F_OK) == -1) {
		/* We don't have an Intel IOMMU, assume VA supported */
		return true;
	}

	/* We have an Intel IOMMU */
	fp = fopen(filename, "r");
	if (fp == NULL) {
		RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename);
		return false;
	}

	if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) {
		RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename);
		fclose(fp);
		return false;
	}

	fclose(fp);

	mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;

	/*
	 * Assume there is no limitation for now. We cannot know at this point
	 * because the memory has not been initialized yet. Setting the DMA
	 * mask will force a check once memory initialization is done. We
	 * cannot fall back to IOVA PA now, but if the DMA check fails, the
	 * error message should advise using '--iova-mode pa' if IOVA VA is
	 * the current mode.
	 */
	rte_mem_set_dma_mask(mgaw);
	return true;
}
#elif defined(RTE_ARCH_PPC_64)
static bool
pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
{
	return false;
}
#else
static bool
pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
{
	return true;
}
#endif

/*
 * All devices' IOMMUs support VA as IOVA
 */
static bool
pci_devices_iommu_support_va(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_pci_driver *drv = NULL;

	FOREACH_DRIVER_ON_PCIBUS(drv) {
		FOREACH_DEVICE_ON_PCIBUS(dev) {
			if (!rte_pci_match(drv, dev))
				continue;
			/*
			 * just one PCI device needs to be checked because
			 * the IOMMU hardware is the same for all of them.
			 */
			return pci_one_device_iommu_support_va(dev);
		}
	}
	return true;
}

/*
 * Get iommu class of PCI devices on the bus.
 */
enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	bool is_bound;
	bool is_vfio_noiommu_enabled = true;
	bool has_iova_va;
	bool is_bound_uio;
	bool iommu_no_va;

	is_bound = pci_one_device_is_bound();
	if (!is_bound)
		return RTE_IOVA_DC;

	has_iova_va = pci_one_device_has_iova_va();
	is_bound_uio = pci_one_device_bound_uio();
	iommu_no_va = !pci_devices_iommu_support_va();
#ifdef VFIO_PRESENT
	is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ?
					true : false;
#endif

	if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled &&
			!iommu_no_va)
		return RTE_IOVA_VA;

	if (has_iova_va) {
		RTE_LOG(WARNING, EAL, "Some devices want iova as va but pa will be used because: ");
		if (is_vfio_noiommu_enabled)
			RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n");
		if (is_bound_uio)
			RTE_LOG(WARNING, EAL, "some devices are bound to UIO\n");
		if (iommu_no_va)
			RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n");
	}

	return RTE_IOVA_PA;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *device,
		void *buf, size_t len, off_t offset)
{
	char devname[RTE_DEV_NAME_MAX_LEN] = "";
	const struct rte_intr_handle *intr_handle = &device->intr_handle;

	switch (device->kdrv) {
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		return pci_uio_read_config(intr_handle, buf, len, offset);
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		return pci_vfio_read_config(intr_handle, buf, len, offset);
#endif
	default:
		rte_pci_device_name(&device->addr, devname,
				    RTE_DEV_NAME_MAX_LEN);
		RTE_LOG(ERR, EAL,
			"Unknown driver type for %s\n", devname);
		return -1;
	}
}

/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *device,
		const void *buf, size_t len, off_t offset)
{
	char devname[RTE_DEV_NAME_MAX_LEN] = "";
	const struct rte_intr_handle *intr_handle = &device->intr_handle;

	switch (device->kdrv) {
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		return pci_uio_write_config(intr_handle, buf, len, offset);
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		return pci_vfio_write_config(intr_handle, buf, len, offset);
#endif
	default:
		rte_pci_device_name(&device->addr, devname,
				    RTE_DEV_NAME_MAX_LEN);
		RTE_LOG(ERR, EAL,
			"Unknown driver type for %s\n", devname);
		return -1;
	}
}

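/*
 * On x86, the I/O port range of a device can be found without any kernel
 * driver by matching its PCI address in /proc/ioports; the base port found
 * here is stored in p->base for the ioport read/write helpers.
 */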
#if defined(RTE_ARCH_X86)
static int
pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
		struct rte_pci_ioport *p)
{
	uint16_t start, end;
	FILE *fp;
	char *line = NULL;
	char pci_id[16];
	int found = 0;
	size_t linesz;

	snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
		 dev->addr.domain, dev->addr.bus,
		 dev->addr.devid, dev->addr.function);

	fp = fopen("/proc/ioports", "r");
	if (fp == NULL) {
		RTE_LOG(ERR, EAL, "%s(): can't open ioports\n", __func__);
		return -1;
	}

	while (getdelim(&line, &linesz, '\n', fp) > 0) {
		char *ptr = line;
		char *left;
		int n;

		n = strcspn(ptr, ":");
		ptr[n] = 0;
		left = &ptr[n + 1];

		while (*left && isspace(*left))
			left++;

		if (!strncmp(left, pci_id, strlen(pci_id))) {
			found = 1;

			while (*ptr && isspace(*ptr))
				ptr++;

			sscanf(ptr, "%04hx-%04hx", &start, &end);

			break;
		}
	}

	free(line);
	fclose(fp);

	if (!found)
		return -1;

	p->base = start;
	RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);

	return 0;
}
#endif

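/*
 * Map an I/O port BAR, dispatching on the kernel driver the device is bound
 * to; on x86, uio_pci_generic and unbound devices use the /proc/ioports
 * lookup above instead of a UIO mapping.
 */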
int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_map(dev, bar, p);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		ret = pci_uio_ioport_map(dev, bar, p);
		break;
	case RTE_KDRV_UIO_GENERIC:
#if defined(RTE_ARCH_X86)
		ret = pci_ioport_map(dev, bar, p);
#else
		ret = pci_uio_ioport_map(dev, bar, p);
#endif
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		ret = pci_ioport_map(dev, bar, p);
#endif
		break;
	default:
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		    void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		pci_vfio_ioport_read(p, data, len, offset);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	case RTE_KDRV_UIO_GENERIC:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		pci_uio_ioport_read(p, data, len, offset);
#endif
		break;
	default:
		break;
	}
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		     const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		pci_vfio_ioport_write(p, data, len, offset);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	case RTE_KDRV_UIO_GENERIC:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		pci_uio_ioport_write(p, data, len, offset);
#endif
		break;
	default:
		break;
	}
}

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_unmap(p);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		ret = pci_uio_ioport_unmap(p);
		break;
	case RTE_KDRV_UIO_GENERIC:
#if defined(RTE_ARCH_X86)
		ret = 0;
#else
		ret = pci_uio_ioport_unmap(p);
#endif
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		ret = 0;
#endif
		break;
	default:
		break;
	}

	return ret;
}