/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <dirent.h>

#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
#include <rte_vfio.h>

#include "eal_filesystem.h"

#include "private.h"
#include "pci_init.h"

/**
 * @file
 * PCI probing under Linux
 *
 * This code simulates a PCI probe by parsing information in sysfs.
 * When a registered device matches a driver, it is initialized through
 * the kernel driver it is bound to (igb_uio, uio_pci_generic or vfio-pci);
 * devices that are not bound to a supported driver are not initialized.
 */

extern struct rte_pci_bus rte_pci_bus;

static int
pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
		size_t len)
{
	int count;
	char path[PATH_MAX];
	char *name;

	if (!filename || !dri_name)
		return -1;

	count = readlink(filename, path, PATH_MAX);
	if (count >= PATH_MAX)
		return -1;

	/* the device has no driver bound to it */
	if (count < 0)
		return 1;

	path[count] = '\0';

	name = strrchr(path, '/');
	if (name) {
		strlcpy(dri_name, name + 1, len);
		return 0;
	}

	return -1;
}

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			ret = pci_vfio_map_resource(dev);
#endif
		break;
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		if (rte_eal_using_phys_addrs()) {
			/* map resources for devices that use uio */
			ret = pci_uio_map_resource(dev);
		}
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			pci_vfio_unmap_resource(dev);
#endif
		break;
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

static int
find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
	size_t sz = msl->len;
	void *end_va = RTE_PTR_ADD(msl->base_va, sz);
	void **max_va = arg;

	if (*max_va < end_va)
		*max_va = end_va;
	return 0;
}

void *
pci_find_max_end_va(void)
{
	void *va = NULL;

	rte_memseg_list_walk(find_max_end_va, &va);
	return va;
}

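/*
 * For reference, the sysfs "resource" file parsed below contains one line
 * per BAR, each holding the start address, end address and flags as
 * hexadecimal values. An illustrative line (values are examples only):
 *
 *	0x00000000fb000000 0x00000000fb7fffff 0x000000000014220c
 */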
/* parse one line of the "resource" sysfs file (note that the 'line'
 * string is modified)
 */
int
pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
	uint64_t *end_addr, uint64_t *flags)
{
	union pci_resource_info {
		struct {
			char *phys_addr;
			char *end_addr;
			char *flags;
		};
		char *ptrs[PCI_RESOURCE_FMT_NVAL];
	} res_info;

	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}
	errno = 0;
	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
	*end_addr = strtoull(res_info.end_addr, NULL, 16);
	*flags = strtoull(res_info.flags, NULL, 16);
	if (errno != 0) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}

	return 0;
}

/* parse the "resource" sysfs file */
static int
pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
{
	FILE *f;
	char buf[BUFSIZ];
	int i;
	uint64_t phys_addr, end_addr, flags;

	f = fopen(filename, "r");
	if (f == NULL) {
		RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
		return -1;
	}

	for (i = 0; i < PCI_MAX_RESOURCE; i++) {

		if (fgets(buf, sizeof(buf), f) == NULL) {
			RTE_LOG(ERR, EAL,
				"%s(): cannot read resource\n", __func__);
			goto error;
		}
		if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
				&end_addr, &flags) < 0)
			goto error;

		if (flags & IORESOURCE_MEM) {
			dev->mem_resource[i].phys_addr = phys_addr;
			dev->mem_resource[i].len = end_addr - phys_addr + 1;
			/* not mapped for now */
			dev->mem_resource[i].addr = NULL;
		}
	}
	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}

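/*
 * Sketch of the sysfs layout consumed by pci_scan_one() below. Paths are
 * relative to rte_pci_get_sysfs_path(), typically /sys/bus/pci/devices;
 * the address and values shown are illustrative only:
 *
 *	0000:01:00.0/vendor		-> e.g. 0x8086
 *	0000:01:00.0/device		-> e.g. 0x10fb
 *	0000:01:00.0/subsystem_vendor	-> subsystem vendor id
 *	0000:01:00.0/subsystem_device	-> subsystem device id
 *	0000:01:00.0/class		-> class/subclass/program interface
 *	0000:01:00.0/max_vfs or sriov_numvfs, numa_node, resource
 *	0000:01:00.0/driver		-> symlink to the bound kernel driver
 */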
/* Scan one pci sysfs entry, and fill the devices list from it. */
static int
pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
{
	char filename[PATH_MAX];
	unsigned long tmp;
	struct rte_pci_device *dev;
	char driver[PATH_MAX];
	int ret;

	dev = malloc(sizeof(*dev));
	if (dev == NULL)
		return -1;

	memset(dev, 0, sizeof(*dev));
	dev->device.bus = &rte_pci_bus.bus;
	dev->addr = *addr;

	/* get vendor id */
	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.vendor_id = (uint16_t)tmp;

	/* get device id */
	snprintf(filename, sizeof(filename), "%s/device", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.device_id = (uint16_t)tmp;

	/* get subsystem_vendor id */
	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.subsystem_vendor_id = (uint16_t)tmp;

	/* get subsystem_device id */
	snprintf(filename, sizeof(filename), "%s/subsystem_device",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.subsystem_device_id = (uint16_t)tmp;

	/* get class_id */
	snprintf(filename, sizeof(filename), "%s/class",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	/* the least significant 24 bits are valid:
	 * class, subclass and program interface
	 */
	dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;

	/* get max_vfs */
	dev->max_vfs = 0;
	snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
	if (!access(filename, F_OK) &&
	    eal_parse_sysfs_value(filename, &tmp) == 0)
		dev->max_vfs = (uint16_t)tmp;
	else {
		/* for non-igb_uio drivers, sriov_numvfs needs kernel >= 3.8 */
		snprintf(filename, sizeof(filename),
			 "%s/sriov_numvfs", dirname);
		if (!access(filename, F_OK) &&
		    eal_parse_sysfs_value(filename, &tmp) == 0)
			dev->max_vfs = (uint16_t)tmp;
	}

	/* get numa node, default to 0 if not present */
	snprintf(filename, sizeof(filename), "%s/numa_node",
		 dirname);

	if (access(filename, F_OK) != -1) {
		if (eal_parse_sysfs_value(filename, &tmp) == 0)
			dev->device.numa_node = tmp;
		else
			dev->device.numa_node = -1;
	} else {
		dev->device.numa_node = 0;
	}

	pci_name_set(dev);

	/* parse resources */
	snprintf(filename, sizeof(filename), "%s/resource", dirname);
	if (pci_parse_sysfs_resource(filename, dev) < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
		free(dev);
		return -1;
	}

	/* parse driver */
	snprintf(filename, sizeof(filename), "%s/driver", dirname);
	ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Failed to get kernel driver\n");
		free(dev);
		return -1;
	}

	if (!ret) {
		if (!strcmp(driver, "vfio-pci"))
			dev->kdrv = RTE_KDRV_VFIO;
		else if (!strcmp(driver, "igb_uio"))
			dev->kdrv = RTE_KDRV_IGB_UIO;
		else if (!strcmp(driver, "uio_pci_generic"))
			dev->kdrv = RTE_KDRV_UIO_GENERIC;
		else
			dev->kdrv = RTE_KDRV_UNKNOWN;
	} else
		dev->kdrv = RTE_KDRV_NONE;

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;

			if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_name_set(dev2);
				memmove(dev2->mem_resource, dev->mem_resource,
					sizeof(dev->mem_resource));
				free(dev);
			}
			return 0;
		}

		rte_pci_add_device(dev);
	}

	return 0;
}

int
pci_update_device(const struct rte_pci_addr *addr)
{
	char filename[PATH_MAX];

	snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
		 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
		 addr->function);

	return pci_scan_one(filename, addr);
}

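/*
 * Sketch of how an address is split by parse_pci_addr_format() below
 * (the address is an example only): "0000:01:00.0" is first split on ':'
 * into domain "0000", bus "01" and "00.0", then on '.' into devid "00"
 * and function "0", before each part is converted with strtoul().
 */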
/*
 * split up a pci address into its constituent parts.
 */
static int
parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
{
	/* first split on ':' */
	union splitaddr {
		struct {
			char *domain;
			char *bus;
			char *devid;
			char *function;
		};
		char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
	} splitaddr;

	char *buf_copy = strndup(buf, bufsize);
	if (buf_copy == NULL)
		return -1;

	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
			!= PCI_FMT_NVAL - 1)
		goto error;
	/* final split is on '.' between devid and function */
	splitaddr.function = strchr(splitaddr.devid, '.');
	if (splitaddr.function == NULL)
		goto error;
	*splitaddr.function++ = '\0';

	/* now convert to int values */
	errno = 0;
	addr->domain = strtoul(splitaddr.domain, NULL, 16);
	addr->bus = strtoul(splitaddr.bus, NULL, 16);
	addr->devid = strtoul(splitaddr.devid, NULL, 16);
	addr->function = strtoul(splitaddr.function, NULL, 10);
	if (errno != 0)
		goto error;

	free(buf_copy); /* free the copy made with strndup */
	return 0;
error:
	free(buf_copy);
	return -1;
}

/*
 * Scan the content of the PCI bus, and the devices in the devices
 * list
 */
int
rte_pci_scan(void)
{
	struct dirent *e;
	DIR *dir;
	char dirname[PATH_MAX];
	struct rte_pci_addr addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

#ifdef VFIO_PRESENT
	if (!pci_vfio_is_enabled())
		RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n");
#endif

	dir = opendir(rte_pci_get_sysfs_path());
	if (dir == NULL) {
		RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
			__func__, strerror(errno));
		return -1;
	}

	while ((e = readdir(dir)) != NULL) {
		if (e->d_name[0] == '.')
			continue;

		if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
			continue;

		snprintf(dirname, sizeof(dirname), "%s/%s",
			 rte_pci_get_sysfs_path(), e->d_name);

		if (pci_scan_one(dirname, &addr) < 0)
			goto error;
	}
	closedir(dir);
	return 0;

error:
	closedir(dir);
	return -1;
}

/*
 * Check whether at least one PCI device is bound to a kernel driver.
 */
static inline int
pci_one_device_is_bound(void)
{
	struct rte_pci_device *dev = NULL;
	int ret = 0;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN ||
		    dev->kdrv == RTE_KDRV_NONE) {
			continue;
		} else {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * Check whether any device that will be probed is bound to a UIO driver.
 */
static inline int
pci_one_device_bound_uio(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_devargs *devargs;
	int need_check;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		devargs = dev->device.devargs;

		need_check = 0;
		switch (rte_pci_bus.bus.conf.scan_mode) {
		case RTE_BUS_SCAN_WHITELIST:
			if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
				need_check = 1;
			break;
		case RTE_BUS_SCAN_UNDEFINED:
		case RTE_BUS_SCAN_BLACKLIST:
			if (devargs == NULL ||
			    devargs->policy != RTE_DEV_BLACKLISTED)
				need_check = 1;
			break;
		}

		if (!need_check)
			continue;

		if (dev->kdrv == RTE_KDRV_IGB_UIO ||
		    dev->kdrv == RTE_KDRV_UIO_GENERIC) {
			return 1;
		}
	}
	return 0;
}

/*
 * Check whether any device requests IOVA as VA.
 */
static inline int
pci_one_device_has_iova_va(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_pci_driver *drv = NULL;

	FOREACH_DRIVER_ON_PCIBUS(drv) {
		if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) {
			FOREACH_DEVICE_ON_PCIBUS(dev) {
				if (dev->kdrv == RTE_KDRV_VFIO &&
				    rte_pci_match(drv, dev))
					return 1;
			}
		}
	}
	return 0;
}

#if defined(RTE_ARCH_X86)
static bool
pci_one_device_iommu_support_va(struct rte_pci_device *dev)
{
#define VTD_CAP_MGAW_SHIFT	16
#define VTD_CAP_MGAW_MASK	(0x3fULL << VTD_CAP_MGAW_SHIFT)
#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */
	struct rte_pci_addr *addr = &dev->addr;
	char filename[PATH_MAX];
	FILE *fp;
	uint64_t mgaw, vtd_cap_reg = 0;

	snprintf(filename, sizeof(filename),
		 "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap",
		 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
		 addr->function);
	if (access(filename, F_OK) == -1) {
		/* We don't have an Intel IOMMU, assume VA is supported */
		return true;
	}

	/* We have an Intel IOMMU */
	fp = fopen(filename, "r");
	if (fp == NULL) {
		RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename);
		return false;
	}

	if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) {
		RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename);
		fclose(fp);
		return false;
	}

	fclose(fp);

	/* the MGAW capability field encodes the supported guest address
	 * width minus one
	 */
	mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;

	return rte_eal_check_dma_mask(mgaw) == 0 ? true : false;
}
#elif defined(RTE_ARCH_PPC_64)
static bool
pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
{
	return false;
}
#else
static bool
pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
{
	return true;
}
#endif

/*
 * Check whether the IOMMUs of all devices support using VA as IOVA.
 */
static bool
pci_devices_iommu_support_va(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_pci_driver *drv = NULL;

	FOREACH_DRIVER_ON_PCIBUS(drv) {
		FOREACH_DEVICE_ON_PCIBUS(dev) {
			if (!rte_pci_match(drv, dev))
				continue;
			/*
			 * just one PCI device needs to be checked out because
			 * the IOMMU hardware is the same for all of them.
			 */
			return pci_one_device_iommu_support_va(dev);
		}
	}
	return true;
}

/*
 * Get iommu class of PCI devices on the bus.
 */
enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	bool is_bound;
	bool is_vfio_noiommu_enabled = true;
	bool has_iova_va;
	bool is_bound_uio;
	bool iommu_no_va;

	is_bound = pci_one_device_is_bound();
	if (!is_bound)
		return RTE_IOVA_DC;

	has_iova_va = pci_one_device_has_iova_va();
	is_bound_uio = pci_one_device_bound_uio();
	iommu_no_va = !pci_devices_iommu_support_va();
#ifdef VFIO_PRESENT
	is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ?
			true : false;
#endif

	/* IOVA as VA is only possible when all conditions hold: at least
	 * one device wants it, no device is bound to UIO, VFIO is not in
	 * noiommu mode and the IOMMU supports the VA range.
	 */
	if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled &&
			!iommu_no_va)
		return RTE_IOVA_VA;

	if (has_iova_va) {
		RTE_LOG(WARNING, EAL, "Some devices want IOVA as VA but PA will be used because: ");
		if (is_vfio_noiommu_enabled)
			RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n");
		if (is_bound_uio)
			RTE_LOG(WARNING, EAL, "some devices are bound to UIO\n");
		if (iommu_no_va)
			RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n");
	}

	return RTE_IOVA_PA;
}

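/*
 * Illustrative use of the config-space accessors below (error handling
 * omitted; "dev" stands for a probed rte_pci_device and 0 is the standard
 * config-space offset of the 16-bit vendor ID register):
 *
 *	uint16_t vendor_id;
 *
 *	rte_pci_read_config(dev, &vendor_id, sizeof(vendor_id), 0);
 */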
/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *device,
		void *buf, size_t len, off_t offset)
{
	char devname[RTE_DEV_NAME_MAX_LEN] = "";
	const struct rte_intr_handle *intr_handle = &device->intr_handle;

	switch (device->kdrv) {
	case RTE_KDRV_IGB_UIO:
		return pci_uio_read_config(intr_handle, buf, len, offset);
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		return pci_vfio_read_config(intr_handle, buf, len, offset);
#endif
	default:
		rte_pci_device_name(&device->addr, devname,
				    RTE_DEV_NAME_MAX_LEN);
		RTE_LOG(ERR, EAL,
			"Unknown driver type for %s\n", devname);
		return -1;
	}
}

/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *device,
		const void *buf, size_t len, off_t offset)
{
	char devname[RTE_DEV_NAME_MAX_LEN] = "";
	const struct rte_intr_handle *intr_handle = &device->intr_handle;

	switch (device->kdrv) {
	case RTE_KDRV_IGB_UIO:
		return pci_uio_write_config(intr_handle, buf, len, offset);
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		return pci_vfio_write_config(intr_handle, buf, len, offset);
#endif
	default:
		rte_pci_device_name(&device->addr, devname,
				    RTE_DEV_NAME_MAX_LEN);
		RTE_LOG(ERR, EAL,
			"Unknown driver type for %s\n", devname);
		return -1;
	}
}

#if defined(RTE_ARCH_X86)
static int
pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
	       struct rte_pci_ioport *p)
{
	uint16_t start, end;
	FILE *fp;
	char *line = NULL;
	char pci_id[16];
	int found = 0;
	size_t linesz;

	snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
		 dev->addr.domain, dev->addr.bus,
		 dev->addr.devid, dev->addr.function);

	fp = fopen("/proc/ioports", "r");
	if (fp == NULL) {
		RTE_LOG(ERR, EAL, "%s(): can't open ioports\n", __func__);
		return -1;
	}

	/* each line of /proc/ioports is "<start>-<end> : <owner>" */
	while (getdelim(&line, &linesz, '\n', fp) > 0) {
		char *ptr = line;
		char *left;
		int n;

		n = strcspn(ptr, ":");
		ptr[n] = 0;
		left = &ptr[n + 1];

		while (*left && isspace(*left))
			left++;

		if (!strncmp(left, pci_id, strlen(pci_id))) {
			found = 1;

			while (*ptr && isspace(*ptr))
				ptr++;

			sscanf(ptr, "%04hx-%04hx", &start, &end);

			break;
		}
	}

	free(line);
	fclose(fp);

	if (!found)
		return -1;

	p->base = start;
	RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);

	return 0;
}
#endif

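/*
 * Illustrative sequence for the I/O port API defined below ("dev" stands
 * for a probed rte_pci_device, "bar" for an I/O BAR index; error handling
 * omitted):
 *
 *	struct rte_pci_ioport io;
 *	uint8_t val = 0;
 *
 *	rte_pci_ioport_map(dev, bar, &io);
 *	rte_pci_ioport_read(&io, &val, sizeof(val), 0);
 *	rte_pci_ioport_write(&io, &val, sizeof(val), 0);
 *	rte_pci_ioport_unmap(&io);
 */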
int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_map(dev, bar, p);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		ret = pci_uio_ioport_map(dev, bar, p);
		break;
	case RTE_KDRV_UIO_GENERIC:
#if defined(RTE_ARCH_X86)
		ret = pci_ioport_map(dev, bar, p);
#else
		ret = pci_uio_ioport_map(dev, bar, p);
#endif
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		ret = pci_ioport_map(dev, bar, p);
#endif
		break;
	default:
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		pci_vfio_ioport_read(p, data, len, offset);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	case RTE_KDRV_UIO_GENERIC:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		pci_uio_ioport_read(p, data, len, offset);
#endif
		break;
	default:
		break;
	}
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		pci_vfio_ioport_write(p, data, len, offset);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	case RTE_KDRV_UIO_GENERIC:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		pci_uio_ioport_write(p, data, len, offset);
#endif
		break;
	default:
		break;
	}
}

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_unmap(p);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		ret = pci_uio_ioport_unmap(p);
		break;
	case RTE_KDRV_UIO_GENERIC:
#if defined(RTE_ARCH_X86)
		ret = 0;
#else
		ret = pci_uio_ioport_unmap(p);
#endif
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		ret = 0;
#endif
		break;
	default:
		break;
	}

	return ret;
}