1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #include <string.h> 6 #include <dirent.h> 7 8 #include <rte_log.h> 9 #include <rte_bus.h> 10 #include <rte_pci.h> 11 #include <rte_bus_pci.h> 12 #include <rte_eal_memconfig.h> 13 #include <rte_malloc.h> 14 #include <rte_devargs.h> 15 #include <rte_memcpy.h> 16 #include <rte_vfio.h> 17 18 #include "eal_private.h" 19 #include "eal_filesystem.h" 20 21 #include "private.h" 22 #include "pci_init.h" 23 24 /** 25 * @file 26 * PCI probing under linux 27 * 28 * This code is used to simulate a PCI probe by parsing information in sysfs. 29 * When a registered device matches a driver, it is then initialized with 30 * IGB_UIO driver (or doesn't initialize, if the device wasn't bound to it). 31 */ 32 33 extern struct rte_pci_bus rte_pci_bus; 34 35 static int 36 pci_get_kernel_driver_by_path(const char *filename, char *dri_name, 37 size_t len) 38 { 39 int count; 40 char path[PATH_MAX]; 41 char *name; 42 43 if (!filename || !dri_name) 44 return -1; 45 46 count = readlink(filename, path, PATH_MAX); 47 if (count >= PATH_MAX) 48 return -1; 49 50 /* For device does not have a driver */ 51 if (count < 0) 52 return 1; 53 54 path[count] = '\0'; 55 56 name = strrchr(path, '/'); 57 if (name) { 58 strlcpy(dri_name, name + 1, len); 59 return 0; 60 } 61 62 return -1; 63 } 64 65 /* Map pci device */ 66 int 67 rte_pci_map_device(struct rte_pci_device *dev) 68 { 69 int ret = -1; 70 71 /* try mapping the NIC resources using VFIO if it exists */ 72 switch (dev->kdrv) { 73 case RTE_KDRV_VFIO: 74 #ifdef VFIO_PRESENT 75 if (pci_vfio_is_enabled()) 76 ret = pci_vfio_map_resource(dev); 77 #endif 78 break; 79 case RTE_KDRV_IGB_UIO: 80 case RTE_KDRV_UIO_GENERIC: 81 if (rte_eal_using_phys_addrs()) { 82 /* map resources for devices that use uio */ 83 ret = pci_uio_map_resource(dev); 84 } 85 break; 86 default: 87 RTE_LOG(DEBUG, EAL, 88 " Not managed by a supported kernel driver, skipped\n"); 89 ret = 1; 90 break; 91 } 92 93 return ret; 94 } 95 96 /* Unmap pci device */ 97 void 98 rte_pci_unmap_device(struct rte_pci_device *dev) 99 { 100 /* try unmapping the NIC resources using VFIO if it exists */ 101 switch (dev->kdrv) { 102 case RTE_KDRV_VFIO: 103 #ifdef VFIO_PRESENT 104 if (pci_vfio_is_enabled()) 105 pci_vfio_unmap_resource(dev); 106 #endif 107 break; 108 case RTE_KDRV_IGB_UIO: 109 case RTE_KDRV_UIO_GENERIC: 110 /* unmap resources for devices that use uio */ 111 pci_uio_unmap_resource(dev); 112 break; 113 default: 114 RTE_LOG(DEBUG, EAL, 115 " Not managed by a supported kernel driver, skipped\n"); 116 break; 117 } 118 } 119 120 static int 121 find_max_end_va(const struct rte_memseg_list *msl, void *arg) 122 { 123 size_t sz = msl->memseg_arr.len * msl->page_sz; 124 void *end_va = RTE_PTR_ADD(msl->base_va, sz); 125 void **max_va = arg; 126 127 if (*max_va < end_va) 128 *max_va = end_va; 129 return 0; 130 } 131 132 void * 133 pci_find_max_end_va(void) 134 { 135 void *va = NULL; 136 137 rte_memseg_list_walk(find_max_end_va, &va); 138 return va; 139 } 140 141 142 /* parse one line of the "resource" sysfs file (note that the 'line' 143 * string is modified) 144 */ 145 int 146 pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr, 147 uint64_t *end_addr, uint64_t *flags) 148 { 149 union pci_resource_info { 150 struct { 151 char *phys_addr; 152 char *end_addr; 153 char *flags; 154 }; 155 char *ptrs[PCI_RESOURCE_FMT_NVAL]; 156 } res_info; 157 158 if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) { 159 RTE_LOG(ERR, EAL, 160 "%s(): bad resource format\n", __func__); 161 return -1; 162 } 163 errno = 0; 164 *phys_addr = strtoull(res_info.phys_addr, NULL, 16); 165 *end_addr = strtoull(res_info.end_addr, NULL, 16); 166 *flags = strtoull(res_info.flags, NULL, 16); 167 if (errno != 0) { 168 RTE_LOG(ERR, EAL, 169 "%s(): bad resource format\n", __func__); 170 return -1; 171 } 172 173 return 0; 174 } 175 176 /* parse the "resource" sysfs file */ 177 static int 178 pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev) 179 { 180 FILE *f; 181 char buf[BUFSIZ]; 182 int i; 183 uint64_t phys_addr, end_addr, flags; 184 185 f = fopen(filename, "r"); 186 if (f == NULL) { 187 RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n"); 188 return -1; 189 } 190 191 for (i = 0; i<PCI_MAX_RESOURCE; i++) { 192 193 if (fgets(buf, sizeof(buf), f) == NULL) { 194 RTE_LOG(ERR, EAL, 195 "%s(): cannot read resource\n", __func__); 196 goto error; 197 } 198 if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr, 199 &end_addr, &flags) < 0) 200 goto error; 201 202 if (flags & IORESOURCE_MEM) { 203 dev->mem_resource[i].phys_addr = phys_addr; 204 dev->mem_resource[i].len = end_addr - phys_addr + 1; 205 /* not mapped for now */ 206 dev->mem_resource[i].addr = NULL; 207 } 208 } 209 fclose(f); 210 return 0; 211 212 error: 213 fclose(f); 214 return -1; 215 } 216 217 /* Scan one pci sysfs entry, and fill the devices list from it. */ 218 static int 219 pci_scan_one(const char *dirname, const struct rte_pci_addr *addr) 220 { 221 char filename[PATH_MAX]; 222 unsigned long tmp; 223 struct rte_pci_device *dev; 224 char driver[PATH_MAX]; 225 int ret; 226 227 dev = malloc(sizeof(*dev)); 228 if (dev == NULL) 229 return -1; 230 231 memset(dev, 0, sizeof(*dev)); 232 dev->addr = *addr; 233 234 /* get vendor id */ 235 snprintf(filename, sizeof(filename), "%s/vendor", dirname); 236 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 237 free(dev); 238 return -1; 239 } 240 dev->id.vendor_id = (uint16_t)tmp; 241 242 /* get device id */ 243 snprintf(filename, sizeof(filename), "%s/device", dirname); 244 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 245 free(dev); 246 return -1; 247 } 248 dev->id.device_id = (uint16_t)tmp; 249 250 /* get subsystem_vendor id */ 251 snprintf(filename, sizeof(filename), "%s/subsystem_vendor", 252 dirname); 253 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 254 free(dev); 255 return -1; 256 } 257 dev->id.subsystem_vendor_id = (uint16_t)tmp; 258 259 /* get subsystem_device id */ 260 snprintf(filename, sizeof(filename), "%s/subsystem_device", 261 dirname); 262 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 263 free(dev); 264 return -1; 265 } 266 dev->id.subsystem_device_id = (uint16_t)tmp; 267 268 /* get class_id */ 269 snprintf(filename, sizeof(filename), "%s/class", 270 dirname); 271 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 272 free(dev); 273 return -1; 274 } 275 /* the least 24 bits are valid: class, subclass, program interface */ 276 dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID; 277 278 /* get max_vfs */ 279 dev->max_vfs = 0; 280 snprintf(filename, sizeof(filename), "%s/max_vfs", dirname); 281 if (!access(filename, F_OK) && 282 eal_parse_sysfs_value(filename, &tmp) == 0) 283 dev->max_vfs = (uint16_t)tmp; 284 else { 285 /* for non igb_uio driver, need kernel version >= 3.8 */ 286 snprintf(filename, sizeof(filename), 287 "%s/sriov_numvfs", dirname); 288 if (!access(filename, F_OK) && 289 eal_parse_sysfs_value(filename, &tmp) == 0) 290 dev->max_vfs = (uint16_t)tmp; 291 } 292 293 /* get numa node, default to 0 if not present */ 294 snprintf(filename, sizeof(filename), "%s/numa_node", 295 dirname); 296 297 if (access(filename, F_OK) != -1) { 298 if (eal_parse_sysfs_value(filename, &tmp) == 0) 299 dev->device.numa_node = tmp; 300 else 301 dev->device.numa_node = -1; 302 } else { 303 dev->device.numa_node = 0; 304 } 305 306 pci_name_set(dev); 307 308 /* parse resources */ 309 snprintf(filename, sizeof(filename), "%s/resource", dirname); 310 if (pci_parse_sysfs_resource(filename, dev) < 0) { 311 RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__); 312 free(dev); 313 return -1; 314 } 315 316 /* parse driver */ 317 snprintf(filename, sizeof(filename), "%s/driver", dirname); 318 ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver)); 319 if (ret < 0) { 320 RTE_LOG(ERR, EAL, "Fail to get kernel driver\n"); 321 free(dev); 322 return -1; 323 } 324 325 if (!ret) { 326 if (!strcmp(driver, "vfio-pci")) 327 dev->kdrv = RTE_KDRV_VFIO; 328 else if (!strcmp(driver, "igb_uio")) 329 dev->kdrv = RTE_KDRV_IGB_UIO; 330 else if (!strcmp(driver, "uio_pci_generic")) 331 dev->kdrv = RTE_KDRV_UIO_GENERIC; 332 else 333 dev->kdrv = RTE_KDRV_UNKNOWN; 334 } else 335 dev->kdrv = RTE_KDRV_NONE; 336 337 /* device is valid, add in list (sorted) */ 338 if (TAILQ_EMPTY(&rte_pci_bus.device_list)) { 339 rte_pci_add_device(dev); 340 } else { 341 struct rte_pci_device *dev2; 342 int ret; 343 344 TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) { 345 ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr); 346 if (ret > 0) 347 continue; 348 349 if (ret < 0) { 350 rte_pci_insert_device(dev2, dev); 351 } else { /* already registered */ 352 dev2->kdrv = dev->kdrv; 353 dev2->max_vfs = dev->max_vfs; 354 pci_name_set(dev2); 355 memmove(dev2->mem_resource, dev->mem_resource, 356 sizeof(dev->mem_resource)); 357 free(dev); 358 } 359 return 0; 360 } 361 362 rte_pci_add_device(dev); 363 } 364 365 return 0; 366 } 367 368 int 369 pci_update_device(const struct rte_pci_addr *addr) 370 { 371 char filename[PATH_MAX]; 372 373 snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT, 374 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid, 375 addr->function); 376 377 return pci_scan_one(filename, addr); 378 } 379 380 /* 381 * split up a pci address into its constituent parts. 382 */ 383 static int 384 parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr) 385 { 386 /* first split on ':' */ 387 union splitaddr { 388 struct { 389 char *domain; 390 char *bus; 391 char *devid; 392 char *function; 393 }; 394 char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */ 395 } splitaddr; 396 397 char *buf_copy = strndup(buf, bufsize); 398 if (buf_copy == NULL) 399 return -1; 400 401 if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':') 402 != PCI_FMT_NVAL - 1) 403 goto error; 404 /* final split is on '.' between devid and function */ 405 splitaddr.function = strchr(splitaddr.devid,'.'); 406 if (splitaddr.function == NULL) 407 goto error; 408 *splitaddr.function++ = '\0'; 409 410 /* now convert to int values */ 411 errno = 0; 412 addr->domain = strtoul(splitaddr.domain, NULL, 16); 413 addr->bus = strtoul(splitaddr.bus, NULL, 16); 414 addr->devid = strtoul(splitaddr.devid, NULL, 16); 415 addr->function = strtoul(splitaddr.function, NULL, 10); 416 if (errno != 0) 417 goto error; 418 419 free(buf_copy); /* free the copy made with strdup */ 420 return 0; 421 error: 422 free(buf_copy); 423 return -1; 424 } 425 426 /* 427 * Scan the content of the PCI bus, and the devices in the devices 428 * list 429 */ 430 int 431 rte_pci_scan(void) 432 { 433 struct dirent *e; 434 DIR *dir; 435 char dirname[PATH_MAX]; 436 struct rte_pci_addr addr; 437 438 /* for debug purposes, PCI can be disabled */ 439 if (!rte_eal_has_pci()) 440 return 0; 441 442 #ifdef VFIO_PRESENT 443 if (!pci_vfio_is_enabled()) 444 RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n"); 445 #endif 446 447 dir = opendir(rte_pci_get_sysfs_path()); 448 if (dir == NULL) { 449 RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n", 450 __func__, strerror(errno)); 451 return -1; 452 } 453 454 while ((e = readdir(dir)) != NULL) { 455 if (e->d_name[0] == '.') 456 continue; 457 458 if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0) 459 continue; 460 461 snprintf(dirname, sizeof(dirname), "%s/%s", 462 rte_pci_get_sysfs_path(), e->d_name); 463 464 if (pci_scan_one(dirname, &addr) < 0) 465 goto error; 466 } 467 closedir(dir); 468 return 0; 469 470 error: 471 closedir(dir); 472 return -1; 473 } 474 475 /* 476 * Is pci device bound to any kdrv 477 */ 478 static inline int 479 pci_one_device_is_bound(void) 480 { 481 struct rte_pci_device *dev = NULL; 482 int ret = 0; 483 484 FOREACH_DEVICE_ON_PCIBUS(dev) { 485 if (dev->kdrv == RTE_KDRV_UNKNOWN || 486 dev->kdrv == RTE_KDRV_NONE) { 487 continue; 488 } else { 489 ret = 1; 490 break; 491 } 492 } 493 return ret; 494 } 495 496 /* 497 * Any one of the device bound to uio 498 */ 499 static inline int 500 pci_one_device_bound_uio(void) 501 { 502 struct rte_pci_device *dev = NULL; 503 struct rte_devargs *devargs; 504 int need_check; 505 506 FOREACH_DEVICE_ON_PCIBUS(dev) { 507 devargs = dev->device.devargs; 508 509 need_check = 0; 510 switch (rte_pci_bus.bus.conf.scan_mode) { 511 case RTE_BUS_SCAN_WHITELIST: 512 if (devargs && devargs->policy == RTE_DEV_WHITELISTED) 513 need_check = 1; 514 break; 515 case RTE_BUS_SCAN_UNDEFINED: 516 case RTE_BUS_SCAN_BLACKLIST: 517 if (devargs == NULL || 518 devargs->policy != RTE_DEV_BLACKLISTED) 519 need_check = 1; 520 break; 521 } 522 523 if (!need_check) 524 continue; 525 526 if (dev->kdrv == RTE_KDRV_IGB_UIO || 527 dev->kdrv == RTE_KDRV_UIO_GENERIC) { 528 return 1; 529 } 530 } 531 return 0; 532 } 533 534 /* 535 * Any one of the device has iova as va 536 */ 537 static inline int 538 pci_one_device_has_iova_va(void) 539 { 540 struct rte_pci_device *dev = NULL; 541 struct rte_pci_driver *drv = NULL; 542 543 FOREACH_DRIVER_ON_PCIBUS(drv) { 544 if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) { 545 FOREACH_DEVICE_ON_PCIBUS(dev) { 546 if (dev->kdrv == RTE_KDRV_VFIO && 547 rte_pci_match(drv, dev)) 548 return 1; 549 } 550 } 551 } 552 return 0; 553 } 554 555 #if defined(RTE_ARCH_X86) 556 static bool 557 pci_one_device_iommu_support_va(struct rte_pci_device *dev) 558 { 559 #define VTD_CAP_MGAW_SHIFT 16 560 #define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT) 561 #define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */ 562 struct rte_pci_addr *addr = &dev->addr; 563 char filename[PATH_MAX]; 564 FILE *fp; 565 uint64_t mgaw, vtd_cap_reg = 0; 566 567 snprintf(filename, sizeof(filename), 568 "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap", 569 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid, 570 addr->function); 571 if (access(filename, F_OK) == -1) { 572 /* We don't have an Intel IOMMU, assume VA supported*/ 573 return true; 574 } 575 576 /* We have an intel IOMMU */ 577 fp = fopen(filename, "r"); 578 if (fp == NULL) { 579 RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename); 580 return false; 581 } 582 583 if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) { 584 RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename); 585 fclose(fp); 586 return false; 587 } 588 589 fclose(fp); 590 591 mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1; 592 if (mgaw < X86_VA_WIDTH) 593 return false; 594 595 return true; 596 } 597 #elif defined(RTE_ARCH_PPC_64) 598 static bool 599 pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev) 600 { 601 return false; 602 } 603 #else 604 static bool 605 pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev) 606 { 607 return true; 608 } 609 #endif 610 611 /* 612 * All devices IOMMUs support VA as IOVA 613 */ 614 static bool 615 pci_devices_iommu_support_va(void) 616 { 617 struct rte_pci_device *dev = NULL; 618 struct rte_pci_driver *drv = NULL; 619 620 FOREACH_DRIVER_ON_PCIBUS(drv) { 621 FOREACH_DEVICE_ON_PCIBUS(dev) { 622 if (!rte_pci_match(drv, dev)) 623 continue; 624 if (!pci_one_device_iommu_support_va(dev)) 625 return false; 626 } 627 } 628 return true; 629 } 630 631 /* 632 * Get iommu class of PCI devices on the bus. 633 */ 634 enum rte_iova_mode 635 rte_pci_get_iommu_class(void) 636 { 637 bool is_bound; 638 bool is_vfio_noiommu_enabled = true; 639 bool has_iova_va; 640 bool is_bound_uio; 641 bool iommu_no_va; 642 643 is_bound = pci_one_device_is_bound(); 644 if (!is_bound) 645 return RTE_IOVA_DC; 646 647 has_iova_va = pci_one_device_has_iova_va(); 648 is_bound_uio = pci_one_device_bound_uio(); 649 iommu_no_va = !pci_devices_iommu_support_va(); 650 #ifdef VFIO_PRESENT 651 is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ? 652 true : false; 653 #endif 654 655 if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled && 656 !iommu_no_va) 657 return RTE_IOVA_VA; 658 659 if (has_iova_va) { 660 RTE_LOG(WARNING, EAL, "Some devices want iova as va but pa will be used because.. "); 661 if (is_vfio_noiommu_enabled) 662 RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n"); 663 if (is_bound_uio) 664 RTE_LOG(WARNING, EAL, "few device bound to UIO\n"); 665 if (iommu_no_va) 666 RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n"); 667 } 668 669 return RTE_IOVA_PA; 670 } 671 672 /* Read PCI config space. */ 673 int rte_pci_read_config(const struct rte_pci_device *device, 674 void *buf, size_t len, off_t offset) 675 { 676 const struct rte_intr_handle *intr_handle = &device->intr_handle; 677 678 switch (intr_handle->type) { 679 case RTE_INTR_HANDLE_UIO: 680 case RTE_INTR_HANDLE_UIO_INTX: 681 return pci_uio_read_config(intr_handle, buf, len, offset); 682 683 #ifdef VFIO_PRESENT 684 case RTE_INTR_HANDLE_VFIO_MSIX: 685 case RTE_INTR_HANDLE_VFIO_MSI: 686 case RTE_INTR_HANDLE_VFIO_LEGACY: 687 return pci_vfio_read_config(intr_handle, buf, len, offset); 688 #endif 689 default: 690 RTE_LOG(ERR, EAL, 691 "Unknown handle type of fd %d\n", 692 intr_handle->fd); 693 return -1; 694 } 695 } 696 697 /* Write PCI config space. */ 698 int rte_pci_write_config(const struct rte_pci_device *device, 699 const void *buf, size_t len, off_t offset) 700 { 701 const struct rte_intr_handle *intr_handle = &device->intr_handle; 702 703 switch (intr_handle->type) { 704 case RTE_INTR_HANDLE_UIO: 705 case RTE_INTR_HANDLE_UIO_INTX: 706 return pci_uio_write_config(intr_handle, buf, len, offset); 707 708 #ifdef VFIO_PRESENT 709 case RTE_INTR_HANDLE_VFIO_MSIX: 710 case RTE_INTR_HANDLE_VFIO_MSI: 711 case RTE_INTR_HANDLE_VFIO_LEGACY: 712 return pci_vfio_write_config(intr_handle, buf, len, offset); 713 #endif 714 default: 715 RTE_LOG(ERR, EAL, 716 "Unknown handle type of fd %d\n", 717 intr_handle->fd); 718 return -1; 719 } 720 } 721 722 #if defined(RTE_ARCH_X86) 723 static int 724 pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused, 725 struct rte_pci_ioport *p) 726 { 727 uint16_t start, end; 728 FILE *fp; 729 char *line = NULL; 730 char pci_id[16]; 731 int found = 0; 732 size_t linesz; 733 734 snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT, 735 dev->addr.domain, dev->addr.bus, 736 dev->addr.devid, dev->addr.function); 737 738 fp = fopen("/proc/ioports", "r"); 739 if (fp == NULL) { 740 RTE_LOG(ERR, EAL, "%s(): can't open ioports\n", __func__); 741 return -1; 742 } 743 744 while (getdelim(&line, &linesz, '\n', fp) > 0) { 745 char *ptr = line; 746 char *left; 747 int n; 748 749 n = strcspn(ptr, ":"); 750 ptr[n] = 0; 751 left = &ptr[n + 1]; 752 753 while (*left && isspace(*left)) 754 left++; 755 756 if (!strncmp(left, pci_id, strlen(pci_id))) { 757 found = 1; 758 759 while (*ptr && isspace(*ptr)) 760 ptr++; 761 762 sscanf(ptr, "%04hx-%04hx", &start, &end); 763 764 break; 765 } 766 } 767 768 free(line); 769 fclose(fp); 770 771 if (!found) 772 return -1; 773 774 p->base = start; 775 RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start); 776 777 return 0; 778 } 779 #endif 780 781 int 782 rte_pci_ioport_map(struct rte_pci_device *dev, int bar, 783 struct rte_pci_ioport *p) 784 { 785 int ret = -1; 786 787 switch (dev->kdrv) { 788 #ifdef VFIO_PRESENT 789 case RTE_KDRV_VFIO: 790 if (pci_vfio_is_enabled()) 791 ret = pci_vfio_ioport_map(dev, bar, p); 792 break; 793 #endif 794 case RTE_KDRV_IGB_UIO: 795 ret = pci_uio_ioport_map(dev, bar, p); 796 break; 797 case RTE_KDRV_UIO_GENERIC: 798 #if defined(RTE_ARCH_X86) 799 ret = pci_ioport_map(dev, bar, p); 800 #else 801 ret = pci_uio_ioport_map(dev, bar, p); 802 #endif 803 break; 804 case RTE_KDRV_NONE: 805 #if defined(RTE_ARCH_X86) 806 ret = pci_ioport_map(dev, bar, p); 807 #endif 808 break; 809 default: 810 break; 811 } 812 813 if (!ret) 814 p->dev = dev; 815 816 return ret; 817 } 818 819 void 820 rte_pci_ioport_read(struct rte_pci_ioport *p, 821 void *data, size_t len, off_t offset) 822 { 823 switch (p->dev->kdrv) { 824 #ifdef VFIO_PRESENT 825 case RTE_KDRV_VFIO: 826 pci_vfio_ioport_read(p, data, len, offset); 827 break; 828 #endif 829 case RTE_KDRV_IGB_UIO: 830 pci_uio_ioport_read(p, data, len, offset); 831 break; 832 case RTE_KDRV_UIO_GENERIC: 833 pci_uio_ioport_read(p, data, len, offset); 834 break; 835 case RTE_KDRV_NONE: 836 #if defined(RTE_ARCH_X86) 837 pci_uio_ioport_read(p, data, len, offset); 838 #endif 839 break; 840 default: 841 break; 842 } 843 } 844 845 void 846 rte_pci_ioport_write(struct rte_pci_ioport *p, 847 const void *data, size_t len, off_t offset) 848 { 849 switch (p->dev->kdrv) { 850 #ifdef VFIO_PRESENT 851 case RTE_KDRV_VFIO: 852 pci_vfio_ioport_write(p, data, len, offset); 853 break; 854 #endif 855 case RTE_KDRV_IGB_UIO: 856 pci_uio_ioport_write(p, data, len, offset); 857 break; 858 case RTE_KDRV_UIO_GENERIC: 859 pci_uio_ioport_write(p, data, len, offset); 860 break; 861 case RTE_KDRV_NONE: 862 #if defined(RTE_ARCH_X86) 863 pci_uio_ioport_write(p, data, len, offset); 864 #endif 865 break; 866 default: 867 break; 868 } 869 } 870 871 int 872 rte_pci_ioport_unmap(struct rte_pci_ioport *p) 873 { 874 int ret = -1; 875 876 switch (p->dev->kdrv) { 877 #ifdef VFIO_PRESENT 878 case RTE_KDRV_VFIO: 879 if (pci_vfio_is_enabled()) 880 ret = pci_vfio_ioport_unmap(p); 881 break; 882 #endif 883 case RTE_KDRV_IGB_UIO: 884 ret = pci_uio_ioport_unmap(p); 885 break; 886 case RTE_KDRV_UIO_GENERIC: 887 #if defined(RTE_ARCH_X86) 888 ret = 0; 889 #else 890 ret = pci_uio_ioport_unmap(p); 891 #endif 892 break; 893 case RTE_KDRV_NONE: 894 #if defined(RTE_ARCH_X86) 895 ret = 0; 896 #endif 897 break; 898 default: 899 break; 900 } 901 902 return ret; 903 } 904