1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #include <string.h> 6 #include <dirent.h> 7 8 #include <rte_log.h> 9 #include <rte_bus.h> 10 #include <rte_pci.h> 11 #include <rte_bus_pci.h> 12 #include <rte_eal_memconfig.h> 13 #include <rte_malloc.h> 14 #include <rte_devargs.h> 15 #include <rte_memcpy.h> 16 #include <rte_vfio.h> 17 18 #include "eal_filesystem.h" 19 20 #include "private.h" 21 #include "pci_init.h" 22 23 /** 24 * @file 25 * PCI probing under linux 26 * 27 * This code is used to simulate a PCI probe by parsing information in sysfs. 28 * When a registered device matches a driver, it is then initialized with 29 * IGB_UIO driver (or doesn't initialize, if the device wasn't bound to it). 30 */ 31 32 extern struct rte_pci_bus rte_pci_bus; 33 34 static int 35 pci_get_kernel_driver_by_path(const char *filename, char *dri_name, 36 size_t len) 37 { 38 int count; 39 char path[PATH_MAX]; 40 char *name; 41 42 if (!filename || !dri_name) 43 return -1; 44 45 count = readlink(filename, path, PATH_MAX); 46 if (count >= PATH_MAX) 47 return -1; 48 49 /* For device does not have a driver */ 50 if (count < 0) 51 return 1; 52 53 path[count] = '\0'; 54 55 name = strrchr(path, '/'); 56 if (name) { 57 strlcpy(dri_name, name + 1, len); 58 return 0; 59 } 60 61 return -1; 62 } 63 64 /* Map pci device */ 65 int 66 rte_pci_map_device(struct rte_pci_device *dev) 67 { 68 int ret = -1; 69 70 /* try mapping the NIC resources using VFIO if it exists */ 71 switch (dev->kdrv) { 72 case RTE_KDRV_VFIO: 73 #ifdef VFIO_PRESENT 74 if (pci_vfio_is_enabled()) 75 ret = pci_vfio_map_resource(dev); 76 #endif 77 break; 78 case RTE_KDRV_IGB_UIO: 79 case RTE_KDRV_UIO_GENERIC: 80 if (rte_eal_using_phys_addrs()) { 81 /* map resources for devices that use uio */ 82 ret = pci_uio_map_resource(dev); 83 } 84 break; 85 default: 86 RTE_LOG(DEBUG, EAL, 87 " Not managed by a supported kernel driver, skipped\n"); 88 ret = 1; 89 break; 90 } 91 92 return ret; 93 } 94 95 /* Unmap pci device */ 96 void 97 rte_pci_unmap_device(struct rte_pci_device *dev) 98 { 99 /* try unmapping the NIC resources using VFIO if it exists */ 100 switch (dev->kdrv) { 101 case RTE_KDRV_VFIO: 102 #ifdef VFIO_PRESENT 103 if (pci_vfio_is_enabled()) 104 pci_vfio_unmap_resource(dev); 105 #endif 106 break; 107 case RTE_KDRV_IGB_UIO: 108 case RTE_KDRV_UIO_GENERIC: 109 /* unmap resources for devices that use uio */ 110 pci_uio_unmap_resource(dev); 111 break; 112 default: 113 RTE_LOG(DEBUG, EAL, 114 " Not managed by a supported kernel driver, skipped\n"); 115 break; 116 } 117 } 118 119 static int 120 find_max_end_va(const struct rte_memseg_list *msl, void *arg) 121 { 122 size_t sz = msl->memseg_arr.len * msl->page_sz; 123 void *end_va = RTE_PTR_ADD(msl->base_va, sz); 124 void **max_va = arg; 125 126 if (*max_va < end_va) 127 *max_va = end_va; 128 return 0; 129 } 130 131 void * 132 pci_find_max_end_va(void) 133 { 134 void *va = NULL; 135 136 rte_memseg_list_walk(find_max_end_va, &va); 137 return va; 138 } 139 140 141 /* parse one line of the "resource" sysfs file (note that the 'line' 142 * string is modified) 143 */ 144 int 145 pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr, 146 uint64_t *end_addr, uint64_t *flags) 147 { 148 union pci_resource_info { 149 struct { 150 char *phys_addr; 151 char *end_addr; 152 char *flags; 153 }; 154 char *ptrs[PCI_RESOURCE_FMT_NVAL]; 155 } res_info; 156 157 if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) { 158 RTE_LOG(ERR, EAL, 159 "%s(): bad resource format\n", __func__); 160 return -1; 161 } 162 errno = 0; 163 *phys_addr = strtoull(res_info.phys_addr, NULL, 16); 164 *end_addr = strtoull(res_info.end_addr, NULL, 16); 165 *flags = strtoull(res_info.flags, NULL, 16); 166 if (errno != 0) { 167 RTE_LOG(ERR, EAL, 168 "%s(): bad resource format\n", __func__); 169 return -1; 170 } 171 172 return 0; 173 } 174 175 /* parse the "resource" sysfs file */ 176 static int 177 pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev) 178 { 179 FILE *f; 180 char buf[BUFSIZ]; 181 int i; 182 uint64_t phys_addr, end_addr, flags; 183 184 f = fopen(filename, "r"); 185 if (f == NULL) { 186 RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n"); 187 return -1; 188 } 189 190 for (i = 0; i<PCI_MAX_RESOURCE; i++) { 191 192 if (fgets(buf, sizeof(buf), f) == NULL) { 193 RTE_LOG(ERR, EAL, 194 "%s(): cannot read resource\n", __func__); 195 goto error; 196 } 197 if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr, 198 &end_addr, &flags) < 0) 199 goto error; 200 201 if (flags & IORESOURCE_MEM) { 202 dev->mem_resource[i].phys_addr = phys_addr; 203 dev->mem_resource[i].len = end_addr - phys_addr + 1; 204 /* not mapped for now */ 205 dev->mem_resource[i].addr = NULL; 206 } 207 } 208 fclose(f); 209 return 0; 210 211 error: 212 fclose(f); 213 return -1; 214 } 215 216 /* Scan one pci sysfs entry, and fill the devices list from it. */ 217 static int 218 pci_scan_one(const char *dirname, const struct rte_pci_addr *addr) 219 { 220 char filename[PATH_MAX]; 221 unsigned long tmp; 222 struct rte_pci_device *dev; 223 char driver[PATH_MAX]; 224 int ret; 225 226 dev = malloc(sizeof(*dev)); 227 if (dev == NULL) 228 return -1; 229 230 memset(dev, 0, sizeof(*dev)); 231 dev->addr = *addr; 232 233 /* get vendor id */ 234 snprintf(filename, sizeof(filename), "%s/vendor", dirname); 235 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 236 free(dev); 237 return -1; 238 } 239 dev->id.vendor_id = (uint16_t)tmp; 240 241 /* get device id */ 242 snprintf(filename, sizeof(filename), "%s/device", dirname); 243 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 244 free(dev); 245 return -1; 246 } 247 dev->id.device_id = (uint16_t)tmp; 248 249 /* get subsystem_vendor id */ 250 snprintf(filename, sizeof(filename), "%s/subsystem_vendor", 251 dirname); 252 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 253 free(dev); 254 return -1; 255 } 256 dev->id.subsystem_vendor_id = (uint16_t)tmp; 257 258 /* get subsystem_device id */ 259 snprintf(filename, sizeof(filename), "%s/subsystem_device", 260 dirname); 261 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 262 free(dev); 263 return -1; 264 } 265 dev->id.subsystem_device_id = (uint16_t)tmp; 266 267 /* get class_id */ 268 snprintf(filename, sizeof(filename), "%s/class", 269 dirname); 270 if (eal_parse_sysfs_value(filename, &tmp) < 0) { 271 free(dev); 272 return -1; 273 } 274 /* the least 24 bits are valid: class, subclass, program interface */ 275 dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID; 276 277 /* get max_vfs */ 278 dev->max_vfs = 0; 279 snprintf(filename, sizeof(filename), "%s/max_vfs", dirname); 280 if (!access(filename, F_OK) && 281 eal_parse_sysfs_value(filename, &tmp) == 0) 282 dev->max_vfs = (uint16_t)tmp; 283 else { 284 /* for non igb_uio driver, need kernel version >= 3.8 */ 285 snprintf(filename, sizeof(filename), 286 "%s/sriov_numvfs", dirname); 287 if (!access(filename, F_OK) && 288 eal_parse_sysfs_value(filename, &tmp) == 0) 289 dev->max_vfs = (uint16_t)tmp; 290 } 291 292 /* get numa node, default to 0 if not present */ 293 snprintf(filename, sizeof(filename), "%s/numa_node", 294 dirname); 295 296 if (access(filename, F_OK) != -1) { 297 if (eal_parse_sysfs_value(filename, &tmp) == 0) 298 dev->device.numa_node = tmp; 299 else 300 dev->device.numa_node = -1; 301 } else { 302 dev->device.numa_node = 0; 303 } 304 305 pci_name_set(dev); 306 307 /* parse resources */ 308 snprintf(filename, sizeof(filename), "%s/resource", dirname); 309 if (pci_parse_sysfs_resource(filename, dev) < 0) { 310 RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__); 311 free(dev); 312 return -1; 313 } 314 315 /* parse driver */ 316 snprintf(filename, sizeof(filename), "%s/driver", dirname); 317 ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver)); 318 if (ret < 0) { 319 RTE_LOG(ERR, EAL, "Fail to get kernel driver\n"); 320 free(dev); 321 return -1; 322 } 323 324 if (!ret) { 325 if (!strcmp(driver, "vfio-pci")) 326 dev->kdrv = RTE_KDRV_VFIO; 327 else if (!strcmp(driver, "igb_uio")) 328 dev->kdrv = RTE_KDRV_IGB_UIO; 329 else if (!strcmp(driver, "uio_pci_generic")) 330 dev->kdrv = RTE_KDRV_UIO_GENERIC; 331 else 332 dev->kdrv = RTE_KDRV_UNKNOWN; 333 } else 334 dev->kdrv = RTE_KDRV_NONE; 335 336 /* device is valid, add in list (sorted) */ 337 if (TAILQ_EMPTY(&rte_pci_bus.device_list)) { 338 rte_pci_add_device(dev); 339 } else { 340 struct rte_pci_device *dev2; 341 int ret; 342 343 TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) { 344 ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr); 345 if (ret > 0) 346 continue; 347 348 if (ret < 0) { 349 rte_pci_insert_device(dev2, dev); 350 } else { /* already registered */ 351 dev2->kdrv = dev->kdrv; 352 dev2->max_vfs = dev->max_vfs; 353 pci_name_set(dev2); 354 memmove(dev2->mem_resource, dev->mem_resource, 355 sizeof(dev->mem_resource)); 356 free(dev); 357 } 358 return 0; 359 } 360 361 rte_pci_add_device(dev); 362 } 363 364 return 0; 365 } 366 367 int 368 pci_update_device(const struct rte_pci_addr *addr) 369 { 370 char filename[PATH_MAX]; 371 372 snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT, 373 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid, 374 addr->function); 375 376 return pci_scan_one(filename, addr); 377 } 378 379 /* 380 * split up a pci address into its constituent parts. 381 */ 382 static int 383 parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr) 384 { 385 /* first split on ':' */ 386 union splitaddr { 387 struct { 388 char *domain; 389 char *bus; 390 char *devid; 391 char *function; 392 }; 393 char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */ 394 } splitaddr; 395 396 char *buf_copy = strndup(buf, bufsize); 397 if (buf_copy == NULL) 398 return -1; 399 400 if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':') 401 != PCI_FMT_NVAL - 1) 402 goto error; 403 /* final split is on '.' between devid and function */ 404 splitaddr.function = strchr(splitaddr.devid,'.'); 405 if (splitaddr.function == NULL) 406 goto error; 407 *splitaddr.function++ = '\0'; 408 409 /* now convert to int values */ 410 errno = 0; 411 addr->domain = strtoul(splitaddr.domain, NULL, 16); 412 addr->bus = strtoul(splitaddr.bus, NULL, 16); 413 addr->devid = strtoul(splitaddr.devid, NULL, 16); 414 addr->function = strtoul(splitaddr.function, NULL, 10); 415 if (errno != 0) 416 goto error; 417 418 free(buf_copy); /* free the copy made with strdup */ 419 return 0; 420 error: 421 free(buf_copy); 422 return -1; 423 } 424 425 /* 426 * Scan the content of the PCI bus, and the devices in the devices 427 * list 428 */ 429 int 430 rte_pci_scan(void) 431 { 432 struct dirent *e; 433 DIR *dir; 434 char dirname[PATH_MAX]; 435 struct rte_pci_addr addr; 436 437 /* for debug purposes, PCI can be disabled */ 438 if (!rte_eal_has_pci()) 439 return 0; 440 441 #ifdef VFIO_PRESENT 442 if (!pci_vfio_is_enabled()) 443 RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n"); 444 #endif 445 446 dir = opendir(rte_pci_get_sysfs_path()); 447 if (dir == NULL) { 448 RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n", 449 __func__, strerror(errno)); 450 return -1; 451 } 452 453 while ((e = readdir(dir)) != NULL) { 454 if (e->d_name[0] == '.') 455 continue; 456 457 if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0) 458 continue; 459 460 snprintf(dirname, sizeof(dirname), "%s/%s", 461 rte_pci_get_sysfs_path(), e->d_name); 462 463 if (pci_scan_one(dirname, &addr) < 0) 464 goto error; 465 } 466 closedir(dir); 467 return 0; 468 469 error: 470 closedir(dir); 471 return -1; 472 } 473 474 /* 475 * Is pci device bound to any kdrv 476 */ 477 static inline int 478 pci_one_device_is_bound(void) 479 { 480 struct rte_pci_device *dev = NULL; 481 int ret = 0; 482 483 FOREACH_DEVICE_ON_PCIBUS(dev) { 484 if (dev->kdrv == RTE_KDRV_UNKNOWN || 485 dev->kdrv == RTE_KDRV_NONE) { 486 continue; 487 } else { 488 ret = 1; 489 break; 490 } 491 } 492 return ret; 493 } 494 495 /* 496 * Any one of the device bound to uio 497 */ 498 static inline int 499 pci_one_device_bound_uio(void) 500 { 501 struct rte_pci_device *dev = NULL; 502 struct rte_devargs *devargs; 503 int need_check; 504 505 FOREACH_DEVICE_ON_PCIBUS(dev) { 506 devargs = dev->device.devargs; 507 508 need_check = 0; 509 switch (rte_pci_bus.bus.conf.scan_mode) { 510 case RTE_BUS_SCAN_WHITELIST: 511 if (devargs && devargs->policy == RTE_DEV_WHITELISTED) 512 need_check = 1; 513 break; 514 case RTE_BUS_SCAN_UNDEFINED: 515 case RTE_BUS_SCAN_BLACKLIST: 516 if (devargs == NULL || 517 devargs->policy != RTE_DEV_BLACKLISTED) 518 need_check = 1; 519 break; 520 } 521 522 if (!need_check) 523 continue; 524 525 if (dev->kdrv == RTE_KDRV_IGB_UIO || 526 dev->kdrv == RTE_KDRV_UIO_GENERIC) { 527 return 1; 528 } 529 } 530 return 0; 531 } 532 533 /* 534 * Any one of the device has iova as va 535 */ 536 static inline int 537 pci_one_device_has_iova_va(void) 538 { 539 struct rte_pci_device *dev = NULL; 540 struct rte_pci_driver *drv = NULL; 541 542 FOREACH_DRIVER_ON_PCIBUS(drv) { 543 if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) { 544 FOREACH_DEVICE_ON_PCIBUS(dev) { 545 if (dev->kdrv == RTE_KDRV_VFIO && 546 rte_pci_match(drv, dev)) 547 return 1; 548 } 549 } 550 } 551 return 0; 552 } 553 554 #if defined(RTE_ARCH_X86) 555 static bool 556 pci_one_device_iommu_support_va(struct rte_pci_device *dev) 557 { 558 #define VTD_CAP_MGAW_SHIFT 16 559 #define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT) 560 #define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */ 561 struct rte_pci_addr *addr = &dev->addr; 562 char filename[PATH_MAX]; 563 FILE *fp; 564 uint64_t mgaw, vtd_cap_reg = 0; 565 566 snprintf(filename, sizeof(filename), 567 "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap", 568 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid, 569 addr->function); 570 if (access(filename, F_OK) == -1) { 571 /* We don't have an Intel IOMMU, assume VA supported*/ 572 return true; 573 } 574 575 /* We have an intel IOMMU */ 576 fp = fopen(filename, "r"); 577 if (fp == NULL) { 578 RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename); 579 return false; 580 } 581 582 if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) { 583 RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename); 584 fclose(fp); 585 return false; 586 } 587 588 fclose(fp); 589 590 mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1; 591 if (mgaw < X86_VA_WIDTH) 592 return false; 593 594 return true; 595 } 596 #elif defined(RTE_ARCH_PPC_64) 597 static bool 598 pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev) 599 { 600 return false; 601 } 602 #else 603 static bool 604 pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev) 605 { 606 return true; 607 } 608 #endif 609 610 /* 611 * All devices IOMMUs support VA as IOVA 612 */ 613 static bool 614 pci_devices_iommu_support_va(void) 615 { 616 struct rte_pci_device *dev = NULL; 617 struct rte_pci_driver *drv = NULL; 618 619 FOREACH_DRIVER_ON_PCIBUS(drv) { 620 FOREACH_DEVICE_ON_PCIBUS(dev) { 621 if (!rte_pci_match(drv, dev)) 622 continue; 623 if (!pci_one_device_iommu_support_va(dev)) 624 return false; 625 } 626 } 627 return true; 628 } 629 630 /* 631 * Get iommu class of PCI devices on the bus. 632 */ 633 enum rte_iova_mode 634 rte_pci_get_iommu_class(void) 635 { 636 bool is_bound; 637 bool is_vfio_noiommu_enabled = true; 638 bool has_iova_va; 639 bool is_bound_uio; 640 bool iommu_no_va; 641 642 is_bound = pci_one_device_is_bound(); 643 if (!is_bound) 644 return RTE_IOVA_DC; 645 646 has_iova_va = pci_one_device_has_iova_va(); 647 is_bound_uio = pci_one_device_bound_uio(); 648 iommu_no_va = !pci_devices_iommu_support_va(); 649 #ifdef VFIO_PRESENT 650 is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ? 651 true : false; 652 #endif 653 654 if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled && 655 !iommu_no_va) 656 return RTE_IOVA_VA; 657 658 if (has_iova_va) { 659 RTE_LOG(WARNING, EAL, "Some devices want iova as va but pa will be used because.. "); 660 if (is_vfio_noiommu_enabled) 661 RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n"); 662 if (is_bound_uio) 663 RTE_LOG(WARNING, EAL, "few device bound to UIO\n"); 664 if (iommu_no_va) 665 RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n"); 666 } 667 668 return RTE_IOVA_PA; 669 } 670 671 /* Read PCI config space. */ 672 int rte_pci_read_config(const struct rte_pci_device *device, 673 void *buf, size_t len, off_t offset) 674 { 675 const struct rte_intr_handle *intr_handle = &device->intr_handle; 676 677 switch (intr_handle->type) { 678 case RTE_INTR_HANDLE_UIO: 679 case RTE_INTR_HANDLE_UIO_INTX: 680 return pci_uio_read_config(intr_handle, buf, len, offset); 681 682 #ifdef VFIO_PRESENT 683 case RTE_INTR_HANDLE_VFIO_MSIX: 684 case RTE_INTR_HANDLE_VFIO_MSI: 685 case RTE_INTR_HANDLE_VFIO_LEGACY: 686 return pci_vfio_read_config(intr_handle, buf, len, offset); 687 #endif 688 default: 689 RTE_LOG(ERR, EAL, 690 "Unknown handle type of fd %d\n", 691 intr_handle->fd); 692 return -1; 693 } 694 } 695 696 /* Write PCI config space. */ 697 int rte_pci_write_config(const struct rte_pci_device *device, 698 const void *buf, size_t len, off_t offset) 699 { 700 const struct rte_intr_handle *intr_handle = &device->intr_handle; 701 702 switch (intr_handle->type) { 703 case RTE_INTR_HANDLE_UIO: 704 case RTE_INTR_HANDLE_UIO_INTX: 705 return pci_uio_write_config(intr_handle, buf, len, offset); 706 707 #ifdef VFIO_PRESENT 708 case RTE_INTR_HANDLE_VFIO_MSIX: 709 case RTE_INTR_HANDLE_VFIO_MSI: 710 case RTE_INTR_HANDLE_VFIO_LEGACY: 711 return pci_vfio_write_config(intr_handle, buf, len, offset); 712 #endif 713 default: 714 RTE_LOG(ERR, EAL, 715 "Unknown handle type of fd %d\n", 716 intr_handle->fd); 717 return -1; 718 } 719 } 720 721 #if defined(RTE_ARCH_X86) 722 static int 723 pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused, 724 struct rte_pci_ioport *p) 725 { 726 uint16_t start, end; 727 FILE *fp; 728 char *line = NULL; 729 char pci_id[16]; 730 int found = 0; 731 size_t linesz; 732 733 snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT, 734 dev->addr.domain, dev->addr.bus, 735 dev->addr.devid, dev->addr.function); 736 737 fp = fopen("/proc/ioports", "r"); 738 if (fp == NULL) { 739 RTE_LOG(ERR, EAL, "%s(): can't open ioports\n", __func__); 740 return -1; 741 } 742 743 while (getdelim(&line, &linesz, '\n', fp) > 0) { 744 char *ptr = line; 745 char *left; 746 int n; 747 748 n = strcspn(ptr, ":"); 749 ptr[n] = 0; 750 left = &ptr[n + 1]; 751 752 while (*left && isspace(*left)) 753 left++; 754 755 if (!strncmp(left, pci_id, strlen(pci_id))) { 756 found = 1; 757 758 while (*ptr && isspace(*ptr)) 759 ptr++; 760 761 sscanf(ptr, "%04hx-%04hx", &start, &end); 762 763 break; 764 } 765 } 766 767 free(line); 768 fclose(fp); 769 770 if (!found) 771 return -1; 772 773 p->base = start; 774 RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start); 775 776 return 0; 777 } 778 #endif 779 780 int 781 rte_pci_ioport_map(struct rte_pci_device *dev, int bar, 782 struct rte_pci_ioport *p) 783 { 784 int ret = -1; 785 786 switch (dev->kdrv) { 787 #ifdef VFIO_PRESENT 788 case RTE_KDRV_VFIO: 789 if (pci_vfio_is_enabled()) 790 ret = pci_vfio_ioport_map(dev, bar, p); 791 break; 792 #endif 793 case RTE_KDRV_IGB_UIO: 794 ret = pci_uio_ioport_map(dev, bar, p); 795 break; 796 case RTE_KDRV_UIO_GENERIC: 797 #if defined(RTE_ARCH_X86) 798 ret = pci_ioport_map(dev, bar, p); 799 #else 800 ret = pci_uio_ioport_map(dev, bar, p); 801 #endif 802 break; 803 case RTE_KDRV_NONE: 804 #if defined(RTE_ARCH_X86) 805 ret = pci_ioport_map(dev, bar, p); 806 #endif 807 break; 808 default: 809 break; 810 } 811 812 if (!ret) 813 p->dev = dev; 814 815 return ret; 816 } 817 818 void 819 rte_pci_ioport_read(struct rte_pci_ioport *p, 820 void *data, size_t len, off_t offset) 821 { 822 switch (p->dev->kdrv) { 823 #ifdef VFIO_PRESENT 824 case RTE_KDRV_VFIO: 825 pci_vfio_ioport_read(p, data, len, offset); 826 break; 827 #endif 828 case RTE_KDRV_IGB_UIO: 829 pci_uio_ioport_read(p, data, len, offset); 830 break; 831 case RTE_KDRV_UIO_GENERIC: 832 pci_uio_ioport_read(p, data, len, offset); 833 break; 834 case RTE_KDRV_NONE: 835 #if defined(RTE_ARCH_X86) 836 pci_uio_ioport_read(p, data, len, offset); 837 #endif 838 break; 839 default: 840 break; 841 } 842 } 843 844 void 845 rte_pci_ioport_write(struct rte_pci_ioport *p, 846 const void *data, size_t len, off_t offset) 847 { 848 switch (p->dev->kdrv) { 849 #ifdef VFIO_PRESENT 850 case RTE_KDRV_VFIO: 851 pci_vfio_ioport_write(p, data, len, offset); 852 break; 853 #endif 854 case RTE_KDRV_IGB_UIO: 855 pci_uio_ioport_write(p, data, len, offset); 856 break; 857 case RTE_KDRV_UIO_GENERIC: 858 pci_uio_ioport_write(p, data, len, offset); 859 break; 860 case RTE_KDRV_NONE: 861 #if defined(RTE_ARCH_X86) 862 pci_uio_ioport_write(p, data, len, offset); 863 #endif 864 break; 865 default: 866 break; 867 } 868 } 869 870 int 871 rte_pci_ioport_unmap(struct rte_pci_ioport *p) 872 { 873 int ret = -1; 874 875 switch (p->dev->kdrv) { 876 #ifdef VFIO_PRESENT 877 case RTE_KDRV_VFIO: 878 if (pci_vfio_is_enabled()) 879 ret = pci_vfio_ioport_unmap(p); 880 break; 881 #endif 882 case RTE_KDRV_IGB_UIO: 883 ret = pci_uio_ioport_unmap(p); 884 break; 885 case RTE_KDRV_UIO_GENERIC: 886 #if defined(RTE_ARCH_X86) 887 ret = 0; 888 #else 889 ret = pci_uio_ioport_unmap(p); 890 #endif 891 break; 892 case RTE_KDRV_NONE: 893 #if defined(RTE_ARCH_X86) 894 ret = 0; 895 #endif 896 break; 897 default: 898 break; 899 } 900 901 return ret; 902 } 903