/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <dirent.h>

#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
#include <rte_vfio.h>

#include "eal_private.h"
#include "eal_filesystem.h"

#include "private.h"
#include "pci_init.h"

/**
 * @file
 * PCI probing under Linux
 *
 * This code simulates a PCI probe by parsing information in sysfs.
 * When a registered device matches a driver, it is then initialized with
 * the IGB_UIO driver (or is not initialized if the device is not bound
 * to it).
 */

extern struct rte_pci_bus rte_pci_bus;

static int
pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
{
	int count;
	char path[PATH_MAX];
	char *name;

	if (!filename || !dri_name)
		return -1;

	count = readlink(filename, path, PATH_MAX);
	if (count >= PATH_MAX)
		return -1;

	/* For devices that do not have a driver */
	if (count < 0)
		return 1;

	path[count] = '\0';

	name = strrchr(path, '/');
	if (name) {
		strncpy(dri_name, name + 1, strlen(name + 1) + 1);
		return 0;
	}

	return -1;
}

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			ret = pci_vfio_map_resource(dev);
#endif
		break;
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		if (rte_eal_using_phys_addrs()) {
			/* map resources for devices that use uio */
			ret = pci_uio_map_resource(dev);
		}
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			pci_vfio_unmap_resource(dev);
#endif
		break;
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

void *
pci_find_max_end_va(void)
{
	const struct rte_memseg *seg = rte_eal_get_physmem_layout();
	const struct rte_memseg *last = seg;
	unsigned i = 0;

	for (i = 0; i < RTE_MAX_MEMSEG; i++, seg++) {
		if (seg->addr == NULL)
			break;

		if (seg->addr > last->addr)
			last = seg;
	}
	return RTE_PTR_ADD(last->addr, last->len);
}

/* parse one line of the "resource" sysfs file (note that the 'line'
 * string is modified)
 */
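/*
 * Each line of the sysfs "resource" file is expected to carry three
 * space-separated hexadecimal values: the start address, end address and
 * flags of one BAR. Illustrative example (values made up, not from a real
 * device):
 *
 *   0x00000000fb000000 0x00000000fb0fffff 0x0000000000040200
 */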
int
pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
	uint64_t *end_addr, uint64_t *flags)
{
	union pci_resource_info {
		struct {
			char *phys_addr;
			char *end_addr;
			char *flags;
		};
		char *ptrs[PCI_RESOURCE_FMT_NVAL];
	} res_info;

	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}
	errno = 0;
	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
	*end_addr = strtoull(res_info.end_addr, NULL, 16);
	*flags = strtoull(res_info.flags, NULL, 16);
	if (errno != 0) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}

	return 0;
}

/* parse the "resource" sysfs file */
static int
pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
{
	FILE *f;
	char buf[BUFSIZ];
	int i;
	uint64_t phys_addr, end_addr, flags;

	f = fopen(filename, "r");
	if (f == NULL) {
		RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
		return -1;
	}

	for (i = 0; i < PCI_MAX_RESOURCE; i++) {

		if (fgets(buf, sizeof(buf), f) == NULL) {
			RTE_LOG(ERR, EAL,
				"%s(): cannot read resource\n", __func__);
			goto error;
		}
		if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
				&end_addr, &flags) < 0)
			goto error;

		if (flags & IORESOURCE_MEM) {
			dev->mem_resource[i].phys_addr = phys_addr;
			dev->mem_resource[i].len = end_addr - phys_addr + 1;
			/* not mapped for now */
			dev->mem_resource[i].addr = NULL;
		}
	}
	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}

/* Scan one pci sysfs entry, and fill the devices list from it. */
static int
pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
{
	char filename[PATH_MAX];
	unsigned long tmp;
	struct rte_pci_device *dev;
	char driver[PATH_MAX];
	int ret;

	dev = malloc(sizeof(*dev));
	if (dev == NULL)
		return -1;

	memset(dev, 0, sizeof(*dev));
	dev->addr = *addr;

	/* get vendor id */
	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.vendor_id = (uint16_t)tmp;

	/* get device id */
	snprintf(filename, sizeof(filename), "%s/device", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.device_id = (uint16_t)tmp;

	/* get subsystem_vendor id */
	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.subsystem_vendor_id = (uint16_t)tmp;

	/* get subsystem_device id */
	snprintf(filename, sizeof(filename), "%s/subsystem_device",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.subsystem_device_id = (uint16_t)tmp;

	/* get class_id */
	snprintf(filename, sizeof(filename), "%s/class",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	/* the least significant 24 bits are valid: class, subclass, program interface */
	dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;

	/* get max_vfs */
	dev->max_vfs = 0;
	snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
	if (!access(filename, F_OK) &&
	    eal_parse_sysfs_value(filename, &tmp) == 0)
		dev->max_vfs = (uint16_t)tmp;
	else {
		/* for non igb_uio driver, need kernel version >= 3.8 */
		snprintf(filename, sizeof(filename),
			 "%s/sriov_numvfs", dirname);
		if (!access(filename, F_OK) &&
		    eal_parse_sysfs_value(filename, &tmp) == 0)
			dev->max_vfs = (uint16_t)tmp;
	}

	/* get numa node, default to 0 if not present */
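	/* (note: the sysfs "numa_node" attribute is typically absent on
	 * kernels built without NUMA support, and reads "-1" when the kernel
	 * does not know which node the device belongs to)
	 */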
	snprintf(filename, sizeof(filename), "%s/numa_node",
		 dirname);

	if (access(filename, F_OK) != -1) {
		if (eal_parse_sysfs_value(filename, &tmp) == 0)
			dev->device.numa_node = tmp;
		else
			dev->device.numa_node = -1;
	} else {
		dev->device.numa_node = 0;
	}

	pci_name_set(dev);

	/* parse resources */
	snprintf(filename, sizeof(filename), "%s/resource", dirname);
	if (pci_parse_sysfs_resource(filename, dev) < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
		free(dev);
		return -1;
	}

	/* parse driver */
	snprintf(filename, sizeof(filename), "%s/driver", dirname);
	ret = pci_get_kernel_driver_by_path(filename, driver);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Failed to get kernel driver\n");
		free(dev);
		return -1;
	}

	if (!ret) {
		if (!strcmp(driver, "vfio-pci"))
			dev->kdrv = RTE_KDRV_VFIO;
		else if (!strcmp(driver, "igb_uio"))
			dev->kdrv = RTE_KDRV_IGB_UIO;
		else if (!strcmp(driver, "uio_pci_generic"))
			dev->kdrv = RTE_KDRV_UIO_GENERIC;
		else
			dev->kdrv = RTE_KDRV_UNKNOWN;
	} else
		dev->kdrv = RTE_KDRV_NONE;

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;

			if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_name_set(dev2);
				memmove(dev2->mem_resource, dev->mem_resource,
					sizeof(dev->mem_resource));
				free(dev);
			}
			return 0;
		}

		rte_pci_add_device(dev);
	}

	return 0;
}

int
pci_update_device(const struct rte_pci_addr *addr)
{
	char filename[PATH_MAX];

	snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
		 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
		 addr->function);

	return pci_scan_one(filename, addr);
}

/*
 * split up a pci address into its constituent parts.
 */
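/*
 * Directory entries under the sysfs PCI path use the extended BDF notation
 * parsed here, e.g. "0000:04:00.1" for domain 0x0000, bus 0x04, device 0x00,
 * function 1 (example address only, not a real device).
 */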
static int
parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
{
	/* first split on ':' */
	union splitaddr {
		struct {
			char *domain;
			char *bus;
			char *devid;
			char *function;
		};
		char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
	} splitaddr;

	char *buf_copy = strndup(buf, bufsize);
	if (buf_copy == NULL)
		return -1;

	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
			!= PCI_FMT_NVAL - 1)
		goto error;
	/* final split is on '.' between devid and function */
	splitaddr.function = strchr(splitaddr.devid, '.');
	if (splitaddr.function == NULL)
		goto error;
	*splitaddr.function++ = '\0';

	/* now convert to int values */
	errno = 0;
	addr->domain = strtoul(splitaddr.domain, NULL, 16);
	addr->bus = strtoul(splitaddr.bus, NULL, 16);
	addr->devid = strtoul(splitaddr.devid, NULL, 16);
	addr->function = strtoul(splitaddr.function, NULL, 10);
	if (errno != 0)
		goto error;

	free(buf_copy); /* free the copy made with strndup */
	return 0;
error:
	free(buf_copy);
	return -1;
}

/*
 * Scan the content of the PCI bus, and add the devices to the devices list.
 */
int
rte_pci_scan(void)
{
	struct dirent *e;
	DIR *dir;
	char dirname[PATH_MAX];
	struct rte_pci_addr addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

#ifdef VFIO_PRESENT
	if (!pci_vfio_is_enabled())
		RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n");
#endif

	dir = opendir(rte_pci_get_sysfs_path());
	if (dir == NULL) {
		RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
			__func__, strerror(errno));
		return -1;
	}

	while ((e = readdir(dir)) != NULL) {
		if (e->d_name[0] == '.')
			continue;

		if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
			continue;

		snprintf(dirname, sizeof(dirname), "%s/%s",
			 rte_pci_get_sysfs_path(), e->d_name);

		if (pci_scan_one(dirname, &addr) < 0)
			goto error;
	}
	closedir(dir);
	return 0;

error:
	closedir(dir);
	return -1;
}

/*
 * Is any pci device bound to a kernel driver?
 */
static inline int
pci_one_device_is_bound(void)
{
	struct rte_pci_device *dev = NULL;
	int ret = 0;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN ||
		    dev->kdrv == RTE_KDRV_NONE) {
			continue;
		} else {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * Is any one of the devices bound to uio?
 */
static inline int
pci_one_device_bound_uio(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_devargs *devargs;
	int need_check;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		devargs = dev->device.devargs;

		need_check = 0;
		switch (rte_pci_bus.bus.conf.scan_mode) {
		case RTE_BUS_SCAN_WHITELIST:
			if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
				need_check = 1;
			break;
		case RTE_BUS_SCAN_UNDEFINED:
		case RTE_BUS_SCAN_BLACKLIST:
			if (devargs == NULL ||
			    devargs->policy != RTE_DEV_BLACKLISTED)
				need_check = 1;
			break;
		}

		if (!need_check)
			continue;

		if (dev->kdrv == RTE_KDRV_IGB_UIO ||
		    dev->kdrv == RTE_KDRV_UIO_GENERIC) {
			return 1;
		}
	}
	return 0;
}

/*
 * Does any one of the devices use iova as va?
 */
static inline int
pci_one_device_has_iova_va(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_pci_driver *drv = NULL;

	FOREACH_DRIVER_ON_PCIBUS(drv) {
		if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) {
			FOREACH_DEVICE_ON_PCIBUS(dev) {
				if (dev->kdrv == RTE_KDRV_VFIO &&
				    rte_pci_match(drv, dev))
					return 1;
			}
		}
	}
	return 0;
}

/*
 * Get the IOMMU class of PCI devices on the bus.
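 *
 * RTE_IOVA_VA is reported only when at least one device is bound to
 * vfio-pci by a driver requesting RTE_PCI_DRV_IOVA_AS_VA, none of the
 * devices selected by the scan policy is bound to a UIO driver, VFIO
 * no-IOMMU mode is not enabled and the platform is not sPAPR; otherwise
 * RTE_IOVA_PA is used (RTE_IOVA_DC if no device is bound at all).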
 */
enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	bool is_bound;
	bool is_vfio_noiommu_enabled = true;
	bool has_iova_va;
	bool is_bound_uio;
	bool spapr_iommu =
#if defined(RTE_ARCH_PPC_64)
		true;
#else
		false;
#endif

	is_bound = pci_one_device_is_bound();
	if (!is_bound)
		return RTE_IOVA_DC;

	has_iova_va = pci_one_device_has_iova_va();
	is_bound_uio = pci_one_device_bound_uio();
#ifdef VFIO_PRESENT
	is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ?
					true : false;
#endif

	if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled &&
			!spapr_iommu)
		return RTE_IOVA_VA;

	if (has_iova_va) {
		RTE_LOG(WARNING, EAL, "Some devices want iova as va but pa will be used because: ");
		if (is_vfio_noiommu_enabled)
			RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n");
		if (is_bound_uio)
			RTE_LOG(WARNING, EAL, "some devices are bound to UIO\n");
		if (spapr_iommu)
			RTE_LOG(WARNING, EAL, "sPAPR IOMMU does not support IOVA as VA\n");
	}

	return RTE_IOVA_PA;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *device,
		void *buf, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &device->intr_handle;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		return pci_uio_read_config(intr_handle, buf, len, offset);

#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		return pci_vfio_read_config(intr_handle, buf, len, offset);
#endif
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}
}

/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *device,
		const void *buf, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &device->intr_handle;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		return pci_uio_write_config(intr_handle, buf, len, offset);

#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		return pci_vfio_write_config(intr_handle, buf, len, offset);
#endif
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}
}

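/*
 * pci_ioport_map() below scans /proc/ioports for an entry whose resource
 * name matches the device's PCI address. Illustrative line format (the
 * port range is made up):
 *
 *   e000-e0ff : 0000:04:00.0
 */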
#if defined(RTE_ARCH_X86)
static int
pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
	       struct rte_pci_ioport *p)
{
	uint16_t start, end;
	FILE *fp;
	char *line = NULL;
	char pci_id[16];
	int found = 0;
	size_t linesz;

	snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
		 dev->addr.domain, dev->addr.bus,
		 dev->addr.devid, dev->addr.function);

	fp = fopen("/proc/ioports", "r");
	if (fp == NULL) {
		RTE_LOG(ERR, EAL, "%s(): can't open ioports\n", __func__);
		return -1;
	}

	while (getdelim(&line, &linesz, '\n', fp) > 0) {
		char *ptr = line;
		char *left;
		int n;

		n = strcspn(ptr, ":");
		ptr[n] = 0;
		left = &ptr[n + 1];

		while (*left && isspace(*left))
			left++;

		if (!strncmp(left, pci_id, strlen(pci_id))) {
			found = 1;

			while (*ptr && isspace(*ptr))
				ptr++;

			sscanf(ptr, "%04hx-%04hx", &start, &end);

			break;
		}
	}

	free(line);
	fclose(fp);

	if (!found)
		return -1;

	p->base = start;
	RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);

	return 0;
}
#endif

int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_map(dev, bar, p);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		ret = pci_uio_ioport_map(dev, bar, p);
		break;
	case RTE_KDRV_UIO_GENERIC:
#if defined(RTE_ARCH_X86)
		ret = pci_ioport_map(dev, bar, p);
#else
		ret = pci_uio_ioport_map(dev, bar, p);
#endif
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		ret = pci_ioport_map(dev, bar, p);
#endif
		break;
	default:
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		pci_vfio_ioport_read(p, data, len, offset);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	case RTE_KDRV_UIO_GENERIC:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		pci_uio_ioport_read(p, data, len, offset);
#endif
		break;
	default:
		break;
	}
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		pci_vfio_ioport_write(p, data, len, offset);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	case RTE_KDRV_UIO_GENERIC:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		pci_uio_ioport_write(p, data, len, offset);
#endif
		break;
	default:
		break;
	}
}

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_unmap(p);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		ret = pci_uio_ioport_unmap(p);
		break;
	case RTE_KDRV_UIO_GENERIC:
#if defined(RTE_ARCH_X86)
		ret = 0;
#else
		ret = pci_uio_ioport_unmap(p);
#endif
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		ret = 0;
#endif
		break;
	default:
		break;
	}

	return ret;
}