/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <dirent.h>

#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
#include <rte_vfio.h>

#include "eal_filesystem.h"

#include "private.h"
#include "pci_init.h"

/**
 * @file
 * PCI probing using Linux sysfs.
 */

static int
pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
		size_t len)
{
	int count;
	char path[PATH_MAX];
	char *name;

	if (!filename || !dri_name)
		return -1;

	count = readlink(filename, path, PATH_MAX);
	if (count >= PATH_MAX)
		return -1;

	/* the device does not have a driver bound */
	if (count < 0)
		return 1;

	path[count] = '\0';

	name = strrchr(path, '/');
	if (name) {
		strlcpy(dri_name, name + 1, len);
		return 0;
	}

	return -1;
}

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_PCI_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			ret = pci_vfio_map_resource(dev);
#endif
		break;
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
		if (rte_eal_using_phys_addrs()) {
			/* map resources for devices that use uio */
			ret = pci_uio_map_resource(dev);
		}
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_PCI_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			pci_vfio_unmap_resource(dev);
#endif
		break;
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

static int
find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
	size_t sz = msl->len;
	void *end_va = RTE_PTR_ADD(msl->base_va, sz);
	void **max_va = arg;

	if (*max_va < end_va)
		*max_va = end_va;
	return 0;
}

void *
pci_find_max_end_va(void)
{
	void *va = NULL;

	rte_memseg_list_walk(find_max_end_va, &va);
	return va;
}

/* parse one line of the "resource" sysfs file (note that the 'line'
 * string is modified)
 */
int
pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
	uint64_t *end_addr, uint64_t *flags)
{
	union pci_resource_info {
		struct {
			char *phys_addr;
			char *end_addr;
			char *flags;
		};
		char *ptrs[PCI_RESOURCE_FMT_NVAL];
	} res_info;

	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}
	errno = 0;
	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
	*end_addr = strtoull(res_info.end_addr, NULL, 16);
	*flags = strtoull(res_info.flags, NULL, 16);
	if (errno != 0) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}

	return 0;
}
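/*
 * Illustrative example (values are made up, not taken from this file):
 * one line of /sys/bus/pci/devices/<D:B:D.F>/resource, as consumed by
 * pci_parse_one_sysfs_resource() above, looks like
 *
 *   0x00000000fb000000 0x00000000fb0fffff 0x0000000000040200
 *
 * i.e. "<phys_addr> <end_addr> <flags>" in hexadecimal, separated by
 * single spaces. A flags value with IORESOURCE_MEM set identifies a
 * memory BAR.
 */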

/* parse the "resource" sysfs file */
static int
pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
{
	FILE *f;
	char buf[BUFSIZ];
	int i;
	uint64_t phys_addr, end_addr, flags;

	f = fopen(filename, "r");
	if (f == NULL) {
		RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
		return -1;
	}

	for (i = 0; i < PCI_MAX_RESOURCE; i++) {

		if (fgets(buf, sizeof(buf), f) == NULL) {
			RTE_LOG(ERR, EAL,
				"%s(): cannot read resource\n", __func__);
			goto error;
		}
		if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
				&end_addr, &flags) < 0)
			goto error;

		if (flags & IORESOURCE_MEM) {
			dev->mem_resource[i].phys_addr = phys_addr;
			dev->mem_resource[i].len = end_addr - phys_addr + 1;
			/* not mapped for now */
			dev->mem_resource[i].addr = NULL;
		}
	}
	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}

/* Scan one pci sysfs entry, and fill the devices list from it. */
static int
pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
{
	char filename[PATH_MAX];
	unsigned long tmp;
	struct rte_pci_device *dev;
	char driver[PATH_MAX];
	int ret;

	dev = malloc(sizeof(*dev));
	if (dev == NULL)
		return -1;

	memset(dev, 0, sizeof(*dev));
	dev->device.bus = &rte_pci_bus.bus;
	dev->addr = *addr;

	/* get vendor id */
	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		pci_free(dev);
		return -1;
	}
	dev->id.vendor_id = (uint16_t)tmp;

	/* get device id */
	snprintf(filename, sizeof(filename), "%s/device", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		pci_free(dev);
		return -1;
	}
	dev->id.device_id = (uint16_t)tmp;

	/* get subsystem_vendor id */
	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		pci_free(dev);
		return -1;
	}
	dev->id.subsystem_vendor_id = (uint16_t)tmp;

	/* get subsystem_device id */
	snprintf(filename, sizeof(filename), "%s/subsystem_device",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		pci_free(dev);
		return -1;
	}
	dev->id.subsystem_device_id = (uint16_t)tmp;

	/* get class_id */
	snprintf(filename, sizeof(filename), "%s/class",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		pci_free(dev);
		return -1;
	}
	/* the lowest 24 bits are valid: class, subclass, program interface */
	dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
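	/*
	 * Illustrative example: an Ethernet NIC typically exposes
	 * "0x020000" in the sysfs "class" file, i.e. class 0x02
	 * (network controller), subclass 0x00 (Ethernet), prog-if
	 * 0x00; masking with RTE_CLASS_ANY_ID keeps exactly those
	 * 24 bits.
	 */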

	/* get max_vfs */
	dev->max_vfs = 0;
	snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
	if (!access(filename, F_OK) &&
			eal_parse_sysfs_value(filename, &tmp) == 0)
		dev->max_vfs = (uint16_t)tmp;
	else {
		/* for drivers other than igb_uio, kernel version >= 3.8 is required */
		snprintf(filename, sizeof(filename),
			 "%s/sriov_numvfs", dirname);
		if (!access(filename, F_OK) &&
				eal_parse_sysfs_value(filename, &tmp) == 0)
			dev->max_vfs = (uint16_t)tmp;
	}

	/* get numa node, default to SOCKET_ID_ANY if not present */
	snprintf(filename, sizeof(filename), "%s/numa_node", dirname);

	if (access(filename, F_OK) == 0 &&
	    eal_parse_sysfs_value(filename, &tmp) == 0)
		dev->device.numa_node = tmp;
	else
		dev->device.numa_node = SOCKET_ID_ANY;

	pci_common_set(dev);

	/* parse resources */
	snprintf(filename, sizeof(filename), "%s/resource", dirname);
	if (pci_parse_sysfs_resource(filename, dev) < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
		pci_free(dev);
		return -1;
	}

	/* parse driver */
	snprintf(filename, sizeof(filename), "%s/driver", dirname);
	ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Failed to get kernel driver\n");
		pci_free(dev);
		return -1;
	}

	if (!ret) {
		if (!strcmp(driver, "vfio-pci"))
			dev->kdrv = RTE_PCI_KDRV_VFIO;
		else if (!strcmp(driver, "igb_uio"))
			dev->kdrv = RTE_PCI_KDRV_IGB_UIO;
		else if (!strcmp(driver, "uio_pci_generic"))
			dev->kdrv = RTE_PCI_KDRV_UIO_GENERIC;
		else
			dev->kdrv = RTE_PCI_KDRV_UNKNOWN;
	} else {
		pci_free(dev);
		return 0;
	}
	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;

			if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				if (!rte_dev_is_probed(&dev2->device)) {
					dev2->kdrv = dev->kdrv;
					dev2->max_vfs = dev->max_vfs;
					dev2->id = dev->id;
					pci_common_set(dev2);
					memmove(dev2->mem_resource,
						dev->mem_resource,
						sizeof(dev->mem_resource));
				} else {
					/*
					 * The device is already plugged and its
					 * driver is probed (this happens when
					 * rte_dev_probe() is called, which
					 * rescans every device on the bus), so
					 * nothing needs to be done here unless
					 * the scanned data differs.
					 */
					if (dev2->kdrv != dev->kdrv ||
					    dev2->max_vfs != dev->max_vfs ||
					    memcmp(&dev2->id, &dev->id, sizeof(dev2->id)))
						/*
						 * This should not happen, but
						 * it is still possible if a
						 * device is unbound from vfio
						 * or uio before the hotplug
						 * remove event and then rebound
						 * with a different
						 * configuration. Log an error
						 * to flag it.
						 */
						RTE_LOG(ERR, EAL, "Unexpected device scan at %s!\n",
							filename);
					else if (dev2->device.devargs !=
						 dev->device.devargs) {
						rte_devargs_remove(dev2->device.devargs);
						pci_common_set(dev2);
					}
				}
				pci_free(dev);
			}
			return 0;
		}

		rte_pci_add_device(dev);
	}

	return 0;
}

/*
 * split up a pci address into its constituent parts.
 */
static int
parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
{
	/* first split on ':' */
	union splitaddr {
		struct {
			char *domain;
			char *bus;
			char *devid;
			char *function;
		};
		char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
	} splitaddr;

	char *buf_copy = strndup(buf, bufsize);
	if (buf_copy == NULL)
		return -1;

	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
			!= PCI_FMT_NVAL - 1)
		goto error;
	/* final split is on '.' between devid and function */
	splitaddr.function = strchr(splitaddr.devid, '.');
	if (splitaddr.function == NULL)
		goto error;
	*splitaddr.function++ = '\0';

	/* now convert to int values */
	errno = 0;
	addr->domain = strtoul(splitaddr.domain, NULL, 16);
	addr->bus = strtoul(splitaddr.bus, NULL, 16);
	addr->devid = strtoul(splitaddr.devid, NULL, 16);
	addr->function = strtoul(splitaddr.function, NULL, 10);
	if (errno != 0)
		goto error;

	free(buf_copy); /* free the copy made with strndup */
	return 0;
error:
	free(buf_copy);
	return -1;
}
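/*
 * Illustrative example: a sysfs entry named "0000:3b:00.1" is parsed by
 * parse_pci_addr_format() into domain 0x0000, bus 0x3b, devid 0x00 and
 * function 1 (domain, bus and devid are hexadecimal, the function is
 * decimal).
 */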

/*
 * Scan the content of the PCI bus and add the detected devices
 * to the devices list.
 */
int
rte_pci_scan(void)
{
	struct dirent *e;
	DIR *dir;
	char dirname[PATH_MAX];
	struct rte_pci_addr addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	dir = opendir(rte_pci_get_sysfs_path());
	if (dir == NULL) {
		RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
			__func__, strerror(errno));
		return -1;
	}

	while ((e = readdir(dir)) != NULL) {
		if (e->d_name[0] == '.')
			continue;

		if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
			continue;

		if (rte_pci_ignore_device(&addr))
			continue;

		snprintf(dirname, sizeof(dirname), "%s/%s",
			 rte_pci_get_sysfs_path(), e->d_name);

		if (pci_scan_one(dirname, &addr) < 0)
			goto error;
	}
	closedir(dir);
	return 0;

error:
	closedir(dir);
	return -1;
}

#if defined(RTE_ARCH_X86)
bool
pci_device_iommu_support_va(const struct rte_pci_device *dev)
{
#define VTD_CAP_MGAW_SHIFT	16
#define VTD_CAP_MGAW_MASK	(0x3fULL << VTD_CAP_MGAW_SHIFT)
	const struct rte_pci_addr *addr = &dev->addr;
	char filename[PATH_MAX];
	FILE *fp;
	uint64_t mgaw, vtd_cap_reg = 0;

	snprintf(filename, sizeof(filename),
		 "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap",
		 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
		 addr->function);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/* We don't have an Intel IOMMU, assume VA supported */
		if (errno == ENOENT)
			return true;

		RTE_LOG(ERR, EAL, "%s(): can't open %s: %s\n",
			__func__, filename, strerror(errno));
		return false;
	}

	/* We have an Intel IOMMU */
	if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) {
		RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename);
		fclose(fp);
		return false;
	}

	fclose(fp);

	mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;

	/*
	 * Assume no limitation for now: we cannot know at this point
	 * because memory has not been initialized yet. Setting the DMA
	 * mask forces a check once memory initialization is done. We
	 * cannot fall back to IOVA as PA here, but if the DMA check
	 * fails, the error message should advise using '--iova-mode pa'
	 * if IOVA as VA is the current mode.
	 */
	rte_mem_set_dma_mask(mgaw);
	return true;
}
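/*
 * Worked example (illustrative value): if the VT-d capability register
 * reports an MGAW field of 0x26, the computation above yields
 * mgaw = 0x26 + 1 = 39, i.e. the IOMMU can translate 39-bit guest
 * addresses, and rte_mem_set_dma_mask(39) records that limit so it is
 * validated once memory initialization has completed.
 */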
#elif defined(RTE_ARCH_PPC_64)
bool
pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
{
	/*
	 * All POWER systems support an IOMMU, but only IOMMUv2 supports
	 * IOVA = VA in DPDK. Check the contents of /proc/cpuinfo to
	 * identify the system we are running on.
	 *
	 * Platform | Model | IOMMU   | VA? | Comment
	 * ---------+-------+---------+-----+---------------------------------
	 * PowerNV  | N/A   | IOMMUv2 | Yes | OpenPOWER (Bare Metal)
	 * pSeries  | ~qemu | IOMMUv2 | Yes | PowerVM Logical Partition (LPAR)
	 * pSeries  | qemu  | IOMMUv1 | No  | QEMU Virtual Machine
	 */

	char *line = NULL;
	size_t len = 0;
	char filename[PATH_MAX] = "/proc/cpuinfo";
	FILE *fp = fopen(filename, "r");
	bool pseries = false, powernv = false, qemu = false;
	bool ret = false;

	if (fp == NULL) {
		RTE_LOG(ERR, EAL, "%s(): can't open %s: %s\n",
			__func__, filename, strerror(errno));
		return ret;
	}

	/* Check the "platform" and "model" fields */
	while (getline(&line, &len, fp) != -1) {
		if (strstr(line, "platform") != NULL) {
			if (strstr(line, "PowerNV") != NULL) {
				RTE_LOG(DEBUG, EAL, "Running on a PowerNV platform\n");
				powernv = true;
			} else if (strstr(line, "pSeries") != NULL) {
				RTE_LOG(DEBUG, EAL, "Running on a pSeries platform\n");
				pseries = true;
			}
		} else if (strstr(line, "model") != NULL) {
			if (strstr(line, "qemu") != NULL) {
				RTE_LOG(DEBUG, EAL, "Found qemu emulation\n");
				qemu = true;
			}
		}
	}

	free(line);
	fclose(fp);

	if (powernv || (pseries && !qemu))
		ret = true;
	return ret;
}
#else
bool
pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
{
	return true;
}
#endif

enum rte_iova_mode
pci_device_iova_mode(const struct rte_pci_driver *pdrv,
		     const struct rte_pci_device *pdev)
{
	enum rte_iova_mode iova_mode = RTE_IOVA_DC;

	switch (pdev->kdrv) {
	case RTE_PCI_KDRV_VFIO: {
#ifdef VFIO_PRESENT
		static int is_vfio_noiommu_enabled = -1;

		if (is_vfio_noiommu_enabled == -1) {
			if (rte_vfio_noiommu_is_enabled() == 1)
				is_vfio_noiommu_enabled = 1;
			else
				is_vfio_noiommu_enabled = 0;
		}
		if (is_vfio_noiommu_enabled != 0)
			iova_mode = RTE_IOVA_PA;
		else if ((pdrv->drv_flags & RTE_PCI_DRV_NEED_IOVA_AS_VA) != 0)
			iova_mode = RTE_IOVA_VA;
#endif
		break;
	}

	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
		iova_mode = RTE_IOVA_PA;
		break;

	default:
		if ((pdrv->drv_flags & RTE_PCI_DRV_NEED_IOVA_AS_VA) != 0)
			iova_mode = RTE_IOVA_VA;
		break;
	}
	return iova_mode;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *device,
		void *buf, size_t len, off_t offset)
{
	char devname[RTE_DEV_NAME_MAX_LEN] = "";
	const struct rte_intr_handle *intr_handle = device->intr_handle;

	switch (device->kdrv) {
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
		return pci_uio_read_config(intr_handle, buf, len, offset);
#ifdef VFIO_PRESENT
	case RTE_PCI_KDRV_VFIO:
		return pci_vfio_read_config(intr_handle, buf, len, offset);
#endif
	default:
		rte_pci_device_name(&device->addr, devname,
				    RTE_DEV_NAME_MAX_LEN);
		RTE_LOG(ERR, EAL,
			"Unknown driver type for %s\n", devname);
		return -1;
	}
}
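/*
 * Usage sketch (illustrative, not part of this file): a driver can read
 * the 16-bit PCI command register at config space offset 0x04 with
 *
 *   uint16_t cmd;
 *   if (rte_pci_read_config(dev, &cmd, sizeof(cmd), 0x04) < 0)
 *       handle_error();
 *
 * The access is dispatched to the uio or vfio backend according to the
 * kernel driver bound to the device (dev->kdrv).
 */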

/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *device,
		const void *buf, size_t len, off_t offset)
{
	char devname[RTE_DEV_NAME_MAX_LEN] = "";
	const struct rte_intr_handle *intr_handle = device->intr_handle;

	switch (device->kdrv) {
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
		return pci_uio_write_config(intr_handle, buf, len, offset);
#ifdef VFIO_PRESENT
	case RTE_PCI_KDRV_VFIO:
		return pci_vfio_write_config(intr_handle, buf, len, offset);
#endif
	default:
		rte_pci_device_name(&device->addr, devname,
				    RTE_DEV_NAME_MAX_LEN);
		RTE_LOG(ERR, EAL,
			"Unknown driver type for %s\n", devname);
		return -1;
	}
}

int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		   struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_PCI_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_map(dev, bar, p);
		break;
#endif
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
		ret = pci_uio_ioport_map(dev, bar, p);
		break;
	default:
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		    void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_PCI_KDRV_VFIO:
		pci_vfio_ioport_read(p, data, len, offset);
		break;
#endif
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	default:
		break;
	}
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		     const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_PCI_KDRV_VFIO:
		pci_vfio_ioport_write(p, data, len, offset);
		break;
#endif
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	default:
		break;
	}
}

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_PCI_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_unmap(p);
		break;
#endif
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
		ret = pci_uio_ioport_unmap(p);
		break;
	default:
		break;
	}

	return ret;
}
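/*
 * Usage sketch (illustrative only, hypothetical BAR index): an I/O port
 * BAR is accessed through the rte_pci_ioport_* wrappers above, e.g.
 *
 *   struct rte_pci_ioport io;
 *   uint8_t val;
 *
 *   if (rte_pci_ioport_map(dev, 0, &io) == 0) {
 *       rte_pci_ioport_read(&io, &val, sizeof(val), 0);
 *       rte_pci_ioport_unmap(&io);
 *   }
 *
 * As with config space accesses, the uio or vfio backend is selected
 * from dev->kdrv.
 */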