/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <limits.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>

#if defined(RTE_ARCH_X86)
#include <machine/cpufunc.h>
#endif

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_launch.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_debug.h>
#include <rte_devargs.h>

#include "eal_filesystem.h"
#include "private.h"

/**
 * @file
 * PCI probing under BSD
 *
 * This code probes the PCI bus by querying the kernel through the
 * /dev/pci ioctl interface (PCIOCGETCONF/PCIOCGETBAR) rather than by
 * parsing sysfs as on Linux. Devices are expected to be bound to the
 * nic_uio kernel module, a very minimal pass-through driver that only
 * provides applications access to the PCI BARs and enables bus
 * mastering.
56 */ 57 58 extern struct rte_pci_bus rte_pci_bus; 59 60 /* Map pci device */ 61 int 62 rte_pci_map_device(struct rte_pci_device *dev) 63 { 64 int ret = -1; 65 66 /* try mapping the NIC resources */ 67 switch (dev->kdrv) { 68 case RTE_KDRV_NIC_UIO: 69 /* map resources for devices that use uio */ 70 ret = pci_uio_map_resource(dev); 71 break; 72 default: 73 RTE_LOG(DEBUG, EAL, 74 " Not managed by a supported kernel driver, skipped\n"); 75 ret = 1; 76 break; 77 } 78 79 return ret; 80 } 81 82 /* Unmap pci device */ 83 void 84 rte_pci_unmap_device(struct rte_pci_device *dev) 85 { 86 /* try unmapping the NIC resources */ 87 switch (dev->kdrv) { 88 case RTE_KDRV_NIC_UIO: 89 /* unmap resources for devices that use uio */ 90 pci_uio_unmap_resource(dev); 91 break; 92 default: 93 RTE_LOG(DEBUG, EAL, 94 " Not managed by a supported kernel driver, skipped\n"); 95 break; 96 } 97 } 98 99 void 100 pci_uio_free_resource(struct rte_pci_device *dev, 101 struct mapped_pci_resource *uio_res) 102 { 103 rte_free(uio_res); 104 105 if (dev->intr_handle.fd) { 106 close(dev->intr_handle.fd); 107 dev->intr_handle.fd = -1; 108 dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; 109 } 110 } 111 112 int 113 pci_uio_alloc_resource(struct rte_pci_device *dev, 114 struct mapped_pci_resource **uio_res) 115 { 116 char devname[PATH_MAX]; /* contains the /dev/uioX */ 117 struct rte_pci_addr *loc; 118 119 loc = &dev->addr; 120 121 snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u", 122 dev->addr.bus, dev->addr.devid, dev->addr.function); 123 124 if (access(devname, O_RDWR) < 0) { 125 RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, " 126 "skipping\n", loc->domain, loc->bus, loc->devid, loc->function); 127 return 1; 128 } 129 130 /* save fd if in primary process */ 131 dev->intr_handle.fd = open(devname, O_RDWR); 132 if (dev->intr_handle.fd < 0) { 133 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", 134 devname, strerror(errno)); 135 goto error; 136 } 137 dev->intr_handle.type = 
RTE_INTR_HANDLE_UIO; 138 139 /* allocate the mapping details for secondary processes*/ 140 *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0); 141 if (*uio_res == NULL) { 142 RTE_LOG(ERR, EAL, 143 "%s(): cannot store uio mmap details\n", __func__); 144 goto error; 145 } 146 147 strlcpy((*uio_res)->path, devname, sizeof((*uio_res)->path)); 148 memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr)); 149 150 return 0; 151 152 error: 153 pci_uio_free_resource(dev, *uio_res); 154 return -1; 155 } 156 157 int 158 pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx, 159 struct mapped_pci_resource *uio_res, int map_idx) 160 { 161 int fd; 162 char *devname; 163 void *mapaddr; 164 uint64_t offset; 165 uint64_t pagesz; 166 struct pci_map *maps; 167 168 maps = uio_res->maps; 169 devname = uio_res->path; 170 pagesz = sysconf(_SC_PAGESIZE); 171 172 /* allocate memory to keep path */ 173 maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0); 174 if (maps[map_idx].path == NULL) { 175 RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n", 176 strerror(errno)); 177 return -1; 178 } 179 180 /* 181 * open resource file, to mmap it 182 */ 183 fd = open(devname, O_RDWR); 184 if (fd < 0) { 185 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", 186 devname, strerror(errno)); 187 goto error; 188 } 189 190 /* if matching map is found, then use it */ 191 offset = res_idx * pagesz; 192 mapaddr = pci_map_resource(NULL, fd, (off_t)offset, 193 (size_t)dev->mem_resource[res_idx].len, 0); 194 close(fd); 195 if (mapaddr == MAP_FAILED) 196 goto error; 197 198 maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr; 199 maps[map_idx].size = dev->mem_resource[res_idx].len; 200 maps[map_idx].addr = mapaddr; 201 maps[map_idx].offset = offset; 202 strcpy(maps[map_idx].path, devname); 203 dev->mem_resource[res_idx].addr = mapaddr; 204 205 return 0; 206 207 error: 208 rte_free(maps[map_idx].path); 209 return -1; 210 } 211 212 static int 213 pci_scan_one(int 
dev_pci_fd, struct pci_conf *conf) 214 { 215 struct rte_pci_device *dev; 216 struct pci_bar_io bar; 217 unsigned i, max; 218 219 dev = malloc(sizeof(*dev)); 220 if (dev == NULL) { 221 return -1; 222 } 223 224 memset(dev, 0, sizeof(*dev)); 225 dev->device.bus = &rte_pci_bus.bus; 226 227 dev->addr.domain = conf->pc_sel.pc_domain; 228 dev->addr.bus = conf->pc_sel.pc_bus; 229 dev->addr.devid = conf->pc_sel.pc_dev; 230 dev->addr.function = conf->pc_sel.pc_func; 231 232 /* get vendor id */ 233 dev->id.vendor_id = conf->pc_vendor; 234 235 /* get device id */ 236 dev->id.device_id = conf->pc_device; 237 238 /* get subsystem_vendor id */ 239 dev->id.subsystem_vendor_id = conf->pc_subvendor; 240 241 /* get subsystem_device id */ 242 dev->id.subsystem_device_id = conf->pc_subdevice; 243 244 /* get class id */ 245 dev->id.class_id = (conf->pc_class << 16) | 246 (conf->pc_subclass << 8) | 247 (conf->pc_progif); 248 249 /* TODO: get max_vfs */ 250 dev->max_vfs = 0; 251 252 /* FreeBSD has no NUMA support (yet) */ 253 dev->device.numa_node = 0; 254 255 pci_name_set(dev); 256 257 /* FreeBSD has only one pass through driver */ 258 dev->kdrv = RTE_KDRV_NIC_UIO; 259 260 /* parse resources */ 261 switch (conf->pc_hdr & PCIM_HDRTYPE) { 262 case PCIM_HDRTYPE_NORMAL: 263 max = PCIR_MAX_BAR_0; 264 break; 265 case PCIM_HDRTYPE_BRIDGE: 266 max = PCIR_MAX_BAR_1; 267 break; 268 case PCIM_HDRTYPE_CARDBUS: 269 max = PCIR_MAX_BAR_2; 270 break; 271 default: 272 goto skipdev; 273 } 274 275 for (i = 0; i <= max; i++) { 276 bar.pbi_sel = conf->pc_sel; 277 bar.pbi_reg = PCIR_BAR(i); 278 if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0) 279 continue; 280 281 dev->mem_resource[i].len = bar.pbi_length; 282 if (PCI_BAR_IO(bar.pbi_base)) { 283 dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf)); 284 continue; 285 } 286 dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf); 287 } 288 289 /* device is valid, add in list (sorted) */ 290 if (TAILQ_EMPTY(&rte_pci_bus.device_list)) { 291 
rte_pci_add_device(dev); 292 } 293 else { 294 struct rte_pci_device *dev2 = NULL; 295 int ret; 296 297 TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) { 298 ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr); 299 if (ret > 0) 300 continue; 301 else if (ret < 0) { 302 rte_pci_insert_device(dev2, dev); 303 } else { /* already registered */ 304 dev2->kdrv = dev->kdrv; 305 dev2->max_vfs = dev->max_vfs; 306 pci_name_set(dev2); 307 memmove(dev2->mem_resource, 308 dev->mem_resource, 309 sizeof(dev->mem_resource)); 310 free(dev); 311 } 312 return 0; 313 } 314 rte_pci_add_device(dev); 315 } 316 317 return 0; 318 319 skipdev: 320 free(dev); 321 return 0; 322 } 323 324 /* 325 * Scan the content of the PCI bus, and add the devices in the devices 326 * list. Call pci_scan_one() for each pci entry found. 327 */ 328 int 329 rte_pci_scan(void) 330 { 331 int fd; 332 unsigned dev_count = 0; 333 struct pci_conf matches[16]; 334 struct pci_conf_io conf_io = { 335 .pat_buf_len = 0, 336 .num_patterns = 0, 337 .patterns = NULL, 338 .match_buf_len = sizeof(matches), 339 .matches = &matches[0], 340 }; 341 struct rte_pci_addr pci_addr; 342 343 /* for debug purposes, PCI can be disabled */ 344 if (!rte_eal_has_pci()) 345 return 0; 346 347 fd = open("/dev/pci", O_RDONLY); 348 if (fd < 0) { 349 RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__); 350 goto error; 351 } 352 353 do { 354 unsigned i; 355 if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) { 356 RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n", 357 __func__, strerror(errno)); 358 goto error; 359 } 360 361 for (i = 0; i < conf_io.num_matches; i++) { 362 pci_addr.domain = matches[i].pc_sel.pc_domain; 363 pci_addr.bus = matches[i].pc_sel.pc_bus; 364 pci_addr.devid = matches[i].pc_sel.pc_dev; 365 pci_addr.function = matches[i].pc_sel.pc_func; 366 367 if (rte_pci_ignore_device(&pci_addr)) 368 continue; 369 370 if (pci_scan_one(fd, &matches[i]) < 0) 371 goto error; 372 } 373 374 dev_count += conf_io.num_matches; 375 } 
while(conf_io.status == PCI_GETCONF_MORE_DEVS); 376 377 close(fd); 378 379 RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count); 380 return 0; 381 382 error: 383 if (fd >= 0) 384 close(fd); 385 return -1; 386 } 387 388 bool 389 pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev) 390 { 391 return false; 392 } 393 394 enum rte_iova_mode 395 pci_device_iova_mode(const struct rte_pci_driver *pdrv __rte_unused, 396 const struct rte_pci_device *pdev) 397 { 398 /* Supports only RTE_KDRV_NIC_UIO */ 399 if (pdev->kdrv != RTE_KDRV_NIC_UIO) 400 RTE_LOG(DEBUG, EAL, "Unsupported kernel driver? Defaulting to IOVA as 'PA'\n"); 401 402 return RTE_IOVA_PA; 403 } 404 405 int 406 pci_update_device(const struct rte_pci_addr *addr) 407 { 408 int fd; 409 struct pci_conf matches[2]; 410 struct pci_match_conf match = { 411 .pc_sel = { 412 .pc_domain = addr->domain, 413 .pc_bus = addr->bus, 414 .pc_dev = addr->devid, 415 .pc_func = addr->function, 416 }, 417 }; 418 struct pci_conf_io conf_io = { 419 .pat_buf_len = 0, 420 .num_patterns = 1, 421 .patterns = &match, 422 .match_buf_len = sizeof(matches), 423 .matches = &matches[0], 424 }; 425 426 fd = open("/dev/pci", O_RDONLY); 427 if (fd < 0) { 428 RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__); 429 goto error; 430 } 431 432 if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) { 433 RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n", 434 __func__, strerror(errno)); 435 goto error; 436 } 437 438 if (conf_io.num_matches != 1) 439 goto error; 440 441 if (pci_scan_one(fd, &matches[0]) < 0) 442 goto error; 443 444 close(fd); 445 446 return 0; 447 448 error: 449 if (fd >= 0) 450 close(fd); 451 return -1; 452 } 453 454 /* Read PCI config space. 
*/ 455 int rte_pci_read_config(const struct rte_pci_device *dev, 456 void *buf, size_t len, off_t offset) 457 { 458 int fd = -1; 459 int size; 460 /* Copy Linux implementation's behaviour */ 461 const int return_len = len; 462 struct pci_io pi = { 463 .pi_sel = { 464 .pc_domain = dev->addr.domain, 465 .pc_bus = dev->addr.bus, 466 .pc_dev = dev->addr.devid, 467 .pc_func = dev->addr.function, 468 }, 469 .pi_reg = offset, 470 }; 471 472 fd = open("/dev/pci", O_RDWR); 473 if (fd < 0) { 474 RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__); 475 goto error; 476 } 477 478 while (len > 0) { 479 size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1); 480 pi.pi_width = size; 481 482 if (ioctl(fd, PCIOCREAD, &pi) < 0) 483 goto error; 484 memcpy(buf, &pi.pi_data, size); 485 486 buf = (char *)buf + size; 487 pi.pi_reg += size; 488 len -= size; 489 } 490 close(fd); 491 492 return return_len; 493 494 error: 495 if (fd >= 0) 496 close(fd); 497 return -1; 498 } 499 500 /* Write PCI config space. */ 501 int rte_pci_write_config(const struct rte_pci_device *dev, 502 const void *buf, size_t len, off_t offset) 503 { 504 int fd = -1; 505 506 struct pci_io pi = { 507 .pi_sel = { 508 .pc_domain = dev->addr.domain, 509 .pc_bus = dev->addr.bus, 510 .pc_dev = dev->addr.devid, 511 .pc_func = dev->addr.function, 512 }, 513 .pi_reg = offset, 514 .pi_data = *(const uint32_t *)buf, 515 .pi_width = len, 516 }; 517 518 if (len == 3 || len > sizeof(pi.pi_data)) { 519 RTE_LOG(ERR, EAL, "%s(): invalid pci read length\n", __func__); 520 goto error; 521 } 522 523 memcpy(&pi.pi_data, buf, len); 524 525 fd = open("/dev/pci", O_RDWR); 526 if (fd < 0) { 527 RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__); 528 goto error; 529 } 530 531 if (ioctl(fd, PCIOCWRITE, &pi) < 0) 532 goto error; 533 534 close(fd); 535 return 0; 536 537 error: 538 if (fd >= 0) 539 close(fd); 540 return -1; 541 } 542 543 int 544 rte_pci_ioport_map(struct rte_pci_device *dev, int bar, 545 struct rte_pci_ioport *p) 546 { 
547 int ret; 548 549 switch (dev->kdrv) { 550 #if defined(RTE_ARCH_X86) 551 case RTE_KDRV_NIC_UIO: 552 if (rte_eal_iopl_init() != 0) { 553 RTE_LOG(ERR, EAL, "%s(): insufficient ioport permissions for PCI device %s\n", 554 __func__, dev->name); 555 return -1; 556 } 557 if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) { 558 p->base = (uintptr_t)dev->mem_resource[bar].addr; 559 ret = 0; 560 } else 561 ret = -1; 562 break; 563 #endif 564 default: 565 ret = -1; 566 break; 567 } 568 569 if (!ret) 570 p->dev = dev; 571 572 return ret; 573 } 574 575 static void 576 pci_uio_ioport_read(struct rte_pci_ioport *p, 577 void *data, size_t len, off_t offset) 578 { 579 #if defined(RTE_ARCH_X86) 580 uint8_t *d; 581 int size; 582 unsigned short reg = p->base + offset; 583 584 for (d = data; len > 0; d += size, reg += size, len -= size) { 585 if (len >= 4) { 586 size = 4; 587 *(uint32_t *)d = inl(reg); 588 } else if (len >= 2) { 589 size = 2; 590 *(uint16_t *)d = inw(reg); 591 } else { 592 size = 1; 593 *d = inb(reg); 594 } 595 } 596 #else 597 RTE_SET_USED(p); 598 RTE_SET_USED(data); 599 RTE_SET_USED(len); 600 RTE_SET_USED(offset); 601 #endif 602 } 603 604 void 605 rte_pci_ioport_read(struct rte_pci_ioport *p, 606 void *data, size_t len, off_t offset) 607 { 608 switch (p->dev->kdrv) { 609 case RTE_KDRV_NIC_UIO: 610 pci_uio_ioport_read(p, data, len, offset); 611 break; 612 default: 613 break; 614 } 615 } 616 617 static void 618 pci_uio_ioport_write(struct rte_pci_ioport *p, 619 const void *data, size_t len, off_t offset) 620 { 621 #if defined(RTE_ARCH_X86) 622 const uint8_t *s; 623 int size; 624 unsigned short reg = p->base + offset; 625 626 for (s = data; len > 0; s += size, reg += size, len -= size) { 627 if (len >= 4) { 628 size = 4; 629 outl(reg, *(const uint32_t *)s); 630 } else if (len >= 2) { 631 size = 2; 632 outw(reg, *(const uint16_t *)s); 633 } else { 634 size = 1; 635 outb(reg, *s); 636 } 637 } 638 #else 639 RTE_SET_USED(p); 640 RTE_SET_USED(data); 641 
RTE_SET_USED(len); 642 RTE_SET_USED(offset); 643 #endif 644 } 645 646 void 647 rte_pci_ioport_write(struct rte_pci_ioport *p, 648 const void *data, size_t len, off_t offset) 649 { 650 switch (p->dev->kdrv) { 651 case RTE_KDRV_NIC_UIO: 652 pci_uio_ioport_write(p, data, len, offset); 653 break; 654 default: 655 break; 656 } 657 } 658 659 int 660 rte_pci_ioport_unmap(struct rte_pci_ioport *p) 661 { 662 int ret; 663 664 switch (p->dev->kdrv) { 665 #if defined(RTE_ARCH_X86) 666 case RTE_KDRV_NIC_UIO: 667 ret = 0; 668 break; 669 #endif 670 default: 671 ret = -1; 672 break; 673 } 674 675 return ret; 676 } 677