/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <limits.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>

#if defined(RTE_ARCH_X86)
#include <machine/cpufunc.h>
#endif

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_launch.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_debug.h>
#include <rte_devargs.h>

#include "eal_filesystem.h"
#include "private.h"

/**
 * @file
 * PCI probing under BSD.
 *
 * This code simulates a PCI probe by reading the device information the
 * kernel exports through /dev/pci. Devices that DPDK should drive are
 * expected to be bound to the nic_uio kernel module, a very minimal
 * userland-I/O driver that only exposes the PCI BARs to applications and
 * enables bus mastering.
 */

extern struct rte_pci_bus rte_pci_bus;

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_PCI_KDRV_NIC_UIO:
		/* map resources for devices that use uio */
		ret = pci_uio_map_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_PCI_KDRV_NIC_UIO:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

void
pci_uio_free_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource *uio_res)
{
	rte_free(uio_res);

	if (dev->intr_handle.fd) {
		close(dev->intr_handle.fd);
		dev->intr_handle.fd = -1;
		dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	}
}
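/*
 * Note: the UIO resource helpers in this file are driven by the
 * bus-independent UIO code (pci_uio_map_resource() in the common PCI bus
 * layer), which allocates the mapping bookkeeping once per device and then
 * maps each populated BAR, roughly as sketched below (error handling
 * omitted, loop details may differ):
 *
 *	struct mapped_pci_resource *uio_res;
 *	int i, map_idx = 0;
 *
 *	pci_uio_alloc_resource(dev, &uio_res);
 *	for (i = 0; i != PCI_MAX_RESOURCE; i++) {
 *		if (dev->mem_resource[i].len == 0)
 *			continue;
 *		pci_uio_map_resource_by_index(dev, i, uio_res, map_idx++);
 *	}
 */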
int
pci_uio_alloc_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource **uio_res)
{
	char devname[PATH_MAX]; /* holds the /dev/uio@pci:bus:dev:func name */
	struct rte_pci_addr *loc;

	loc = &dev->addr;

	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
			dev->addr.bus, dev->addr.devid, dev->addr.function);

	/* check that the device node exists and is readable/writable */
	if (access(devname, R_OK | W_OK) < 0) {
		RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, skipping\n",
			loc->domain, loc->bus, loc->devid, loc->function);
		return 1;
	}

	/* save fd if in primary process */
	dev->intr_handle.fd = open(devname, O_RDWR);
	if (dev->intr_handle.fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			devname, strerror(errno));
		goto error;
	}
	dev->intr_handle.type = RTE_INTR_HANDLE_UIO;

	/* allocate the mapping details for secondary processes */
	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
	if (*uio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store uio mmap details\n", __func__);
		goto error;
	}

	strlcpy((*uio_res)->path, devname, sizeof((*uio_res)->path));
	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));

	return 0;

error:
	pci_uio_free_resource(dev, *uio_res);
	return -1;
}

int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
		struct mapped_pci_resource *uio_res, int map_idx)
{
	int fd;
	char *devname;
	void *mapaddr;
	uint64_t offset;
	uint64_t pagesz;
	struct pci_map *maps;

	maps = uio_res->maps;
	devname = uio_res->path;
	pagesz = sysconf(_SC_PAGESIZE);

	/* allocate memory to keep path */
	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
	if (maps[map_idx].path == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
				strerror(errno));
		return -1;
	}

	/* open the uio device so the BAR can be mmapped */
	fd = open(devname, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
				devname, strerror(errno));
		goto error;
	}

	/* nic_uio exposes BAR res_idx at an offset of res_idx pages */
	offset = res_idx * pagesz;
	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
			(size_t)dev->mem_resource[res_idx].len, 0);
	close(fd);
	if (mapaddr == NULL)
		goto error;

	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
	maps[map_idx].size = dev->mem_resource[res_idx].len;
	maps[map_idx].addr = mapaddr;
	maps[map_idx].offset = offset;
	strcpy(maps[map_idx].path, devname);
	dev->mem_resource[res_idx].addr = mapaddr;

	return 0;

error:
	rte_free(maps[map_idx].path);
	return -1;
}

static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	dev = malloc(sizeof(*dev));
	if (dev == NULL)
		return -1;

	memset(dev, 0, sizeof(*dev));
	dev->device.bus = &rte_pci_bus.bus;

	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id */
	dev->id.class_id = (conf->pc_class << 16) |
		(conf->pc_subclass << 8) |
		(conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = 0;

	pci_name_set(dev);

	/* FreeBSD has only one pass-through driver */
	dev->kdrv = RTE_PCI_KDRV_NIC_UIO;

	/* parse resources */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add it to the list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_name_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				free(dev);
			}
			return 0;
		}
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	free(dev);
	return 0;
}
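/*
 * The PCIOCGETCONF ioctl fills the caller-supplied match buffer in chunks:
 * when more devices remain than fit in the buffer, the kernel sets the
 * status field to PCI_GETCONF_MORE_DEVS and the ioctl is simply issued
 * again with the same pci_conf_io to fetch the next batch. rte_pci_scan()
 * below relies on this to walk the whole bus with a 16-entry buffer.
 */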
/*
 * Scan the content of the PCI bus and add the detected devices to the
 * device list. Call pci_scan_one() for each PCI entry found.
 */
int
rte_pci_scan(void)
{
	int fd;
	unsigned dev_count = 0;
	struct pci_conf matches[16];
	struct pci_conf_io conf_io = {
		.pat_buf_len = 0,
		.num_patterns = 0,
		.patterns = NULL,
		.match_buf_len = sizeof(matches),
		.matches = &matches[0],
	};
	struct rte_pci_addr pci_addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	do {
		unsigned i;

		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
			RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
					__func__, strerror(errno));
			goto error;
		}

		for (i = 0; i < conf_io.num_matches; i++) {
			pci_addr.domain = matches[i].pc_sel.pc_domain;
			pci_addr.bus = matches[i].pc_sel.pc_bus;
			pci_addr.devid = matches[i].pc_sel.pc_dev;
			pci_addr.function = matches[i].pc_sel.pc_func;

			if (rte_pci_ignore_device(&pci_addr))
				continue;

			if (pci_scan_one(fd, &matches[i]) < 0)
				goto error;
		}

		dev_count += conf_io.num_matches;
	} while (conf_io.status == PCI_GETCONF_MORE_DEVS);

	close(fd);

	RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
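/*
 * nic_uio offers no IOMMU support, so virtual addresses can never be used
 * as IOVAs on FreeBSD; the two hooks below therefore report no VA support
 * and pin the bus to physical addressing (RTE_IOVA_PA).
 */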
bool
pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
{
	return false;
}

enum rte_iova_mode
pci_device_iova_mode(const struct rte_pci_driver *pdrv __rte_unused,
		const struct rte_pci_device *pdev)
{
	if (pdev->kdrv != RTE_PCI_KDRV_NIC_UIO)
		RTE_LOG(DEBUG, EAL, "Unsupported kernel driver? Defaulting to IOVA as 'PA'\n");

	return RTE_IOVA_PA;
}

int
pci_update_device(const struct rte_pci_addr *addr)
{
	int fd;
	struct pci_conf matches[2];
	struct pci_match_conf match = {
		.pc_sel = {
			.pc_domain = addr->domain,
			.pc_bus = addr->bus,
			.pc_dev = addr->devid,
			.pc_func = addr->function,
		},
	};
	struct pci_conf_io conf_io = {
		.pat_buf_len = 0,
		.num_patterns = 1,
		.patterns = &match,
		.match_buf_len = sizeof(matches),
		.matches = &matches[0],
	};

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
		RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
				__func__, strerror(errno));
		goto error;
	}

	if (conf_io.num_matches != 1)
		goto error;

	if (pci_scan_one(fd, &matches[0]) < 0)
		goto error;

	close(fd);

	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *dev,
		void *buf, size_t len, off_t offset)
{
	int fd = -1;
	int size;
	/* Copy Linux implementation's behaviour */
	const int return_len = len;
	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
	};

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	while (len > 0) {
		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
		pi.pi_width = size;

		if (ioctl(fd, PCIOCREAD, &pi) < 0)
			goto error;
		memcpy(buf, &pi.pi_data, size);

		buf = (char *)buf + size;
		pi.pi_reg += size;
		len -= size;
	}
	close(fd);

	return return_len;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
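/*
 * Usage sketch (illustrative, not part of this file): enabling bus
 * mastering from application code with a read-modify-write of the 16-bit
 * command register at config-space offset 0x4 (bit 2 is the bus master
 * enable bit, per the PCI specification). Error handling is minimal.
 *
 *	uint16_t cmd;
 *
 *	if (rte_pci_read_config(dev, &cmd, sizeof(cmd), 0x4) < 0)
 *		return -1;
 *	cmd |= 0x4;
 *	if (rte_pci_write_config(dev, &cmd, sizeof(cmd), 0x4) < 0)
 *		return -1;
 */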
/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *dev,
		const void *buf, size_t len, off_t offset)
{
	int fd = -1;

	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
		.pi_width = len,
	};

	if (len == 3 || len > sizeof(pi.pi_data)) {
		RTE_LOG(ERR, EAL, "%s(): invalid pci write length\n", __func__);
		goto error;
	}

	/* copy only the requested bytes; the rest of pi_data stays zeroed */
	memcpy(&pi.pi_data, buf, len);

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
		goto error;

	close(fd);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret;

	switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_PCI_KDRV_NIC_UIO:
		if (rte_eal_iopl_init() != 0) {
			RTE_LOG(ERR, EAL, "%s(): insufficient ioport permissions for PCI device %s\n",
				__func__, dev->name);
			return -1;
		}
		if ((uintptr_t)dev->mem_resource[bar].addr <= UINT16_MAX) {
			p->base = (uintptr_t)dev->mem_resource[bar].addr;
			ret = 0;
		} else
			ret = -1;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	uint8_t *d;
	int size;
	unsigned short reg = p->base + offset;

	for (d = data; len > 0; d += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			*(uint32_t *)d = inl(reg);
		} else if (len >= 2) {
			size = 2;
			*(uint16_t *)d = inw(reg);
		} else {
			size = 1;
			*d = inb(reg);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_PCI_KDRV_NIC_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	default:
		break;
	}
}

static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	const uint8_t *s;
	int size;
	unsigned short reg = p->base + offset;

	for (s = data; len > 0; s += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			outl(reg, *(const uint32_t *)s);
		} else if (len >= 2) {
			size = 2;
			outw(reg, *(const uint16_t *)s);
		} else {
			size = 1;
			outb(reg, *s);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_PCI_KDRV_NIC_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	default:
		break;
	}
}

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret;

	switch (p->dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_PCI_KDRV_NIC_UIO:
		ret = 0;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	return ret;
}