/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <limits.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>

#if defined(RTE_ARCH_X86)
#include <machine/cpufunc.h>
#endif

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_launch.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_debug.h>
#include <rte_devargs.h>

#include "eal_filesystem.h"
#include "private.h"

/**
 * @file
 * PCI probing under BSD.
 */

extern struct rte_pci_bus rte_pci_bus;

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_PCI_KDRV_NIC_UIO:
		/* map resources for devices that use uio */
		ret = pci_uio_map_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"  Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_PCI_KDRV_NIC_UIO:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			"  Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

void
pci_uio_free_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource *uio_res)
{
	rte_free(uio_res);

	if (rte_intr_fd_get(dev->intr_handle)) {
		close(rte_intr_fd_get(dev->intr_handle));
		rte_intr_fd_set(dev->intr_handle, -1);
		rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN);
	}
}

int
pci_uio_alloc_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource **uio_res)
{
	char devname[PATH_MAX]; /* contains the /dev/uioX */
	struct rte_pci_addr *loc;

	loc = &dev->addr;

	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
			dev->addr.bus, dev->addr.devid, dev->addr.function);

	if (access(devname, O_RDWR) < 0) {
		RTE_LOG(WARNING, EAL, "  "PCI_PRI_FMT" not managed by UIO driver, "
				"skipping\n", loc->domain, loc->bus,
				loc->devid, loc->function);
		return 1;
	}

	/* save fd if in primary process */
	if (rte_intr_fd_set(dev->intr_handle, open(devname, O_RDWR))) {
		RTE_LOG(WARNING, EAL, "Failed to save fd\n");
		goto error;
	}

	if (rte_intr_fd_get(dev->intr_handle) < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			devname, strerror(errno));
		goto error;
	}

	if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UIO))
		goto error;

	/* allocate the mapping details for secondary processes */
	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
	if (*uio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store uio mmap details\n", __func__);
		goto error;
	}

	strlcpy((*uio_res)->path, devname, sizeof((*uio_res)->path));
	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));

	return 0;

error:
	pci_uio_free_resource(dev, *uio_res);
	return -1;
}
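
/*
 * Editorial sketch, not part of the driver: the two helpers above are
 * assumed to be driven by the bus-independent pci_uio_map_resource() code,
 * which allocates the bookkeeping once per device and then maps each BAR
 * that has a physical address, roughly as below. Treat this as an
 * illustration of how the helpers in this file fit together, not as the
 * authoritative common code.
 *
 *	struct mapped_pci_resource *uio_res;
 *	int i, map_idx = 0;
 *
 *	if (pci_uio_alloc_resource(dev, &uio_res) != 0)
 *		return -1;
 *	for (i = 0; i != PCI_MAX_RESOURCE; i++) {
 *		if (dev->mem_resource[i].phys_addr == 0)
 *			continue;
 *		if (pci_uio_map_resource_by_index(dev, i, uio_res, map_idx) < 0)
 *			return -1;
 *		map_idx++;
 *	}
 */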
int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
		struct mapped_pci_resource *uio_res, int map_idx)
{
	int fd;
	char *devname;
	void *mapaddr;
	uint64_t offset;
	uint64_t pagesz;
	struct pci_map *maps;

	maps = uio_res->maps;
	devname = uio_res->path;
	pagesz = sysconf(_SC_PAGESIZE);

	/* allocate memory to keep path */
	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
	if (maps[map_idx].path == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
				strerror(errno));
		return -1;
	}

	/*
	 * open resource file, to mmap it
	 */
	fd = open(devname, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			devname, strerror(errno));
		goto error;
	}

	/* if matching map is found, then use it */
	offset = res_idx * pagesz;
	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
			(size_t)dev->mem_resource[res_idx].len, 0);
	close(fd);
	if (mapaddr == NULL)
		goto error;

	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
	maps[map_idx].size = dev->mem_resource[res_idx].len;
	maps[map_idx].addr = mapaddr;
	maps[map_idx].offset = offset;
	strcpy(maps[map_idx].path, devname);
	dev->mem_resource[res_idx].addr = mapaddr;

	return 0;

error:
	rte_free(maps[map_idx].path);
	return -1;
}

static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	dev = malloc(sizeof(*dev));
	if (dev == NULL)
		return -1;

	memset(dev, 0, sizeof(*dev));
	dev->device.bus = &rte_pci_bus.bus;

	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id */
	dev->id.class_id = (conf->pc_class << 16) |
			   (conf->pc_subclass << 8) |
			   (conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = 0;

	pci_name_set(dev);

	/* FreeBSD has only one pass through driver */
	dev->kdrv = RTE_PCI_KDRV_NIC_UIO;

	/* parse resources */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			dev->mem_resource[i].addr =
				(void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_name_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				free(dev);
			}
			return 0;
		}
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	free(dev);
	return 0;
}
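
/*
 * Editorial note on the BAR decoding in pci_scan_one() above (illustrative
 * values, not taken from a real device): PCIOCGETBAR returns the raw BAR
 * register in pbi_base, so the low flag bits still have to be masked off.
 *
 *	pbi_base = 0x0000e801  ->  bit 0 set: I/O BAR, port base 0xe800
 *	pbi_base = 0xfebc0004  ->  bit 0 clear: memory BAR (64-bit type),
 *				   physical base 0xfebc0000
 *
 * In both cases the code keeps "pbi_base & ~0xf" and records it either as
 * dev->mem_resource[i].addr (I/O) or .phys_addr (memory).
 */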
/*
 * Scan the content of the PCI bus, and add the devices in the devices
 * list. Call pci_scan_one() for each pci entry found.
 */
int
rte_pci_scan(void)
{
	int fd;
	unsigned dev_count = 0;
	struct pci_conf matches[16];
	struct pci_conf_io conf_io = {
		.pat_buf_len = 0,
		.num_patterns = 0,
		.patterns = NULL,
		.match_buf_len = sizeof(matches),
		.matches = &matches[0],
	};
	struct rte_pci_addr pci_addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	do {
		unsigned i;
		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
			RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
					__func__, strerror(errno));
			goto error;
		}

		for (i = 0; i < conf_io.num_matches; i++) {
			pci_addr.domain = matches[i].pc_sel.pc_domain;
			pci_addr.bus = matches[i].pc_sel.pc_bus;
			pci_addr.devid = matches[i].pc_sel.pc_dev;
			pci_addr.function = matches[i].pc_sel.pc_func;

			if (rte_pci_ignore_device(&pci_addr))
				continue;

			if (pci_scan_one(fd, &matches[i]) < 0)
				goto error;
		}

		dev_count += conf_io.num_matches;
	} while (conf_io.status == PCI_GETCONF_MORE_DEVS);

	close(fd);

	RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

bool
pci_device_iommu_support_va(__rte_unused const struct rte_pci_device *dev)
{
	return false;
}

enum rte_iova_mode
pci_device_iova_mode(const struct rte_pci_driver *pdrv __rte_unused,
		     const struct rte_pci_device *pdev)
{
	if (pdev->kdrv != RTE_PCI_KDRV_NIC_UIO)
		RTE_LOG(DEBUG, EAL, "Unsupported kernel driver? Defaulting to IOVA as 'PA'\n");

	return RTE_IOVA_PA;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *dev,
			void *buf, size_t len, off_t offset)
{
	int fd = -1;
	int size;
	/* Copy Linux implementation's behaviour */
	const int return_len = len;
	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
	};

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	while (len > 0) {
		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
		pi.pi_width = size;

		if (ioctl(fd, PCIOCREAD, &pi) < 0)
			goto error;
		memcpy(buf, &pi.pi_data, size);

		buf = (char *)buf + size;
		pi.pi_reg += size;
		len -= size;
	}
	close(fd);

	return return_len;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
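
/*
 * Editorial usage sketch (not part of the driver): a PMD or application can
 * read the 16-bit vendor ID from the start of config space as below. The
 * offset 0 and the length follow the generic PCI config-space layout; all
 * names other than rte_pci_read_config() are local to the example.
 *
 *	uint16_t vendor_id;
 *
 *	if (rte_pci_read_config(dev, &vendor_id, sizeof(vendor_id), 0) !=
 *			sizeof(vendor_id))
 *		return -1;	/-* read failed *-/
 *
 * On success the function returns the requested length (matching the Linux
 * implementation); on failure it returns -1.
 */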
/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *dev,
			 const void *buf, size_t len, off_t offset)
{
	int fd = -1;

	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
		.pi_data = *(const uint32_t *)buf,
		.pi_width = len,
	};

	if (len == 3 || len > sizeof(pi.pi_data)) {
		RTE_LOG(ERR, EAL, "%s(): invalid pci write length\n", __func__);
		goto error;
	}

	memcpy(&pi.pi_data, buf, len);

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
		goto error;

	close(fd);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret;

	switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_PCI_KDRV_NIC_UIO:
		if (rte_eal_iopl_init() != 0) {
			RTE_LOG(ERR, EAL, "%s(): insufficient ioport permissions for PCI device %s\n",
				__func__, dev->name);
			return -1;
		}
		if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
			p->base = (uintptr_t)dev->mem_resource[bar].addr;
			ret = 0;
		} else
			ret = -1;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	uint8_t *d;
	int size;
	unsigned short reg = p->base + offset;

	for (d = data; len > 0; d += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			*(uint32_t *)d = inl(reg);
		} else if (len >= 2) {
			size = 2;
			*(uint16_t *)d = inw(reg);
		} else {
			size = 1;
			*d = inb(reg);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_PCI_KDRV_NIC_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	default:
		break;
	}
}

static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	const uint8_t *s;
	int size;
	unsigned short reg = p->base + offset;

	for (s = data; len > 0; s += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			outl(reg, *(const uint32_t *)s);
		} else if (len >= 2) {
			size = 2;
			outw(reg, *(const uint16_t *)s);
		} else {
			size = 1;
			outb(reg, *s);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_PCI_KDRV_NIC_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	default:
		break;
	}
}
int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret;

	switch (p->dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_PCI_KDRV_NIC_UIO:
		ret = 0;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	return ret;
}
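
/*
 * Editorial usage sketch (not part of the driver): the ioport API above is
 * typically used by a PMD roughly as follows, assuming "dev" is a probed
 * rte_pci_device whose BAR 0 is an I/O BAR. REG_OFFSET and the 32-bit value
 * are placeholders local to the example.
 *
 *	struct rte_pci_ioport p;
 *	uint32_t val;
 *
 *	if (rte_pci_ioport_map(dev, 0, &p) != 0)
 *		return -1;	/-* no usable I/O BAR *-/
 *	rte_pci_ioport_read(&p, &val, sizeof(val), REG_OFFSET);
 *	val |= 0x1;
 *	rte_pci_ioport_write(&p, &val, sizeof(val), REG_OFFSET);
 *	rte_pci_ioport_unmap(&p);
 */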