/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <limits.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>

#if defined(RTE_ARCH_X86)
#include <machine/cpufunc.h>
#endif

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_launch.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_debug.h>
#include <rte_devargs.h>

#include "eal_filesystem.h"
#include "private.h"

/**
 * @file
 * PCI probing under BSD.
 *
 * This code probes the PCI bus through the FreeBSD /dev/pci control
 * device. A device that a registered DPDK driver is going to use must be
 * bound to the nic_uio(4) kernel module, a very minimal pass-through
 * driver which only exposes the device's PCI BARs to the application
 * through a /dev/uio@pci:bus:device:function node.
 */
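
/*
 * Illustrative note (setup details come from the DPDK FreeBSD getting
 * started guide, not from this file): devices are typically handed to
 * nic_uio(4) at boot, for example via /boot/loader.conf:
 *
 *	nic_uio_load="YES"
 *	hw.nic_uio.bdfs="2:0:0,2:0:1"
 *
 * Once a device is bound, its /dev/uio@pci:bus:dev:func node is what
 * pci_uio_alloc_resource() opens below; the bus scan itself only needs
 * the always-present /dev/pci control device.
 */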

extern struct rte_pci_bus rte_pci_bus;

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
        int ret = -1;

        /* try mapping the NIC resources */
        switch (dev->kdrv) {
        case RTE_KDRV_NIC_UIO:
                /* map resources for devices that use uio */
                ret = pci_uio_map_resource(dev);
                break;
        default:
                RTE_LOG(DEBUG, EAL,
                        " Not managed by a supported kernel driver, skipped\n");
                ret = 1;
                break;
        }

        return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
        /* try unmapping the NIC resources */
        switch (dev->kdrv) {
        case RTE_KDRV_NIC_UIO:
                /* unmap resources for devices that use uio */
                pci_uio_unmap_resource(dev);
                break;
        default:
                RTE_LOG(DEBUG, EAL,
                        " Not managed by a supported kernel driver, skipped\n");
                break;
        }
}

void
pci_uio_free_resource(struct rte_pci_device *dev,
                struct mapped_pci_resource *uio_res)
{
        rte_free(uio_res);

        if (dev->intr_handle.fd) {
                close(dev->intr_handle.fd);
                dev->intr_handle.fd = -1;
                dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
        }
}

int
pci_uio_alloc_resource(struct rte_pci_device *dev,
                struct mapped_pci_resource **uio_res)
{
        char devname[PATH_MAX]; /* holds the /dev/uio@pci:b:d:f path */
        struct rte_pci_addr *loc;

        loc = &dev->addr;

        snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
                        dev->addr.bus, dev->addr.devid, dev->addr.function);

        if (access(devname, O_RDWR) < 0) {
                RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
                                "skipping\n", loc->domain, loc->bus,
                                loc->devid, loc->function);
                return 1;
        }

        /* save fd if in primary process */
        dev->intr_handle.fd = open(devname, O_RDWR);
        if (dev->intr_handle.fd < 0) {
                RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
                        devname, strerror(errno));
                goto error;
        }
        dev->intr_handle.type = RTE_INTR_HANDLE_UIO;

        /* allocate the mapping details for secondary processes */
        *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
        if (*uio_res == NULL) {
                RTE_LOG(ERR, EAL,
                        "%s(): cannot store uio mmap details\n", __func__);
                goto error;
        }

        snprintf((*uio_res)->path, sizeof((*uio_res)->path), "%s", devname);
        memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));

        return 0;

error:
        pci_uio_free_resource(dev, *uio_res);
        return -1;
}

int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
                struct mapped_pci_resource *uio_res, int map_idx)
{
        int fd;
        char *devname;
        void *mapaddr;
        uint64_t offset;
        uint64_t pagesz;
        struct pci_map *maps;

        maps = uio_res->maps;
        devname = uio_res->path;
        pagesz = sysconf(_SC_PAGESIZE);

        /* allocate memory to keep path */
        maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
        if (maps[map_idx].path == NULL) {
                RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
                                strerror(errno));
                return -1;
        }

        /*
         * open resource file, to mmap it
         */
        fd = open(devname, O_RDWR);
        if (fd < 0) {
                RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
                                devname, strerror(errno));
                goto error;
        }

        /* if matching map is found, then use it */
        offset = res_idx * pagesz;
        mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
                        (size_t)dev->mem_resource[res_idx].len, 0);
        close(fd);
        if (mapaddr == MAP_FAILED)
                goto error;

        maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
        maps[map_idx].size = dev->mem_resource[res_idx].len;
        maps[map_idx].addr = mapaddr;
        maps[map_idx].offset = offset;
        strcpy(maps[map_idx].path, devname);
        dev->mem_resource[res_idx].addr = mapaddr;

        return 0;

error:
        rte_free(maps[map_idx].path);
        return -1;
}
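
/*
 * Worked example for the mapping above (illustrative, assuming a 4 KiB
 * page size): pci_uio_map_resource_by_index() maps BAR res_idx at file
 * offset res_idx * PAGE_SIZE within the uio node, so BAR 0 lives at
 * offset 0x0, BAR 1 at 0x1000, BAR 2 at 0x2000, and so on. The common
 * code in pci_uio_map_resource() drives this: it first calls
 * pci_uio_alloc_resource() and then invokes this helper once per
 * mappable resource.
 */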

static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
        struct rte_pci_device *dev;
        struct pci_bar_io bar;
        unsigned i, max;

        dev = malloc(sizeof(*dev));
        if (dev == NULL) {
                return -1;
        }

        memset(dev, 0, sizeof(*dev));
        dev->addr.domain = conf->pc_sel.pc_domain;
        dev->addr.bus = conf->pc_sel.pc_bus;
        dev->addr.devid = conf->pc_sel.pc_dev;
        dev->addr.function = conf->pc_sel.pc_func;

        /* get vendor id */
        dev->id.vendor_id = conf->pc_vendor;

        /* get device id */
        dev->id.device_id = conf->pc_device;

        /* get subsystem_vendor id */
        dev->id.subsystem_vendor_id = conf->pc_subvendor;

        /* get subsystem_device id */
        dev->id.subsystem_device_id = conf->pc_subdevice;

        /* get class id */
        dev->id.class_id = (conf->pc_class << 16) |
                        (conf->pc_subclass << 8) |
                        (conf->pc_progif);

        /* TODO: get max_vfs */
        dev->max_vfs = 0;

        /* FreeBSD has no NUMA support (yet) */
        dev->device.numa_node = 0;

        pci_name_set(dev);

        /* FreeBSD has only one pass through driver */
        dev->kdrv = RTE_KDRV_NIC_UIO;

        /* parse resources */
        switch (conf->pc_hdr & PCIM_HDRTYPE) {
        case PCIM_HDRTYPE_NORMAL:
                max = PCIR_MAX_BAR_0;
                break;
        case PCIM_HDRTYPE_BRIDGE:
                max = PCIR_MAX_BAR_1;
                break;
        case PCIM_HDRTYPE_CARDBUS:
                max = PCIR_MAX_BAR_2;
                break;
        default:
                goto skipdev;
        }

        for (i = 0; i <= max; i++) {
                bar.pbi_sel = conf->pc_sel;
                bar.pbi_reg = PCIR_BAR(i);
                if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
                        continue;

                dev->mem_resource[i].len = bar.pbi_length;
                if (PCI_BAR_IO(bar.pbi_base)) {
                        dev->mem_resource[i].addr =
                                (void *)(bar.pbi_base & ~((uint64_t)0xf));
                        continue;
                }
                dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
        }

        /* device is valid, add in list (sorted) */
        if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
                rte_pci_add_device(dev);
        } else {
                struct rte_pci_device *dev2 = NULL;
                int ret;

                TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
                        ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
                        if (ret > 0)
                                continue;
                        else if (ret < 0) {
                                rte_pci_insert_device(dev2, dev);
                        } else { /* already registered */
                                dev2->kdrv = dev->kdrv;
                                dev2->max_vfs = dev->max_vfs;
                                pci_name_set(dev2);
                                memmove(dev2->mem_resource,
                                        dev->mem_resource,
                                        sizeof(dev->mem_resource));
                                free(dev);
                        }
                        return 0;
                }
                rte_pci_add_device(dev);
        }

        return 0;

skipdev:
        free(dev);
        return 0;
}
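
/*
 * Worked example for the decoding in pci_scan_one() above (illustrative
 * values): an Ethernet controller reports class 0x02, subclass 0x00 and
 * progif 0x00, so its class_id becomes
 * (0x02 << 16) | (0x00 << 8) | 0x00 = 0x020000. The low four bits of
 * pbi_base carry BAR type flags, which is why they are masked off before
 * the base is stored as an address.
 */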

/*
 * Scan the content of the PCI bus, and add the devices in the devices
 * list. Call pci_scan_one() for each pci entry found.
 */
int
rte_pci_scan(void)
{
        int fd;
        unsigned dev_count = 0;
        struct pci_conf matches[16];
        struct pci_conf_io conf_io = {
                .pat_buf_len = 0,
                .num_patterns = 0,
                .patterns = NULL,
                .match_buf_len = sizeof(matches),
                .matches = &matches[0],
        };

        /* for debug purposes, PCI can be disabled */
        if (!rte_eal_has_pci())
                return 0;

        fd = open("/dev/pci", O_RDONLY);
        if (fd < 0) {
                RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
                goto error;
        }

        do {
                unsigned i;
                if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
                        RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
                                        __func__, strerror(errno));
                        goto error;
                }

                for (i = 0; i < conf_io.num_matches; i++)
                        if (pci_scan_one(fd, &matches[i]) < 0)
                                goto error;

                dev_count += conf_io.num_matches;
        } while (conf_io.status == PCI_GETCONF_MORE_DEVS);

        close(fd);

        RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
        return 0;

error:
        if (fd >= 0)
                close(fd);
        return -1;
}

/*
 * Get iommu class of PCI devices on the bus.
 */
enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
        /* Supports only RTE_KDRV_NIC_UIO */
        return RTE_IOVA_PA;
}

int
pci_update_device(const struct rte_pci_addr *addr)
{
        int fd;
        struct pci_conf matches[2];
        struct pci_match_conf match = {
                .pc_sel = {
                        .pc_domain = addr->domain,
                        .pc_bus = addr->bus,
                        .pc_dev = addr->devid,
                        .pc_func = addr->function,
                },
        };
        struct pci_conf_io conf_io = {
                .pat_buf_len = 0,
                .num_patterns = 1,
                .patterns = &match,
                .match_buf_len = sizeof(matches),
                .matches = &matches[0],
        };

        fd = open("/dev/pci", O_RDONLY);
        if (fd < 0) {
                RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
                goto error;
        }

        if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
                RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
                                __func__, strerror(errno));
                goto error;
        }

        if (conf_io.num_matches != 1)
                goto error;

        if (pci_scan_one(fd, &matches[0]) < 0)
                goto error;

        close(fd);

        return 0;

error:
        if (fd >= 0)
                close(fd);
        return -1;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *dev,
                void *buf, size_t len, off_t offset)
{
        int fd = -1;
        int size;
        struct pci_io pi = {
                .pi_sel = {
                        .pc_domain = dev->addr.domain,
                        .pc_bus = dev->addr.bus,
                        .pc_dev = dev->addr.devid,
                        .pc_func = dev->addr.function,
                },
                .pi_reg = offset,
        };

        fd = open("/dev/pci", O_RDWR);
        if (fd < 0) {
                RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
                goto error;
        }

        while (len > 0) {
                size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
                pi.pi_width = size;

                if (ioctl(fd, PCIOCREAD, &pi) < 0)
                        goto error;
                memcpy(buf, &pi.pi_data, size);

                buf = (char *)buf + size;
                pi.pi_reg += size;
                len -= size;
        }
        close(fd);

        return 0;

error:
        if (fd >= 0)
                close(fd);
        return -1;
}
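
/*
 * Usage sketch for the helper above (illustrative only, error handling
 * elided): a driver that wants the 16-bit vendor ID at config-space
 * offset 0 could call
 *
 *	uint16_t vendor;
 *	if (rte_pci_read_config(dev, &vendor, sizeof(vendor), 0) < 0)
 *		handle_error();
 *
 * where handle_error() stands for whatever recovery the caller needs.
 * This BSD implementation returns 0 on success and -1 on failure, issuing
 * one PCIOCREAD ioctl per 4/2/1-byte chunk of the requested range.
 */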

/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *dev,
                const void *buf, size_t len, off_t offset)
{
        int fd = -1;

        struct pci_io pi = {
                .pi_sel = {
                        .pc_domain = dev->addr.domain,
                        .pc_bus = dev->addr.bus,
                        .pc_dev = dev->addr.devid,
                        .pc_func = dev->addr.function,
                },
                .pi_reg = offset,
                .pi_data = *(const uint32_t *)buf,
                .pi_width = len,
        };

        if (len == 3 || len > sizeof(pi.pi_data)) {
                RTE_LOG(ERR, EAL, "%s(): invalid pci write length\n", __func__);
                goto error;
        }

        memcpy(&pi.pi_data, buf, len);

        fd = open("/dev/pci", O_RDWR);
        if (fd < 0) {
                RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
                goto error;
        }

        if (ioctl(fd, PCIOCWRITE, &pi) < 0)
                goto error;

        close(fd);
        return 0;

error:
        if (fd >= 0)
                close(fd);
        return -1;
}

int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
                struct rte_pci_ioport *p)
{
        int ret;

        switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
        case RTE_KDRV_NIC_UIO:
                if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
                        p->base = (uintptr_t)dev->mem_resource[bar].addr;
                        ret = 0;
                } else
                        ret = -1;
                break;
#endif
        default:
                ret = -1;
                break;
        }

        if (!ret)
                p->dev = dev;

        return ret;
}

static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
                void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
        uint8_t *d;
        int size;
        unsigned short reg = p->base + offset;

        for (d = data; len > 0; d += size, reg += size, len -= size) {
                if (len >= 4) {
                        size = 4;
                        *(uint32_t *)d = inl(reg);
                } else if (len >= 2) {
                        size = 2;
                        *(uint16_t *)d = inw(reg);
                } else {
                        size = 1;
                        *d = inb(reg);
                }
        }
#else
        RTE_SET_USED(p);
        RTE_SET_USED(data);
        RTE_SET_USED(len);
        RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
                void *data, size_t len, off_t offset)
{
        switch (p->dev->kdrv) {
        case RTE_KDRV_NIC_UIO:
                pci_uio_ioport_read(p, data, len, offset);
                break;
        default:
                break;
        }
}

static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
                const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
        const uint8_t *s;
        int size;
        unsigned short reg = p->base + offset;

        for (s = data; len > 0; s += size, reg += size, len -= size) {
                if (len >= 4) {
                        size = 4;
                        outl(reg, *(const uint32_t *)s);
                } else if (len >= 2) {
                        size = 2;
                        outw(reg, *(const uint16_t *)s);
                } else {
                        size = 1;
                        outb(reg, *s);
                }
        }
#else
        RTE_SET_USED(p);
        RTE_SET_USED(data);
        RTE_SET_USED(len);
        RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
                const void *data, size_t len, off_t offset)
{
        switch (p->dev->kdrv) {
        case RTE_KDRV_NIC_UIO:
                pci_uio_ioport_write(p, data, len, offset);
                break;
        default:
                break;
        }
}

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
        int ret;

        switch (p->dev->kdrv) {
#if defined(RTE_ARCH_X86)
        case RTE_KDRV_NIC_UIO:
                ret = 0;
                break;
#endif
        default:
                ret = -1;
                break;
        }

        return ret;
}
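
/*
 * Usage sketch for the ioport helpers above (illustrative only): a driver
 * for a device that exposes registers through an I/O BAR could do
 *
 *	struct rte_pci_ioport io;
 *	uint16_t val;
 *
 *	if (rte_pci_ioport_map(dev, 0, &io) == 0) {
 *		rte_pci_ioport_read(&io, &val, sizeof(val), 0);
 *		rte_pci_ioport_unmap(&io);
 *	}
 *
 * On FreeBSD this only succeeds on x86 builds, and only when the BAR
 * address of the nic_uio-bound device fits in the 16-bit x86 I/O port
 * space, as checked in rte_pci_ioport_map().
 */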