/*	$NetBSD: pci.c,v 1.148 2015/08/24 23:55:04 pooka Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *     Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.148 2015/08/24 23:55:04 pooka Exp $");

#ifdef _KERNEL_OPT
#include "opt_pci.h"
#endif

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/module.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <net/if.h>

#include "locators.h"

/* pmf(9) child-register hook installed on the pci bus device (see below). */
static bool pci_child_register(device_t);

/* When non-zero, pciprint() dumps each child's config space at attach. */
#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int pciprint(void *, const char *);

/*
 * Machine-dependent code may provide its own bus enumerator; otherwise
 * the generic pci_enumerate_bus() at the bottom of this file is used.
 */
#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *, const int *,
    int (*)(const struct pci_attach_args *), struct pci_attach_args *);
#endif

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */

/*
 * autoconf(9) rescan entry point: (re)enumerate the bus, attaching any
 * device/function not already attached.  "ifattr" must be "pci" and
 * "locators" must be supplied (possibly wildcarded).  Always returns 0.
 */
int
pcirescan(device_t self, const char *ifattr, const int *locators)
{
	struct pci_softc *sc = device_private(self);

	KASSERT(ifattr && !strcmp(ifattr, "pci"));
	KASSERT(locators);

	pci_enumerate_bus(sc, locators, NULL, NULL);

	return 0;
}

/*
 * autoconf(9) match: accept any pcibus attach args whose bus number
 * matches the config locator (or is wildcarded) and is in range.
 */
int
pcimatch(device_t parent, cfdata_t cf, void *aux)
{
	struct pcibus_attach_args *pba = aux;

	/* Check the locators */
	if (cf->cf_loc[PCIBUSCF_BUS] != PCIBUSCF_BUS_DEFAULT &&
	    cf->cf_loc[PCIBUSCF_BUS] != pba->pba_bus)
		return 0;

	/* sanity: a PCI bus number is 0..255 */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return 0;

	/*
	 * XXX check other (hardware?) indicators
	 */

	return 1;
}

/*
 * autoconf(9) attach: record the bus parameters from the bridge's
 * pcibus_attach_args in the softc, print the enabled spaces/features,
 * install the pmf child-register hook, and enumerate the bus once
 * with wildcard locators.  If neither I/O nor memory space is usable
 * the bus is left unenumerated (but still pmf-registered).
 */
void
pciattach(device_t parent, device_t self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = device_private(self);
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	const char *sep = "";
	static const int wildcard[PCICF_NLOCS] = {
		PCICF_DEV_DEFAULT, PCICF_FUNCTION_DEFAULT
	};

	sc->sc_dev = self;

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_OKAY);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_OKAY);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error_dev(self, "no spaces enabled!\n");
		goto fail;
	}

	/* Print a comma-separated feature list (verbose boot only). */
#define	PRINT(str)							\
do {									\
	aprint_verbose("%s%s", sep, str);				\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_verbose_dev(self, "");

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_verbose(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_verbose(" ok");
	}

	aprint_verbose("\n");

#undef PRINT

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_dmat64 = pba->pba_dmat64;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;

	device_pmf_driver_set_child_register(sc->sc_dev, pci_child_register);

	pcirescan(sc->sc_dev, "pci", wildcard);

fail:
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

/*
 * autoconf(9) detach: detach all children first, then deregister
 * from pmf.  Returns non-zero if a child refuses to detach.
 */
int
pcidetach(device_t self, int flags)
{
	int rc;

	if ((rc = config_detach_children(self, flags)) != 0)
		return rc;
	pmf_device_deregister(self);
	return 0;
}

/*
 * autoconf(9) print function for pci children.  Prints the standard
 * "dev N function N" line; when pci_config_dump is set it additionally
 * dumps the child's configuration space and decoded quirk flags.
 * Always returns UNCONF.
 */
int
pciprint(void *aux, const char *pnp)
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device, pa->pa_function);
#ifdef __i386__
		/* XXX i386-only: dumps the raw tag words through a cast. */
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_OKAY ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_OKAY ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			/* devinfo is reused here as a scratch buffer. */
			snprintb(devinfo, sizeof (devinfo),
			    "\002\001multifn\002singlefn\003skipfunc0"
			    "\004skipfunc1\005skipfunc2\006skipfunc3"
			    "\007skipfunc4\010skipfunc5\011skipfunc6"
			    "\012skipfunc7", qd->quirks);
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return UNCONF;
}

/*
 * Probe a single device/function identified by "tag".  Reads the header,
 * records the memory BAR ranges in the per-child softc state, and builds
 * a pci_attach_args.  If "match" is non-NULL it is called with the args
 * (result copied to *pap on success) instead of attaching; otherwise the
 * child's config space and power state are captured and a child device
 * is attached via config_found_sm_loc().
 *
 * Returns 0 if the function is absent/invalid or not matched/attached;
 * otherwise the (non-zero) match result or 1 for a successful attach.
 */
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(const struct pci_attach_args *),
    struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, /* csr, */ pciclass, intr, bhlcr, bar, endbar;
#ifdef __HAVE_PCI_MSI_MSIX
	pcireg_t cap;
	int off;
#endif
	int ret, pin, bus, device, function, i, width;
	int locs[PCICF_NLOCS];

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* a driver already attached? */
	if (sc->PCI_SC_DEVICESC(device, function).c_dev != NULL && !match)
		return 0;

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	/* Only header types 0 (device), 1 (PPB) and 2 (CardBus) are known. */
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return 0;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	/* csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); */
	pciclass = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return 0;
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return 0;

	/* Collect memory range info */
	memset(sc->PCI_SC_DEVICESC(device, function).c_range, 0,
	    sizeof(sc->PCI_SC_DEVICESC(device, function).c_range));
	i = 0;
	/* The BAR window depends on the header type. */
	switch (PCI_HDRTYPE_TYPE(bhlcr)) {
	case PCI_HDRTYPE_PPB:
		endbar = PCI_MAPREG_PPB_END;
		break;
	case PCI_HDRTYPE_PCB:
		endbar = PCI_MAPREG_PCB_END;
		break;
	default:
		endbar = PCI_MAPREG_END;
		break;
	}
	for (bar = PCI_MAPREG_START; bar < endbar; bar += width) {
		struct pci_range *r;
		pcireg_t type;

		width = 4;	/* 64-bit memory BARs consume two slots */
		if (pci_mapreg_probe(pc, tag, bar, &type) == 0)
			continue;

		if (PCI_MAPREG_TYPE(type) == PCI_MAPREG_TYPE_MEM) {
			if (PCI_MAPREG_MEM_TYPE(type) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				width = 8;

			r = &sc->PCI_SC_DEVICESC(device, function).c_range[i++];
			if (pci_mapreg_info(pc, tag, bar, type,
			    &r->r_offset, &r->r_size, &r->r_flags) != 0)
				break;
			/* ATI quirk: 16MB BAR0 on an ATI device. */
			if ((PCI_VENDOR(id) == PCI_VENDOR_ATI) && (bar == 0x10)
			    && (r->r_size == 0x1000000)) {
				struct pci_range *nr;
				/*
				 * this has to be a mach64
				 * split things up so each half-aperture can
				 * be mapped PREFETCHABLE except the last page
				 * which may contain registers
				 */
				r->r_size = 0x7ff000;
				r->r_flags = BUS_SPACE_MAP_LINEAR |
				    BUS_SPACE_MAP_PREFETCHABLE;
				nr = &sc->PCI_SC_DEVICESC(device,
				    function).c_range[i++];
				nr->r_offset = r->r_offset + 0x800000;
				nr->r_size = 0x7ff000;
				nr->r_flags = BUS_SPACE_MAP_LINEAR |
				    BUS_SPACE_MAP_PREFETCHABLE;
			}

		}
	}

	/* Build the attach args handed to the match callback or the child. */
	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = pciclass;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0) {
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);
	}

	if (sc->sc_bridgetag == NULL) {
		/* Primary bus: no swizzling, interrupts routed by our tag. */
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin = /* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

#ifdef __HAVE_PCI_MSI_MSIX
	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSIMAP, &off, &cap)) {
		/*
		 * XXX Should we enable MSI mapping ourselves on
		 * systems that have it disabled?
		 */
		if (cap & PCI_HT_MSI_ENABLED) {
			uint64_t addr;
			if ((cap & PCI_HT_MSI_FIXED) == 0) {
				addr = pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_LO);
				addr |= (uint64_t)pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_HI) << 32;
			} else
				addr = PCI_HT_MSI_FIXED_ADDR;

			/*
			 * XXX This will fail to enable MSI on systems
			 * that don't use the canonical address.
			 */
			if (addr == PCI_HT_MSI_FIXED_ADDR) {
				pa.pa_flags |= PCI_FLAGS_MSI_OKAY;
				pa.pa_flags |= PCI_FLAGS_MSIX_OKAY;
			}
		}
	}
#endif

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		struct pci_child *c;
		locs[PCICF_DEV] = device;
		locs[PCICF_FUNCTION] = function;

		/*
		 * Capture config space and power state so they can be
		 * restored in pcidevdetached().
		 */
		c = &sc->PCI_SC_DEVICESC(device, function);
		pci_conf_capture(pc, tag, &c->c_conf);
		if (pci_get_powerstate(pc, tag, &c->c_powerstate) == 0)
			c->c_psok = true;
		else
			c->c_psok = false;

		c->c_dev = config_found_sm_loc(sc->sc_dev, "pci", locs, &pa,
		    pciprint, config_stdsubmatch);

		ret = (c->c_dev != NULL);
	}

	return ret;
}

/*
 * autoconf(9) child-detached hook: restore the power state and config
 * space captured at attach time, and clear the child bookkeeping slot.
 */
void
pcidevdetached(device_t self, device_t child)
{
	struct pci_softc *sc = device_private(self);
	int d, f;
	pcitag_t tag;
	struct pci_child *c;

	d = device_locator(child, PCICF_DEV);
	f = device_locator(child, PCICF_FUNCTION);

	c = &sc->PCI_SC_DEVICESC(d, f);

	KASSERT(c->c_dev == child);

	tag = pci_make_tag(sc->sc_pc, sc->sc_bus, d, f);
	if (c->c_psok)
		pci_set_powerstate(sc->sc_pc, tag, c->c_powerstate);
	pci_conf_restore(sc->sc_pc, tag, &c->c_conf);
	c->c_dev = NULL;
}

CFATTACH_DECL3_NEW(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, pcidetach, NULL, pcirescan, pcidevdetached,
    DVF_DETACH_SHUTDOWN);

/*
 * Walk the device's capability list looking for capability "capid".
 * On success returns 1 and optionally stores the config-space offset
 * in *offset and the first capability word in *value; returns 0 if the
 * device has no capability list or the capability is absent.  A
 * malformed list pointer (unaligned or below 0x40) aborts the walk
 * with a console message.
 */
int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return 0;

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
	case 1: /* PCI-PCI bridge header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return 0;
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
		if ((ofs & 3) || (ofs < 0x40)) {
			int bus, device, function;

			pci_decompose_tag(pc, tag, &bus, &device, &function);

			printf("Skipping broken PCI header on %d:%d:%d\n",
			    bus, device, function);
			break;
		}
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return 1;
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return 0;
}

/*
 * Like pci_get_capability(), but walks the HyperTransport (LDT)
 * sub-capability chain looking for HT capability "capid".
 */
int
pci_get_ht_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	if (pci_get_capability(pc, tag, PCI_CAP_LDT, &ofs, NULL) == 0)
		return 0;

	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_ht_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_HT_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return 1;
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return 0;
}

/*
 * return number of the devices's MSI vectors
 * return 0 if the device does not support MSI
 */
int
pci_msi_count(pci_chipset_tag_t pc, pcitag_t tag)
{
	pcireg_t reg;
	uint32_t mmc;
	int count, offset;

	if (pci_get_capability(pc, tag, PCI_CAP_MSI, &offset, NULL) == 0)
		return 0;

	/* Multiple Message Capable field encodes 2^MMC vectors. */
	reg = pci_conf_read(pc, tag, offset + PCI_MSI_CTL);
	mmc = PCI_MSI_CTL_MMC(reg);
	count = 1 << mmc;
	if (count > PCI_MSI_MAX_VECTORS) {
		aprint_error("detect an illegal device! The device use reserved MMC values.\n");
		return 0;
	}

	return count;
}

/*
 * return number of the devices's MSI-X vectors
 * return 0 if the device does not support MSI-X
 */
int
pci_msix_count(pci_chipset_tag_t pc, pcitag_t tag)
{
	pcireg_t reg;
	int offset;

	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, &offset, NULL) == 0)
		return 0;

	reg = pci_conf_read(pc, tag, offset + PCI_MSIX_CTL);

	return PCI_MSIX_CTL_TBLSIZE(reg);
}

/*
 * Enumerate every configured pci bus instance with wildcard locators,
 * calling "match" on each device found; the first non-zero match stops
 * the search with its attach args copied to *pa.  Returns 1 on a match,
 * 0 otherwise.
 */
int
pci_find_device(struct pci_attach_args *pa,
    int (*match)(const struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	device_t pcidev;
	int i;
	static const int wildcard[2] = {
		PCICF_DEV_DEFAULT,
		PCICF_FUNCTION_DEFAULT
	};

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = device_lookup(&pci_cd, i);
		if (pcidev != NULL &&
		    pci_enumerate_bus(device_private(pcidev), wildcard,
				      match, pa) != 0)
			return 1;
	}
	return 0;
}

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc, const int *locators,
    int (*match)(const struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
	uint8_t devs[32];
	int i, n;

	/* Machine-dependent code supplies the device probe order. */
	n = pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs, __arraycount(devs));
	for (i = 0; i < n; i++) {
		device = devs[i];

		if ((locators[PCICF_DEV] != PCICF_DEV_DEFAULT) &&
		    (locators[PCICF_DEV] != device))
			continue;

		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		/*
		 * Function count: quirk table overrides the header's
		 * multifunction bit in both directions.
		 */
		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

#ifdef __PCI_DEV_FUNCORDER
		/* MD code may also reorder the function probe sequence. */
		char funcs[8];
		int j;
		for (j = 0; j < nfunctions; j++) {
			funcs[j] = j;
		}
		if (j < __arraycount(funcs))
			funcs[j] = -1;
		if (nfunctions > 1) {
			pci_dev_funcorder(sc->sc_pc, sc->sc_bus, device,
			    nfunctions, funcs);
		}
		for (j = 0;
		     j < 8 && (function = funcs[j]) < 8 && function >= 0;
		     j++) {
#else
		for (function = 0; function < nfunctions; function++) {
#endif
			if ((locators[PCICF_FUNCTION] != PCICF_FUNCTION_DEFAULT)
			    && (locators[PCICF_FUNCTION] != function))
				continue;

			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */


/*
 * Vital Product Data (PCI 2.2)
 */

/*
 * Read "count" 32-bit VPD words starting at byte offset "offset" into
 * data[].  Returns 0 on success, 1 if the device lacks the VPD
 * capability or an operation times out (~80us of polling per word).
 * NOTE(review): the KASSERT mixes units — "offset" is in bytes while
 * "count" is in words; the bound is conservative but imprecise.
 */
int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return 1;

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/* Clear the flag bit: a 0->1 transition signals completion. */
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return 1;
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return 0;
}

/*
 * Write "count" 32-bit VPD words starting at byte offset "offset" from
 * data[].  Returns 0 on success, 1 if the device lacks the VPD
 * capability or an operation times out (flag set = write pending,
 * cleared by hardware on completion).
 */
int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return 1;

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return 1;
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return 0;
}

/*
 * Report whether the bus supplied a valid 64-bit DMA tag for this
 * device (only possible on ports defining _PCI_HAVE_DMA64).
 */
int
pci_dma64_available(const struct pci_attach_args *pa)
{
#ifdef _PCI_HAVE_DMA64
	if (BUS_DMA_TAG_VALID(pa->pa_dmat64))
		return 1;
#endif
	return 0;
}

/*
 * Capture the first 16 dwords (64 bytes, the standard header) of the
 * device's configuration space into *pcs, for later restore.
 */
void
pci_conf_capture(pci_chipset_tag_t pc, pcitag_t tag,
	      struct pci_conf_state *pcs)
{
	int off;

	for (off = 0; off < 16; off++)
		pcs->reg[off] = pci_conf_read(pc, tag, (off * 4));

	return;
}

/*
 * Restore a configuration-space snapshot taken by pci_conf_capture().
 * Registers are written in reverse order and only when they differ
 * from the current value.
 */
void
pci_conf_restore(pci_chipset_tag_t pc, pcitag_t tag,
	      struct pci_conf_state *pcs)
{
	int off;
	pcireg_t val;

	for (off = 15; off >= 0; off--) {
		val = pci_conf_read(pc, tag, (off * 4));
		if (val != pcs->reg[off])
			pci_conf_write(pc, tag, (off * 4), pcs->reg[off]);
	}

	return;
}

/*
 * Power Management Capability (Rev 2.2)
 */

/*
 * Read the current power state from the PMCSR at "offset" (the PM
 * capability offset).  Returns 0 and stores D0..D3 in *state, or
 * EINVAL for a value outside the mask's known states.
 */
static int
pci_get_powerstate_int(pci_chipset_tag_t pc, pcitag_t tag,
    pcireg_t *state, int offset)
{
	pcireg_t value, now;

	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	switch (now) {
	case PCI_PMCSR_STATE_D0:
	case PCI_PMCSR_STATE_D1:
	case PCI_PMCSR_STATE_D2:
	case PCI_PMCSR_STATE_D3:
		*state = now;
		return 0;
	default:
		return EINVAL;
	}
}

/*
 * Public wrapper: locate the PM capability, then read the power state.
 * Returns EOPNOTSUPP if the device has no PM capability.
 */
int
pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag , pcireg_t *state)
{
	int offset;
	pcireg_t value;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return EOPNOTSUPP;

	return pci_get_powerstate_int(pc, tag, state, offset);
}

/*
 * Transition the device to "state" (D0..D3), validating the transition
 * against the current state and the capabilities word "cap_reg"
 * (D1/D2 support bits).  Applies the pcipm 1.2 settle delays after
 * the write.  Returns 0, EINVAL for an illegal transition/state, or
 * EOPNOTSUPP if the target state is unsupported.
 */
static int
pci_set_powerstate_int(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t state,
    int offset, pcireg_t cap_reg)
{
	pcireg_t value, cap, now;

	cap = cap_reg >> PCI_PMCR_SHIFT;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;

	if (now == state)
		return 0;
	switch (state) {
	case PCI_PMCSR_STATE_D0:
		break;
	case PCI_PMCSR_STATE_D1:
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3) {
			printf("invalid transition from %d to D1\n", (int)now);
			return EINVAL;
		}
		if (!(cap & PCI_PMCR_D1SUPP)) {
			printf("D1 not supported\n");
			return EOPNOTSUPP;
		}
		break;
	case PCI_PMCSR_STATE_D2:
		if (now == PCI_PMCSR_STATE_D3) {
			printf("invalid transition from %d to D2\n", (int)now);
			return EINVAL;
		}
		if (!(cap & PCI_PMCR_D2SUPP)) {
			printf("D2 not supported\n");
			return EOPNOTSUPP;
		}
		break;
	case PCI_PMCSR_STATE_D3:
		break;
	default:
		return EINVAL;
	}
	value |= state;
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	/* delay according to pcipm1.2, ch. 5.6.1 */
	if (state == PCI_PMCSR_STATE_D3 || now == PCI_PMCSR_STATE_D3)
		DELAY(10000);
	else if (state == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D2)
		DELAY(200);

	return 0;
}

/*
 * Public wrapper: locate the PM capability, then set the power state.
 * Returns EOPNOTSUPP if the device has no PM capability.
 */
int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t state)
{
	int offset;
	pcireg_t value;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value)) {
		printf("pci_set_powerstate not supported\n");
		return EOPNOTSUPP;
	}

	return pci_set_powerstate_int(pc, tag, state, offset, value);
}

/*
 * Bring a device to D0, invoking the driver-supplied "wakefun" first
 * when waking from a lower state.  A device found in D3 with no
 * wakefun cannot be recovered (config data lost) and yields
 * EOPNOTSUPP.  Returns 0 if already in (or brought to) D0.
 */
int
pci_activate(pci_chipset_tag_t pc, pcitag_t tag, device_t dev,
    int (*wakefun)(pci_chipset_tag_t, pcitag_t, device_t, pcireg_t))
{
	pcireg_t pmode;
	int error;

	if ((error = pci_get_powerstate(pc, tag, &pmode)))
		return error;

	switch (pmode) {
	case PCI_PMCSR_STATE_D0:
		break;
	case PCI_PMCSR_STATE_D3:
		if (wakefun == NULL) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error_dev(dev,
			    "unable to wake up from power state D3\n");
			return EOPNOTSUPP;
		}
		/*FALLTHROUGH*/
	default:
		if (wakefun) {
			error = (*wakefun)(pc, tag, dev, pmode);
			if (error)
				return error;
		}
		aprint_normal_dev(dev, "waking up from power state D%d\n",
		    pmode);
		if ((error = pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0)))
			return error;
	}
	return 0;
}

/* No-op wakefun for drivers that need no special wake-up handling. */
int
pci_activate_null(pci_chipset_tag_t pc, pcitag_t tag,
    device_t dev, pcireg_t state)
{
	return 0;
}

/*
 * Per-child pmf(9) state: saved config space, PM capability location,
 * class code and original command/status register, used by the
 * suspend/resume/shutdown hooks below.
 */
struct pci_child_power {
	struct pci_conf_state p_pciconf;
	pci_chipset_tag_t p_pc;
	pcitag_t p_tag;
	bool p_has_pm;
	int p_pm_offset;
	pcireg_t p_pm_cap;
	pcireg_t p_class;
	pcireg_t p_csr;
};

/*
 * pmf suspend hook: snapshot config space, then (unless the device has
 * no PM capability or is a display adapter) disable decoding and
 * busmastering and drop to D3.  Restores the CSR and fails if the D3
 * transition is unsupported.
 */
static bool
pci_child_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct pci_child_power *priv = device_pmf_bus_private(dv);
	pcireg_t ocsr, csr;

	pci_conf_capture(priv->p_pc, priv->p_tag, &priv->p_pciconf);

	if (!priv->p_has_pm)
		return true; /* ??? hopefully handled by ACPI */
	if (PCI_CLASS(priv->p_class) == PCI_CLASS_DISPLAY)
		return true; /* XXX */

	/* disable decoding and busmastering, see pcipm1.2 ch. 8.2.1 */
	ocsr = pci_conf_read(priv->p_pc, priv->p_tag, PCI_COMMAND_STATUS_REG);
	csr = ocsr & ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE
		       | PCI_COMMAND_MASTER_ENABLE);
	pci_conf_write(priv->p_pc, priv->p_tag, PCI_COMMAND_STATUS_REG, csr);
	if (pci_set_powerstate_int(priv->p_pc, priv->p_tag,
	    PCI_PMCSR_STATE_D3, priv->p_pm_offset, priv->p_pm_cap)) {
		pci_conf_write(priv->p_pc, priv->p_tag,
			       PCI_COMMAND_STATUS_REG, ocsr);
		aprint_error_dev(dv, "unsupported state, continuing.\n");
		return false;
	}
	return true;
}

/*
 * pmf resume hook: return to D0 (if the device has a PM capability)
 * and restore the config-space snapshot taken at suspend.
 */
static bool
pci_child_resume(device_t dv, const pmf_qual_t *qual)
{
	struct pci_child_power *priv = device_pmf_bus_private(dv);

	if (priv->p_has_pm &&
	    pci_set_powerstate_int(priv->p_pc, priv->p_tag,
	    PCI_PMCSR_STATE_D0, priv->p_pm_offset, priv->p_pm_cap)) {
		aprint_error_dev(dv, "unsupported state, continuing.\n");
		return false;
	}

	pci_conf_restore(priv->p_pc, priv->p_tag, &priv->p_pciconf);

	return true;
}

/*
 * pmf shutdown hook: put the busmaster-enable bit back to the value
 * it had when the child was registered, leaving other CSR bits alone.
 */
static bool
pci_child_shutdown(device_t dv, int how)
{
	struct pci_child_power *priv = device_pmf_bus_private(dv);
	pcireg_t csr;

	/* restore original bus-mastering state */
	csr = pci_conf_read(priv->p_pc, priv->p_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_MASTER_ENABLE;
	csr |= priv->p_csr & PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(priv->p_pc, priv->p_tag, PCI_COMMAND_STATUS_REG, csr);
	return true;
}

/* pmf deregister hook: free the per-child power bookkeeping. */
static void
pci_child_deregister(device_t dv)
{
	struct pci_child_power *priv = device_pmf_bus_private(dv);

	free(priv, M_DEVBUF);
}

/*
 * pmf child-register hook (installed in pciattach): allocate and fill
 * a pci_child_power record for the new child — its tag, class, CSR and
 * PM capability location — and register the suspend/resume/shutdown
 * hooks above with pmf.  Always returns true (M_WAITOK allocation).
 */
static bool
pci_child_register(device_t child)
{
	device_t self = device_parent(child);
	struct pci_softc *sc = device_private(self);
	struct pci_child_power *priv;
	int device, function, off;
	pcireg_t reg;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK);

	device = device_locator(child, PCICF_DEV);
	function = device_locator(child, PCICF_FUNCTION);

	priv->p_pc = sc->sc_pc;
	priv->p_tag = pci_make_tag(priv->p_pc, sc->sc_bus, device,
	    function);
	priv->p_class = pci_conf_read(priv->p_pc, priv->p_tag, PCI_CLASS_REG);
	priv->p_csr = pci_conf_read(priv->p_pc, priv->p_tag,
	    PCI_COMMAND_STATUS_REG);

	if (pci_get_capability(priv->p_pc, priv->p_tag,
			       PCI_CAP_PWRMGMT, &off, &reg)) {
		priv->p_has_pm = true;
		priv->p_pm_offset = off;
		priv->p_pm_cap = reg;
	} else {
		priv->p_has_pm = false;
		priv->p_pm_offset = -1;
	}

	device_pmf_bus_register(child, priv, pci_child_suspend,
	    pci_child_resume, pci_child_shutdown, pci_child_deregister);

	return true;
}

MODULE(MODULE_CLASS_DRIVER, pci, NULL);

/*
 * module(9) command handler: init/fini are no-ops (the driver is
 * wired via autoconf); anything else is ENOTTY.
 */
static int
pci_modcmd(modcmd_t cmd, void *priv)
{
	if (cmd == MODULE_CMD_INIT || cmd == MODULE_CMD_FINI)
		return 0;
	return ENOTTY;
}