/*	$OpenBSD: pci.c,v 1.67 2009/07/26 13:21:18 kettenis Exp $	*/
/*	$NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $	*/

/*
 * Copyright (c) 1995, 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/ppbreg.h>

int	pcimatch(struct device *, void *, void *);
void	pciattach(struct device *, struct device *, void *);
int	pcidetach(struct device *, int);
void	pcipower(int, void *);

#define NMAPREG			((PCI_MAPREG_END - PCI_MAPREG_START) /	\
				    sizeof(pcireg_t))
struct pci_dev {
	LIST_ENTRY(pci_dev) pd_next;
	struct device *pd_dev;
	pcitag_t pd_tag;	/* pci register tag */
	pcireg_t pd_csr;
	pcireg_t pd_bhlc;
	pcireg_t pd_int;
	pcireg_t pd_map[NMAPREG];
};

#ifdef APERTURE
extern int allowaperture;
#endif

struct cfattach pci_ca = {
	sizeof(struct pci_softc), pcimatch, pciattach, pcidetach
};

struct cfdriver pci_cd = {
	NULL, "pci", DV_DULL
};

int pci_ndomains;

int	pciprint(void *, const char *);
int	pcisubmatch(struct device *, void *, void *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif
int	pci_reserve_resources(struct pci_attach_args *);

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */

int
pcimatch(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = match;
	struct pcibus_attach_args *pba = aux;

	if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
		return (0);

	/* Check the locators */
	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
	    cf->pcibuscf_bus != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;

	pci_attach_hook(parent, self, pba);

	printf("\n");

	LIST_INIT(&sc->sc_devs);
	sc->sc_powerhook = powerhook_establish(pcipower, sc);

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_pc = pba->pba_pc;
	sc->sc_ioex = pba->pba_ioex;
	sc->sc_memex = pba->pba_memex;
	sc->sc_pmemex = pba->pba_pmemex;
	sc->sc_domain = pba->pba_domain;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_bridgeih = pba->pba_bridgeih;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	pci_enumerate_bus(sc, pci_reserve_resources, NULL);
	pci_enumerate_bus(sc, NULL, NULL);
}

int
pcidetach(struct device *self, int flags)
{
	return pci_detach_devices((struct pci_softc *)self, flags);
}

/* save and restore the pci config space */
void
pcipower(int why, void *arg)
{
	struct pci_softc *sc = (struct pci_softc *)arg;
	struct pci_dev *pd;
	pcireg_t reg;
	int i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		if (why != PWR_RESUME) {
			for (i = 0; i < NMAPREG; i++)
				pd->pd_map[i] = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, PCI_MAPREG_START + (i * 4));
			pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_COMMAND_STATUS_REG);
			pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_BHLC_REG);
			pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_INTERRUPT_REG);
		} else {
			for (i = 0; i < NMAPREG; i++)
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    PCI_MAPREG_START + (i * 4),
				    pd->pd_map[i]);
			reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_COMMAND_STATUS_REG);
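			/*
			 * Restore only the command half (low 16 bits) of
			 * the register; the upper half holds the status
			 * bits, which are left as the device reports them.
			 */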
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    PCI_COMMAND_STATUS_REG,
			    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
			pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
			    pd->pd_bhlc);
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    PCI_INTERRUPT_REG, pd->pd_int);
		}
	}
}

int
pciprint(void *aux, const char *pnp)
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
		    sizeof devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (!pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
		    sizeof devinfo);
		printf(" %s", devinfo);
	}

	return (UNCONF);
}

int
pcisubmatch(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = match;
	struct pci_attach_args *pa = aux;

	if (cf->pcicf_dev != PCI_UNK_DEV &&
	    cf->pcicf_dev != pa->pa_device)
		return (0);
	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
	    cf->pcicf_function != pa->pa_function)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, match, aux));
}

int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	struct pci_dev *pd;
	struct device *dev;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret = 0, pin, bus, device, function;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_pc = pc;
	pa.pa_ioex = sc->sc_ioex;
	pa.pa_memex = sc->sc_memex;
	pa.pa_pmemex = sc->sc_pmemex;
	pa.pa_domain = sc->sc_domain;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;
	pa.pa_bridgetag = sc->sc_bridgetag;
	pa.pa_bridgeih = sc->sc_bridgeih;

	/* This is a simplification of the NetBSD code.
	   We don't support turning off I/O or memory
	   on broken hardware. <csapuntz@stanford.edu> */
	pa.pa_flags = PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
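		 * (For example, pin 2 (INTB#) with pa_intrswiz == 3
		 * becomes ((2 + 3 - 1) % 4) + 1 == 1, i.e. INTA#.)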
		 */
		pa.pa_intrpin =	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		if ((dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch))) {
			pcireg_t reg;

			/* skip header type != 0 */
			reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
			if (PCI_HDRTYPE_TYPE(reg) != 0)
				return (0);
			if (pci_get_capability(pc, tag,
			    PCI_CAP_PWRMGMT, NULL, NULL) == 0)
				return (0);
			if (!(pd = malloc(sizeof *pd, M_DEVBUF,
			    M_NOWAIT)))
				return (0);
			pd->pd_tag = tag;
			pd->pd_dev = dev;
			LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);
		}
	}

	return (ret);
}

int
pci_detach_devices(struct pci_softc *sc, int flags)
{
	struct pci_dev *pd, *next;
	int ret;

	ret = config_detach_children(&sc->sc_dev, flags);
	if (ret != 0)
		return (ret);

	for (pd = LIST_FIRST(&sc->sc_devs);
	    pd != LIST_END(&sc->sc_devs); pd = next) {
		next = LIST_NEXT(pd, pd_next);
		free(pd, M_DEVBUF);
	}
	LIST_INIT(&sc->sc_devs);

	return (0);
}

int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
	case 1:	/* PCI-PCI bridge header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

int
pci_find_device(struct pci_attach_args *pa,
    int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *)pcidev,
		    match, pa) != 0)
			return (1);
	}
	return (0);
}

int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
{
	pcireg_t reg;
	int offset;

	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
		if ((reg & PCI_PMCSR_STATE_MASK) != state) {
			pci_conf_write(pc, tag, offset + PCI_PMCSR,
			    (reg & ~PCI_PMCSR_STATE_MASK) | state);
			return (reg & PCI_PMCSR_STATE_MASK);
		}
	}
	return (state);
}

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
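 *
 * pciattach() runs this twice: first with pci_reserve_resources() as the
 * match function to claim BAR and bridge windows in the bus extents, then
 * with a NULL match function so pci_probe_device() attaches the devices.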
 */
int
pci_enumerate_bus(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (device = 0; device < sc->sc_maxndevs; device++) {
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}

	return (0);
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */

int
pci_reserve_resources(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t bhlc, blr, type;
	bus_addr_t base, limit;
	bus_size_t size;
	int reg, reg_start, reg_end;
	int flags;

	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(bhlc)) {
	case 0:
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_END;
		break;
	case 1:	/* PCI-PCI bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PPB_END;
		break;
	case 2:	/* PCI-CardBus bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PCB_END;
		break;
	default:
		return (0);
	}

	for (reg = reg_start; reg < reg_end; reg += 4) {
		if (!pci_mapreg_probe(pc, tag, reg, &type))
			continue;

		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, &flags))
			continue;

		if (base == 0)
			continue;

		switch (type) {
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
#ifdef BUS_SPACE_MAP_PREFETCHABLE
			if (ISSET(flags, BUS_SPACE_MAP_PREFETCHABLE) &&
			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) == 0) {
				break;
			}
#endif
			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
			    base, size, EX_NOWAIT)) {
				printf("mem address conflict 0x%x/0x%x\n",
				    base, size);
				pci_conf_write(pc, tag, reg, 0);
				if (type & PCI_MAPREG_MEM_TYPE_64BIT)
					pci_conf_write(pc, tag, reg + 4, 0);
			}
			break;
		case PCI_MAPREG_TYPE_IO:
			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
			    base, size, EX_NOWAIT)) {
				printf("io address conflict 0x%x/0x%x\n",
				    base, size);
				pci_conf_write(pc, tag, reg, 0);
			}
			break;
		}

		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
			reg += 4;
	}

	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return (0);

	/*
	 * Figure out the I/O address range of the bridge.
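	 * Bits 7:4 of PPB_REG_IOSTATUS hold I/O base address bits 15:12,
	 * bits 15:12 hold I/O limit bits 15:12, and PPB_REG_IO_HI supplies
	 * the upper 16 bits of each if the bridge implements them.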
	 */
	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
	base = (blr & 0x000000f0) << 8;
	limit = (blr & 0x0000f000) | 0x00000fff;
	blr = pci_conf_read(pc, tag, PPB_REG_IO_HI);
	base |= (blr & 0x0000ffff) << 16;
	limit |= (blr & 0xffff0000);
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_ioex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT)) {
			printf("bridge io address conflict 0x%x/0x%x\n",
			    base, size);
			blr &= 0xffff0000;
			blr |= 0x000000f0;
			pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr);
		}
	}

	/* Figure out the memory mapped I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_memex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("bridge mem address conflict 0x%x/0x%x\n",
			    base, size);
			pci_conf_write(pc, tag, PPB_REG_MEM, 0x0000fff0);
		}
	}

	/* Figure out the prefetchable memory address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_pmemex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_pmemex, base, size, EX_NOWAIT)) {
			printf("bridge mem address conflict 0x%x/0x%x\n",
			    base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	} else if (pa->pa_memex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("bridge mem address conflict 0x%x/0x%x\n",
			    base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	}

	return (0);
}

/*
 * Vital Product Data (PCI 2.2)
 */

int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
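		 * We give up after 20 polls (roughly 80us with the
		 * delay(4) below) and report failure to the caller.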
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

int
pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
    int nent)
{
	const struct pci_matchid *pm;
	int i;

	for (i = 0, pm = ids; i < nent; i++, pm++)
		if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
		    PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
			return (1);
	return (0);
}

#ifdef USER_PCICONF
/*
 * This is the user interface to PCI configuration space.
 */

#include <sys/pciio.h>
#include <sys/fcntl.h>

#ifdef DEBUG
#define PCIDEBUG(x) printf x
#else
#define PCIDEBUG(x)
#endif

int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);

int
pciopen(dev_t dev, int oflags, int devtype, struct proc *p)
{
	PCIDEBUG(("pciopen ndevs: %d\n", pci_cd.cd_ndevs));

	if (minor(dev) >= pci_ndomains) {
		return ENXIO;
	}

#ifndef APERTURE
	if ((oflags & FWRITE) && securelevel > 0) {
		return EPERM;
	}
#else
	if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
		return EPERM;
	}
#endif
	return (0);
}

int
pciclose(dev_t dev, int flag, int devtype, struct proc *p)
{
	PCIDEBUG(("pciclose\n"));
	return (0);
}

int
pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct pcisel *sel = (struct pcisel *)data;
	struct pci_io *io;
	struct pci_rom *rom;
	int i, error;
	pcitag_t tag;
	struct pci_softc *pci = NULL;
	pci_chipset_tag_t pc;

	switch (cmd) {
	case PCIOCREAD:
		break;
	case PCIOCWRITE:
		if (!(flag & FWRITE))
			return EPERM;
		break;
	case PCIOCGETROM:
		break;
	default:
		return ENOTTY;
	}

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pci = pci_cd.cd_devs[i];
		if (pci != NULL && pci->sc_domain == minor(dev) &&
		    pci->sc_bus == sel->pc_bus)
			break;
	}
	if (i >= pci_cd.cd_ndevs)
		return ENXIO;

	/* Check bounds */
	if (pci->sc_bus >= 256 ||
	    sel->pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
	    sel->pc_func >= 8)
		return EINVAL;

	pc = pci->sc_pc;
	tag = pci_make_tag(pc, sel->pc_bus, sel->pc_dev, sel->pc_func);

	switch (cmd) {
	case PCIOCREAD:
		io = (struct pci_io *)data;
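		/* Only naturally aligned 32-bit accesses are supported. */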
		switch (io->pi_width) {
		case 4:
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
			error = 0;
			break;
		default:
			error = ENODEV;
			break;
		}
		break;

	case PCIOCWRITE:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
			error = 0;
			break;
		default:
			error = ENODEV;
			break;
		}
		break;

	case PCIOCGETROM:
	{
		pcireg_t addr, mask, bhlc;
		bus_space_handle_t h;
		bus_size_t len, off;
		char buf[256];
		int s;

		bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			return (ENODEV);

		s = splhigh();
		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
		splx(s);

		/*
		 * Section 6.2.5.2 `Expansion ROM Base Address Register',
		 * tells us that only the upper 21 bits are writable.
		 * This means that the size of a ROM must be a
		 * multiple of 2 KB.  So reading the ROM in chunks of
		 * 256 bytes should work just fine.
		 */
		if (PCI_ROM_ADDR(addr) == 0 ||
		    PCI_ROM_SIZE(mask) % sizeof(buf) != 0)
			return (ENODEV);

		rom = (struct pci_rom *)data;
		if (rom->pr_romlen < PCI_ROM_SIZE(mask)) {
			error = ENOMEM;
			goto fail;
		}

		error = bus_space_map(pci->sc_memt, PCI_ROM_ADDR(addr),
		    PCI_ROM_SIZE(mask), 0, &h);
		if (error)
			goto fail;

		off = 0;
		len = PCI_ROM_SIZE(mask);
		while (len > 0 && error == 0) {
			s = splhigh();
			pci_conf_write(pc, tag, PCI_ROM_REG,
			    addr | PCI_ROM_ENABLE);
			bus_space_read_region_1(pci->sc_memt, h, off,
			    buf, sizeof(buf));
			pci_conf_write(pc, tag, PCI_ROM_REG, addr);
			splx(s);

			error = copyout(buf, rom->pr_rom + off, sizeof(buf));
			off += sizeof(buf);
			len -= sizeof(buf);
		}

		bus_space_unmap(pci->sc_memt, h, PCI_ROM_SIZE(mask));

	fail:
		rom->pr_romlen = PCI_ROM_SIZE(mask);
		break;
	}

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

#endif
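
/*
 * Illustrative userland sketch (not part of this file): reading a device's
 * ID register through the PCIOCREAD ioctl handled above.  struct pci_io and
 * struct pcisel come from <sys/pciio.h>; the "/dev/pci" path and the chosen
 * bus/device/function are assumptions made for the example.  Register 0x00
 * is the vendor/product ID word, and pi_width must be 4 because only
 * aligned 4-byte accesses are accepted.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/pciio.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct pci_io io;
 *		int fd;
 *
 *		if ((fd = open("/dev/pci", O_RDONLY)) == -1)
 *			err(1, "open");
 *
 *		memset(&io, 0, sizeof(io));
 *		io.pi_sel.pc_bus = 0;
 *		io.pi_sel.pc_dev = 0;
 *		io.pi_sel.pc_func = 0;
 *		io.pi_reg = 0x00;
 *		io.pi_width = 4;
 *
 *		if (ioctl(fd, PCIOCREAD, &io) == -1)
 *			err(1, "PCIOCREAD");
 *		printf("id: 0x%08x\n", io.pi_data);
 *
 *		close(fd);
 *		return (0);
 *	}
 */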