1 /* $NetBSD: pci_machdep.c,v 1.52 2011/10/18 23:43:36 dyoung Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. 35 * Copyright (c) 1994 Charles M. Hannum. All rights reserved. 
36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 1. Redistributions of source code must retain the above copyright 41 * notice, this list of conditions and the following disclaimer. 42 * 2. Redistributions in binary form must reproduce the above copyright 43 * notice, this list of conditions and the following disclaimer in the 44 * documentation and/or other materials provided with the distribution. 45 * 3. All advertising materials mentioning features or use of this software 46 * must display the following acknowledgement: 47 * This product includes software developed by Charles M. Hannum. 48 * 4. The name of the author may not be used to endorse or promote products 49 * derived from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 61 */ 62 63 /* 64 * Machine-specific functions for PCI autoconfiguration. 65 * 66 * On PCs, there are two methods of generating PCI configuration cycles. 67 * We try to detect the appropriate mechanism for this machine and set 68 * up a few function pointers to access the correct method directly. 
69 * 70 * The configuration method can be hard-coded in the config file by 71 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode 72 * as defined section 3.6.4.1, `Generating Configuration Cycles'. 73 */ 74 75 #include <sys/cdefs.h> 76 __KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.52 2011/10/18 23:43:36 dyoung Exp $"); 77 78 #include <sys/types.h> 79 #include <sys/param.h> 80 #include <sys/time.h> 81 #include <sys/systm.h> 82 #include <sys/errno.h> 83 #include <sys/device.h> 84 #include <sys/bus.h> 85 #include <sys/cpu.h> 86 #include <sys/kmem.h> 87 88 #include <uvm/uvm_extern.h> 89 90 #include <machine/bus_private.h> 91 92 #include <machine/pio.h> 93 #include <machine/lock.h> 94 95 #include <dev/isa/isareg.h> 96 #include <dev/isa/isavar.h> 97 #include <dev/pci/pcivar.h> 98 #include <dev/pci/pcireg.h> 99 #include <dev/pci/pccbbreg.h> 100 #include <dev/pci/pcidevs.h> 101 #include <dev/pci/genfb_pcivar.h> 102 103 #include <dev/wsfb/genfbvar.h> 104 #include <arch/x86/include/genfb_machdep.h> 105 #include <dev/ic/vgareg.h> 106 107 #include "acpica.h" 108 #include "genfb.h" 109 #include "isa.h" 110 #include "opt_acpi.h" 111 #include "opt_ddb.h" 112 #include "opt_mpbios.h" 113 #include "opt_vga.h" 114 #include "pci.h" 115 #include "wsdisplay.h" 116 117 #ifdef DDB 118 #include <machine/db_machdep.h> 119 #include <ddb/db_sym.h> 120 #include <ddb/db_extern.h> 121 #endif 122 123 #ifdef VGA_POST 124 #include <x86/vga_post.h> 125 #endif 126 127 #include <machine/autoconf.h> 128 #include <machine/bootinfo.h> 129 130 #ifdef MPBIOS 131 #include <machine/mpbiosvar.h> 132 #endif 133 134 #if NACPICA > 0 135 #include <machine/mpacpi.h> 136 #endif 137 138 #include <machine/mpconfig.h> 139 140 #include "opt_pci_conf_mode.h" 141 142 #ifdef __i386__ 143 #include "opt_xbox.h" 144 #ifdef XBOX 145 #include <machine/xbox.h> 146 #endif 147 #endif 148 149 #ifdef PCI_CONF_MODE 150 #if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2) 151 static int pci_mode = PCI_CONF_MODE; 152 
#else 153 #error Invalid PCI configuration mode. 154 #endif 155 #else 156 static int pci_mode = -1; 157 #endif 158 159 struct pci_conf_lock { 160 uint32_t cl_cpuno; /* 0: unlocked 161 * 1 + n: locked by CPU n (0 <= n) 162 */ 163 uint32_t cl_sel; /* the address that's being read. */ 164 }; 165 166 static void pci_conf_unlock(struct pci_conf_lock *); 167 static uint32_t pci_conf_selector(pcitag_t, int); 168 static unsigned int pci_conf_port(pcitag_t, int); 169 static void pci_conf_select(uint32_t); 170 static void pci_conf_lock(struct pci_conf_lock *, uint32_t); 171 static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *); 172 struct pci_bridge_hook_arg { 173 void (*func)(pci_chipset_tag_t, pcitag_t, void *); 174 void *arg; 175 }; 176 177 #define PCI_MODE1_ENABLE 0x80000000UL 178 #define PCI_MODE1_ADDRESS_REG 0x0cf8 179 #define PCI_MODE1_DATA_REG 0x0cfc 180 181 #define PCI_MODE2_ENABLE_REG 0x0cf8 182 #define PCI_MODE2_FORWARD_REG 0x0cfa 183 184 #define _m1tag(b, d, f) \ 185 (PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8)) 186 #define _qe(bus, dev, fcn, vend, prod) \ 187 {_m1tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)} 188 struct { 189 uint32_t tag; 190 pcireg_t id; 191 } pcim1_quirk_tbl[] = { 192 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1), 193 /* XXX Triflex2 not tested */ 194 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2), 195 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4), 196 /* Triton needed for Connectix Virtual PC */ 197 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX), 198 /* Connectix Virtual PC 5 has a 440BX */ 199 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP), 200 /* Parallels Desktop for Mac */ 201 _qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO), 202 _qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS), 203 /* SIS 740 */ 204 _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740), 205 /* SIS 741 */ 206 _qe(0, 0, 0, PCI_VENDOR_SIS, 
PCI_PRODUCT_SIS_741), 207 {0, 0xffffffff} /* patchable */ 208 }; 209 #undef _m1tag 210 #undef _id 211 #undef _qe 212 213 /* 214 * PCI doesn't have any special needs; just use the generic versions 215 * of these functions. 216 */ 217 struct x86_bus_dma_tag pci_bus_dma_tag = { 218 ._tag_needs_free = 0, 219 #if defined(_LP64) || defined(PAE) 220 ._bounce_thresh = PCI32_DMA_BOUNCE_THRESHOLD, 221 ._bounce_alloc_lo = ISA_DMA_BOUNCE_THRESHOLD, 222 ._bounce_alloc_hi = PCI32_DMA_BOUNCE_THRESHOLD, 223 #else 224 ._bounce_thresh = 0, 225 ._bounce_alloc_lo = 0, 226 ._bounce_alloc_hi = 0, 227 #endif 228 ._may_bounce = NULL, 229 }; 230 231 #ifdef _LP64 232 struct x86_bus_dma_tag pci_bus_dma64_tag = { 233 ._tag_needs_free = 0, 234 ._bounce_thresh = 0, 235 ._bounce_alloc_lo = 0, 236 ._bounce_alloc_hi = 0, 237 ._may_bounce = NULL, 238 }; 239 #endif 240 241 static struct pci_conf_lock cl0 = { 242 .cl_cpuno = 0UL 243 , .cl_sel = 0UL 244 }; 245 246 static struct pci_conf_lock * const cl = &cl0; 247 248 #if NGENFB > 0 && NACPICA > 0 && defined(VGA_POST) 249 extern int acpi_md_vbios_reset; 250 extern int acpi_md_vesa_modenum; 251 #endif 252 253 static struct genfb_colormap_callback gfb_cb; 254 static struct genfb_pmf_callback pmf_cb; 255 static struct genfb_mode_callback mode_cb; 256 #ifdef VGA_POST 257 static struct vga_post *vga_posth = NULL; 258 #endif 259 260 static void 261 pci_conf_lock(struct pci_conf_lock *ocl, uint32_t sel) 262 { 263 uint32_t cpuno; 264 265 KASSERT(sel != 0); 266 267 kpreempt_disable(); 268 cpuno = cpu_number() + 1; 269 /* If the kernel enters pci_conf_lock() through an interrupt 270 * handler, then the CPU may already hold the lock. 271 * 272 * If the CPU does not already hold the lock, spin until 273 * we can acquire it. 
274 */ 275 if (cpuno == cl->cl_cpuno) { 276 ocl->cl_cpuno = cpuno; 277 } else { 278 u_int spins; 279 280 ocl->cl_cpuno = 0; 281 282 spins = SPINLOCK_BACKOFF_MIN; 283 while (atomic_cas_32(&cl->cl_cpuno, 0, cpuno) != 0) { 284 SPINLOCK_BACKOFF(spins); 285 #ifdef LOCKDEBUG 286 if (SPINLOCK_SPINOUT(spins)) { 287 panic("%s: cpu %" PRId32 288 " spun out waiting for cpu %" PRId32, 289 __func__, cpuno, cl->cl_cpuno); 290 } 291 #endif /* LOCKDEBUG */ 292 } 293 } 294 295 /* Only one CPU can be here, so an interlocked atomic_swap(3) 296 * is not necessary. 297 * 298 * Evaluating atomic_cas_32_ni()'s argument, cl->cl_sel, 299 * and applying atomic_cas_32_ni() is not an atomic operation, 300 * however, any interrupt that, in the middle of the 301 * operation, modifies cl->cl_sel, will also restore 302 * cl->cl_sel. So cl->cl_sel will have the same value when 303 * we apply atomic_cas_32_ni() as when we evaluated it, 304 * before. 305 */ 306 ocl->cl_sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, sel); 307 pci_conf_select(sel); 308 } 309 310 static void 311 pci_conf_unlock(struct pci_conf_lock *ocl) 312 { 313 uint32_t sel; 314 315 sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, ocl->cl_sel); 316 pci_conf_select(ocl->cl_sel); 317 if (ocl->cl_cpuno != cl->cl_cpuno) 318 atomic_cas_32(&cl->cl_cpuno, cl->cl_cpuno, ocl->cl_cpuno); 319 kpreempt_enable(); 320 } 321 322 static uint32_t 323 pci_conf_selector(pcitag_t tag, int reg) 324 { 325 static const pcitag_t mode2_mask = { 326 .mode2 = { 327 .enable = 0xff 328 , .forward = 0xff 329 } 330 }; 331 332 switch (pci_mode) { 333 case 1: 334 return tag.mode1 | reg; 335 case 2: 336 return tag.mode1 & mode2_mask.mode1; 337 default: 338 panic("%s: mode not configured", __func__); 339 } 340 } 341 342 static unsigned int 343 pci_conf_port(pcitag_t tag, int reg) 344 { 345 switch (pci_mode) { 346 case 1: 347 return PCI_MODE1_DATA_REG; 348 case 2: 349 return tag.mode2.port | reg; 350 default: 351 panic("%s: mode not configured", __func__); 352 } 353 } 
/*
 * Emit the configuration-space selector to the hardware.  Mode 1 is a
 * single 32-bit write of the address register; mode 2 writes the
 * enable byte and, when non-zero, the forward (bus) byte separately.
 * The write order here is significant for mode 2.
 */
static void
pci_conf_select(uint32_t sel)
{
	pcitag_t tag;

	switch (pci_mode) {
	case 1:
		outl(PCI_MODE1_ADDRESS_REG, sel);
		return;
	case 2:
		tag.mode1 = sel;
		outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable);
		/* forward register is only meaningful while enabled */
		if (tag.mode2.enable != 0)
			outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward);
		return;
	default:
		panic("%s: mode not configured", __func__);
	}
}

/*
 * Autoconfiguration hook called when a PCI bus attaches; reports the
 * configuration mode for the root bus and gives MP BIOS/ACPI code a
 * chance to fix up the bus attach arguments.
 */
void
pci_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba)
{

	if (pba->pba_bus == 0)
		aprint_normal(": configuration mode %d", pci_mode);
#ifdef MPBIOS
	mpbios_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0
	mpacpi_pci_attach_hook(parent, self, pba);
#endif
}

/*
 * Return the number of device slots to probe on bus `busno':
 * 16 under Configuration Mechanism 2, 32 under Mechanism 1,
 * with a special case for the Xbox (see below).
 */
int
pci_bus_maxdevs(pci_chipset_tag_t pc, int busno)
{

#if defined(__i386__) && defined(XBOX)
	/*
	 * Scanning above the first device is fatal on the Microsoft Xbox.
	 * If busno=1, only allow for one device.
	 */
	if (arch_i386_is_xbox) {
		if (busno == 1)
			return 1;
		else if (busno > 1)
			return 0;
	}
#endif

	/*
	 * Bus number is irrelevant.  If Configuration Mechanism 2 is in
	 * use, can only have devices 0-15 on any bus.  If Configuration
	 * Mechanism 1 is in use, can have devices 0-32 (i.e. the `normal'
	 * range).
	 */
	if (pci_mode == 2)
		return (16);
	else
		return (32);
}

/*
 * Encode <bus, device, function> into a pcitag_t for the active
 * configuration mechanism, honoring any chipset-tag override chain
 * first.  Panics on out-of-range arguments or unconfigured mode.
 */
pcitag_t
pci_make_tag(pci_chipset_tag_t pc, int bus, int device, int function)
{
	pci_chipset_tag_t ipc;
	pcitag_t tag;

	/* walk the override chain; first tag-maker wins */
	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_MAKE_TAG) == 0)
			continue;
		return (*ipc->pc_ov->ov_make_tag)(ipc->pc_ctx,
		    pc, bus, device, function);
	}

	switch (pci_mode) {
	case 1:
		if (bus >= 256 || device >= 32 || function >= 8)
			panic("%s: bad request", __func__);

		tag.mode1 = PCI_MODE1_ENABLE |
		    (bus << 16) | (device << 11) | (function << 8);
		return tag;
	case 2:
		if (bus >= 256 || device >= 16 || function >= 8)
			panic("%s: bad request", __func__);

		tag.mode2.port = 0xc000 | (device << 8);
		tag.mode2.enable = 0xf0 | (function << 1);
		tag.mode2.forward = bus;
		return tag;
	default:
		panic("%s: mode not configured", __func__);
	}
}

/*
 * Decode a pcitag_t back into bus/device/function.  Any of bp, dp, fp
 * may be NULL if the caller is not interested in that component.
 * Honors the chipset-tag override chain first.
 */
void
pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag,
    int *bp, int *dp, int *fp)
{
	pci_chipset_tag_t ipc;

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0)
			continue;
		(*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx,
		    pc, tag, bp, dp, fp);
		return;
	}

	switch (pci_mode) {
	case 1:
		if (bp != NULL)
			*bp = (tag.mode1 >> 16) & 0xff;
		if (dp != NULL)
			*dp = (tag.mode1 >> 11) & 0x1f;
		if (fp != NULL)
			*fp = (tag.mode1 >> 8) & 0x7;
		return;
	case 2:
		if (bp != NULL)
			*bp = tag.mode2.forward & 0xff;
		if (dp != NULL)
			*dp = (tag.mode2.port >> 8) & 0xf;
		if (fp != NULL)
			*fp = (tag.mode2.enable >> 1) & 0x7;
		return;
	default:
		panic("%s: mode not configured", __func__);
	}
}

/*
 * Read the 32-bit configuration register `reg' of the device named by
 * `tag'.  reg must be 4-byte aligned.  Serialized against all other
 * CPUs via pci_conf_lock().
 */
pcireg_t
pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg)
{
	pci_chipset_tag_t ipc;
	pcireg_t data;
	struct pci_conf_lock ocl;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_READ) == 0)
			continue;
		return (*ipc->pc_ov->ov_conf_read)(ipc->pc_ctx, pc, tag, reg);
	}

#if defined(__i386__) && defined(XBOX)
	/* Xbox: touching dev 0 fn 1/2 hangs the machine; fake no device. */
	if (arch_i386_is_xbox) {
		int bus, dev, fn;
		pci_decompose_tag(pc, tag, &bus, &dev, &fn);
		if (bus == 0 && dev == 0 && (fn == 1 || fn == 2))
			return (pcireg_t)-1;
	}
#endif

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	data = inl(pci_conf_port(tag, reg));
	pci_conf_unlock(&ocl);
	return data;
}

/*
 * Write `data' to the 32-bit configuration register `reg' of the
 * device named by `tag'.  reg must be 4-byte aligned.  Serialized via
 * pci_conf_lock().
 */
void
pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
{
	pci_chipset_tag_t ipc;
	struct pci_conf_lock ocl;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
			continue;
		(*ipc->pc_ov->ov_conf_write)(ipc->pc_ctx, pc, tag, reg,
		    data);
		return;
	}

#if defined(__i386__) && defined(XBOX)
	/* Xbox: silently drop writes to the fatal dev 0 fn 1/2 slots. */
	if (arch_i386_is_xbox) {
		int bus, dev, fn;
		pci_decompose_tag(pc, tag, &bus, &dev, &fn);
		if (bus == 0 && dev == 0 && (fn == 1 || fn == 2))
			return;
	}
#endif

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	outl(pci_conf_port(tag, reg), data);
	pci_conf_unlock(&ocl);
}

/*
 * Force the configuration mode to `mode'.  May only be called before
 * the mode has been probed, or with the same value it already has.
 */
void
pci_mode_set(int mode)
{
	KASSERT(pci_mode == -1 || pci_mode == mode);

	pci_mode = mode;
}

/*
 * Probe which configuration mechanism the host bridge implements.
 * Returns (and caches in pci_mode) 1, 2, or 0 (no PCI).  The exact
 * order of the port accesses below is the probe protocol; do not
 * reorder them.
 */
int
pci_mode_detect(void)
{
	uint32_t sav, val;
	int i;
	pcireg_t idreg;

	/* already probed (or forced via PCI_CONF_MODE/pci_mode_set) */
	if (pci_mode != -1)
		return pci_mode;

	/*
	 * We try to divine which configuration mode the host bridge wants.
	 */

	sav = inl(PCI_MODE1_ADDRESS_REG);

	pci_mode = 1; /* assume this for now */
	/*
	 * catch some known buggy implementations of mode 1
	 */
	for (i = 0; i < __arraycount(pcim1_quirk_tbl); i++) {
		pcitag_t t;

		/* zero tag terminates the (patchable) table */
		if (!pcim1_quirk_tbl[i].tag)
			break;
		t.mode1 = pcim1_quirk_tbl[i].tag;
		idreg = pci_conf_read(0, t, PCI_ID_REG); /* needs "pci_mode" */
		if (idreg == pcim1_quirk_tbl[i].id) {
#ifdef DEBUG
			printf("known mode 1 PCI chipset (%08x)\n",
			    idreg);
#endif
			return (pci_mode);
		}
	}

	/*
	 * Strong check for standard compliant mode 1:
	 * 1. bit 31 ("enable") can be set
	 * 2. byte/word access does not affect register
	 */
	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE);
	outb(PCI_MODE1_ADDRESS_REG + 3, 0);
	outw(PCI_MODE1_ADDRESS_REG + 2, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) {
#ifdef DEBUG
		printf("pci_mode_detect: mode 1 enable failed (%x)\n",
		    val);
#endif
		goto not1;
	}
	outl(PCI_MODE1_ADDRESS_REG, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != 0)
		goto not1;
	return (pci_mode);
not1:
	/* restore whatever was in the address register before probing */
	outl(PCI_MODE1_ADDRESS_REG, sav);

	/*
	 * This mode 2 check is quite weak (and known to give false
	 * positives on some Compaq machines).
	 * However, this doesn't matter, because this is the
	 * last test, and simply no PCI devices will be found if
	 * this happens.
	 */
	outb(PCI_MODE2_ENABLE_REG, 0);
	outb(PCI_MODE2_FORWARD_REG, 0);
	if (inb(PCI_MODE2_ENABLE_REG) != 0 ||
	    inb(PCI_MODE2_FORWARD_REG) != 0)
		goto not2;
	return (pci_mode = 2);
not2:

	return (pci_mode = 0);
}

/*
 * Determine which flags should be passed to the primary PCI bus's
 * autoconfiguration node.  We use this to detect broken chipsets
 * which cannot safely use memory-mapped device access.
 */
int
pci_bus_flags(void)
{
	int rval = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY |
	    PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY | PCI_FLAGS_MWI_OKAY;
	int device, maxndevs;
	pcitag_t tag;
	pcireg_t id;

	maxndevs = pci_bus_maxdevs(NULL, 0);

	/* scan bus 0, function 0 of each slot, for known-broken bridges */
	for (device = 0; device < maxndevs; device++) {
		tag = pci_make_tag(NULL, 0, device, 0);
		id = pci_conf_read(NULL, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		switch (PCI_VENDOR(id)) {
		case PCI_VENDOR_SIS:
			switch (PCI_PRODUCT(id)) {
			case PCI_PRODUCT_SIS_85C496:
				goto disable_mem;
			}
			break;
		}
	}

	return (rval);

disable_mem:
	printf("Warning: broken PCI-Host bridge detected; "
	    "disabling memory-mapped access\n");
	rval &= ~(PCI_FLAGS_MEM_OKAY|PCI_FLAGS_MRL_OKAY|PCI_FLAGS_MRM_OKAY|
	    PCI_FLAGS_MWI_OKAY);
	return (rval);
}

/*
 * Invoke `func' for every PCI function on buses 0..maxbus.
 * Convenience wrapper around pci_device_foreach_min().
 */
void
pci_device_foreach(pci_chipset_tag_t pc, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	pci_device_foreach_min(pc, 0, maxbus, func, context);
}

/*
 * Invoke `func(pc, tag, context)' for every present PCI function on
 * buses minbus..maxbus (inclusive).  Functions 1-7 of a device are
 * only scanned when the header declares it multifunction or a quirk
 * entry says so.
 */
void
pci_device_foreach_min(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	const struct pci_quirkdata *qd;
	int bus, device, function, maxdevs, nfuncs;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (bus = minbus; bus <= maxbus; bus++) {
		maxdevs = pci_bus_maxdevs(pc, bus);
		for (device = 0; device < maxdevs; device++) {
			tag = pci_make_tag(pc, bus, device, 0);
			id = pci_conf_read(pc, tag, PCI_ID_REG);

			/* Invalid vendor ID value? */
			if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
				continue;
			/* XXX Not invalid, but we've done this ~forever. */
			if (PCI_VENDOR(id) == 0)
				continue;

			qd = pci_lookup_quirkdata(PCI_VENDOR(id),
			    PCI_PRODUCT(id));

			/* decide how many functions of this device to scan */
			bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
			if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
			    (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
				nfuncs = 8;
			else
				nfuncs = 1;

			for (function = 0; function < nfuncs; function++) {
				tag = pci_make_tag(pc, bus, device, function);
				id = pci_conf_read(pc, tag, PCI_ID_REG);

				/* Invalid vendor ID value? */
				if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
					continue;
				/*
				 * XXX Not invalid, but we've done this
				 * ~forever.
				 */
				if (PCI_VENDOR(id) == 0)
					continue;
				(*func)(pc, tag, context);
			}
		}
	}
}

/*
 * Invoke `func' for every PCI-PCI or CardBus bridge on buses
 * minbus..maxbus, by filtering all devices through pci_bridge_hook().
 */
void
pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx)
{
	struct pci_bridge_hook_arg bridge_hook;

	bridge_hook.func = func;
	bridge_hook.arg = ctx;

	pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook,
	    &bridge_hook);
}

/*
 * Per-device filter for pci_bridge_foreach(): calls the user function
 * only for PCI-PCI and CardBus bridges.
 */
static void
pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct pci_bridge_hook_arg *bridge_hook = (void *)ctx;
	pcireg_t reg;

	reg = pci_conf_read(pc, tag, PCI_CLASS_REG);
	if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
	    (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI ||
	     PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) {
		(*bridge_hook->func)(pc, tag, bridge_hook->arg);
	}
}

/*
 * Map a PCI_OVERRIDE_* bit to the corresponding member of the
 * overrides table, or NULL for an unknown bit.  Used to validate that
 * every advertised override has a function pointer.
 */
static const void *
bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit)
{
	switch (bit) {
	case PCI_OVERRIDE_CONF_READ:
		return ov->ov_conf_read;
	case PCI_OVERRIDE_CONF_WRITE:
		return ov->ov_conf_write;
	case PCI_OVERRIDE_INTR_MAP:
		return ov->ov_intr_map;
	case PCI_OVERRIDE_INTR_STRING:
		return ov->ov_intr_string;
	case PCI_OVERRIDE_INTR_EVCNT:
		return ov->ov_intr_evcnt;
	case PCI_OVERRIDE_INTR_ESTABLISH:
		return ov->ov_intr_establish;
	case PCI_OVERRIDE_INTR_DISESTABLISH:
		return ov->ov_intr_disestablish;
	case PCI_OVERRIDE_MAKE_TAG:
		return ov->ov_make_tag;
	case PCI_OVERRIDE_DECOMPOSE_TAG:
		return ov->ov_decompose_tag;
	default:
		return NULL;
	}
}

/* Free a chipset tag allocated by pci_chipset_tag_create(). */
void
pci_chipset_tag_destroy(pci_chipset_tag_t pc)
{
	kmem_free(pc, sizeof(struct pci_chipset_tag));
}

/*
 * Create a chipset tag layered over `opc' whose operations in
 * `present' are overridden by the corresponding entries of `ov'
 * (evaluated with context `ctx').  Returns 0 and stores the new tag
 * in *pcp on success; EINVAL if no overrides are given or a bit in
 * `present' has no matching function pointer; ENOMEM on allocation
 * failure.  Callers free the tag with pci_chipset_tag_destroy().
 */
int
pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present,
    const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp)
{
	uint64_t bit, bits, nbits;
	pci_chipset_tag_t pc;
	const void *fp;

	if (ov == NULL || present == 0)
		return EINVAL;

	pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP);

	/* defensive: KM_SLEEP allocations do not normally fail */
	if (pc == NULL)
		return ENOMEM;

	pc->pc_super = opc;

	/* check each set bit of `present' has an override function */
	for (bits = present; bits != 0; bits = nbits) {
		nbits = bits & (bits - 1);	/* clear lowest set bit */
		bit = nbits ^ bits;		/* isolate it */
		if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
#ifdef DEBUG
			printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
#endif
			goto einval;
		}
	}

	pc->pc_ov = ov;
	pc->pc_present = present;
	pc->pc_ctx = ctx;

	*pcp = pc;

	return 0;
einval:
	kmem_free(pc, sizeof(struct pci_chipset_tag));
	return EINVAL;
}

/*
 * genfb colormap callback: program one VGA DAC palette entry.
 * The DAC expects 6-bit components, hence the >> 2.
 */
static void
x86_genfb_set_mapreg(void *opaque, int index, int r, int g, int b)
{
	outb(0x3c0 + VGA_DAC_ADDRW, index);
	outb(0x3c0 + VGA_DAC_PALETTE, (uint8_t)r >> 2);
	outb(0x3c0 + VGA_DAC_PALETTE, (uint8_t)g >> 2);
	outb(0x3c0 + VGA_DAC_PALETTE, (uint8_t)b >> 2);
}

/*
 * genfb mode callback: when switching back to the emulated (text
 * console) mode, re-establish MTRRs for the framebuffer and, if VGA
 * POST support is available, restore the boot VESA mode.
 */
static bool
x86_genfb_setmode(struct genfb_softc *sc, int newmode)
{
#if NGENFB > 0
	static int curmode = WSDISPLAYIO_MODE_EMUL;

	switch (newmode) {
	case WSDISPLAYIO_MODE_EMUL:
		x86_genfb_mtrr_init(sc->sc_fboffset,
		    sc->sc_height * sc->sc_stride);
#if NACPICA > 0 && defined(VGA_POST)
		if (curmode != newmode) {
			if (vga_posth != NULL && acpi_md_vesa_modenum != 0) {
				vga_post_set_vbe(vga_posth,
				    acpi_md_vesa_modenum);
			}
		}
#endif
		break;
	}

	curmode = newmode;
#endif
	return true;
}

/* genfb PMF callback: nothing to save on suspend. */
static bool
x86_genfb_suspend(device_t dev, const pmf_qual_t *qual)
{
	return true;
}

/*
 * genfb PMF callback: on resume, optionally re-POST the VGA BIOS and
 * restore the VESA mode, then reload the saved palette.
 */
static bool
x86_genfb_resume(device_t dev, const pmf_qual_t *qual)
{
#if NGENFB > 0
	struct pci_genfb_softc *psc = device_private(dev);

#if NACPICA > 0 && defined(VGA_POST)
	if (vga_posth != NULL && acpi_md_vbios_reset == 2) {
		vga_post_call(vga_posth);
		if (acpi_md_vesa_modenum != 0)
			vga_post_set_vbe(vga_posth, acpi_md_vesa_modenum);
	}
#endif
	genfb_restore_palette(&psc->sc_gen);
#endif

	return true;
}

/*
 * Autoconfiguration "device register" hook for PCI devices: identify
 * the boot network interface from bootinfo, and hand the console
 * framebuffer its boot-loader-supplied properties (geometry, address,
 * palette/mode/PMF callbacks) the first time a display device on PCI
 * is seen.  Returns the boot device when recognized, else NULL.
 */
device_t
device_pci_register(device_t dev, void *aux)
{
	static bool found_console = false;

	device_pci_props_register(dev, aux);

	/*
	 * Handle network interfaces here, the attachment information is
	 * not available driver-independently later.
	 *
	 * For disks, there is nothing useful available at attach time.
	 */
	if (device_class(dev) == DV_IFNET) {
		struct btinfo_netif *bin = lookup_bootinfo(BTINFO_NETIF);
		if (bin == NULL)
			return NULL;

		/*
		 * We don't check the driver name against the device name
		 * passed by the boot ROM.  The ROM should stay usable if
		 * the driver becomes obsolete.  The physical attachment
		 * information (checked below) must be sufficient to
		 * identify the device.
		 */
		if (bin->bus == BI_BUS_PCI &&
		    device_is_a(device_parent(dev), "pci")) {
			struct pci_attach_args *paa = aux;
			int b, d, f;

			/*
			 * Calculate BIOS representation of:
			 *
			 *	<bus,device,function>
			 *
			 * and compare.
			 */
			pci_decompose_tag(paa->pa_pc, paa->pa_tag, &b, &d, &f);
			if (bin->addr.tag == ((b << 8) | (d << 3) | f))
				return dev;
		}
	}
	if (device_parent(dev) && device_is_a(device_parent(dev), "pci") &&
	    found_console == false) {
		struct btinfo_framebuffer *fbinfo;
		struct pci_attach_args *pa = aux;
		prop_dictionary_t dict;

		if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY) {
#if NWSDISPLAY > 0 && NGENFB > 0
			extern struct vcons_screen x86_genfb_console_screen;
			struct rasops_info *ri;

			ri = &x86_genfb_console_screen.scr_ri;
#endif

			fbinfo = lookup_bootinfo(BTINFO_FRAMEBUFFER);
			dict = device_properties(dev);
			/*
			 * framebuffer drivers other than genfb can work
			 * without the address property
			 */
			if (fbinfo != NULL) {
				/* geometry/address only if loader gave one */
				if (fbinfo->physaddr != 0) {
				prop_dictionary_set_uint32(dict, "width",
				    fbinfo->width);
				prop_dictionary_set_uint32(dict, "height",
				    fbinfo->height);
				prop_dictionary_set_uint8(dict, "depth",
				    fbinfo->depth);
				prop_dictionary_set_uint16(dict, "linebytes",
				    fbinfo->stride);

				prop_dictionary_set_uint64(dict, "address",
				    fbinfo->physaddr);
#if NWSDISPLAY > 0 && NGENFB > 0
				if (ri->ri_bits != NULL) {
					prop_dictionary_set_uint64(dict,
					    "virtual_address",
					    (vaddr_t)ri->ri_bits);
				}
#endif
				}
#if notyet
				prop_dictionary_set_bool(dict, "splash",
				    fbinfo->flags & BI_FB_SPLASH ?
				      true : false);
#endif
				/* 8-bit modes need the DAC palette hook */
				if (fbinfo->depth == 8) {
					gfb_cb.gcc_cookie = NULL;
					gfb_cb.gcc_set_mapreg =
					    x86_genfb_set_mapreg;
					prop_dictionary_set_uint64(dict,
					    "cmap_callback",
					    (uint64_t)(uintptr_t)&gfb_cb);
				}
				if (fbinfo->physaddr != 0) {
					mode_cb.gmc_setmode = x86_genfb_setmode;
					prop_dictionary_set_uint64(dict,
					    "mode_callback",
					    (uint64_t)(uintptr_t)&mode_cb);
				}

#if NWSDISPLAY > 0 && NGENFB > 0
				if (device_is_a(dev, "genfb")) {
					x86_genfb_set_console_dev(dev);
#ifdef DDB
					db_trap_callback =
					    x86_genfb_ddb_trap_callback;
#endif
				}
#endif
			}
			prop_dictionary_set_bool(dict, "is_console", true);
			prop_dictionary_set_bool(dict, "clear-screen", false);
#if NWSDISPLAY > 0 && NGENFB > 0
			prop_dictionary_set_uint16(dict, "cursor-row",
			    x86_genfb_console_screen.scr_ri.ri_crow);
#endif
#if notyet
			prop_dictionary_set_bool(dict, "splash",
			    fbinfo->flags & BI_FB_SPLASH ? true : false);
#endif
			pmf_cb.gpc_suspend = x86_genfb_suspend;
			pmf_cb.gpc_resume = x86_genfb_resume;
			prop_dictionary_set_uint64(dict,
			    "pmf_callback", (uint64_t)(uintptr_t)&pmf_cb);
#ifdef VGA_POST
			vga_posth = vga_post_init(pa->pa_bus, pa->pa_device,
			    pa->pa_function);
#endif
			/* only the first PCI display becomes the console */
			found_console = true;
			return NULL;
		}
	}
	return NULL;
}