1 /* $NetBSD: pci_machdep.c,v 1.98 2023/11/21 23:22:23 gutteridge Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. 35 * Copyright (c) 1994 Charles M. Hannum. All rights reserved. 
36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 1. Redistributions of source code must retain the above copyright 41 * notice, this list of conditions and the following disclaimer. 42 * 2. Redistributions in binary form must reproduce the above copyright 43 * notice, this list of conditions and the following disclaimer in the 44 * documentation and/or other materials provided with the distribution. 45 * 3. All advertising materials mentioning features or use of this software 46 * must display the following acknowledgement: 47 * This product includes software developed by Charles M. Hannum. 48 * 4. The name of the author may not be used to endorse or promote products 49 * derived from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 61 */ 62 63 /* 64 * Machine-specific functions for PCI autoconfiguration. 65 * 66 * On PCs, there are two methods of generating PCI configuration cycles. 67 * We try to detect the appropriate mechanism for this machine and set 68 * up a few function pointers to access the correct method directly. 
69 * 70 * The configuration method can be hard-coded in the config file by 71 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode 72 * as defined in section 3.6.4.1, `Generating Configuration Cycles'. 73 */ 74 75 #include <sys/cdefs.h> 76 __KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.98 2023/11/21 23:22:23 gutteridge Exp $"); 77 78 #include <sys/types.h> 79 #include <sys/param.h> 80 #include <sys/time.h> 81 #include <sys/systm.h> 82 #include <sys/errno.h> 83 #include <sys/device.h> 84 #include <sys/bus.h> 85 #include <sys/cpu.h> 86 #include <sys/kmem.h> 87 88 #include <uvm/uvm_extern.h> 89 90 #include <machine/bus_private.h> 91 92 #include <machine/pio.h> 93 #include <machine/lock.h> 94 95 #include <dev/isa/isareg.h> 96 #include <dev/isa/isavar.h> 97 #include <dev/pci/pcivar.h> 98 #include <dev/pci/pcireg.h> 99 #include <dev/pci/pccbbreg.h> 100 #include <dev/pci/pcidevs.h> 101 #include <dev/pci/ppbvar.h> 102 #include <dev/pci/genfb_pcivar.h> 103 104 #include <dev/wsfb/genfbvar.h> 105 #include <arch/x86/include/genfb_machdep.h> 106 #include <arch/xen/include/hypervisor.h> 107 #include <arch/xen/include/xen.h> 108 #include <dev/ic/vgareg.h> 109 110 #include "acpica.h" 111 #include "genfb.h" 112 #include "isa.h" 113 #include "opt_acpi.h" 114 #include "opt_ddb.h" 115 #include "opt_mpbios.h" 116 #include "opt_puc.h" 117 #include "opt_vga.h" 118 #include "pci.h" 119 #include "wsdisplay.h" 120 #include "com.h" 121 #include "opt_xen.h" 122 123 #ifdef DDB 124 #include <machine/db_machdep.h> 125 #include <ddb/db_sym.h> 126 #include <ddb/db_extern.h> 127 #endif 128 129 #ifdef VGA_POST 130 #include <x86/vga_post.h> 131 #endif 132 133 #include <x86/cpuvar.h> 134 135 #include <machine/autoconf.h> 136 #include <machine/bootinfo.h> 137 138 #ifdef MPBIOS 139 #include <machine/mpbiosvar.h> 140 #endif 141 142 #if NACPICA > 0 143 #include <machine/mpacpi.h> 144 #if !defined(NO_PCI_EXTENDED_CONFIG) 145 #include <dev/acpi/acpivar.h> 146 #include <dev/acpi/acpi_mcfg.h> 
#endif
#endif

#include <machine/mpconfig.h>

#if NCOM > 0
#include <dev/pci/puccn.h>
#endif

#ifndef XENPV
#include <x86/efi.h>
#endif

#include "opt_pci_conf_mode.h"

/*
 * The configuration mechanism may be forced at build time with
 * `options PCI_CONF_MODE=N'; otherwise pci_mode stays -1 until
 * pci_mode_detect() probes the hardware.
 */
#ifdef PCI_CONF_MODE
#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2)
static int pci_mode = PCI_CONF_MODE;
#else
#error Invalid PCI configuration mode.
#endif
#else
static int pci_mode = -1;
#endif

/*
 * Saved state for the global configuration-space lock: who holds it
 * and which address was selected before the holder took over.
 */
struct pci_conf_lock {
	uint32_t cl_cpuno;	/* 0: unlocked
				 * 1 + n: locked by CPU n (0 <= n)
				 */
	uint32_t cl_sel;	/* the address that's being read. */
};

static void pci_conf_unlock(struct pci_conf_lock *);
static uint32_t pci_conf_selector(pcitag_t, int);
static unsigned int pci_conf_port(pcitag_t, int);
static void pci_conf_select(uint32_t);
static void pci_conf_lock(struct pci_conf_lock *, uint32_t);
static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *);
/* Closure passed through pci_device_foreach_min() by pci_bridge_foreach(). */
struct pci_bridge_hook_arg {
	void (*func)(pci_chipset_tag_t, pcitag_t, void *);
	void *arg;
};

/* Configuration Mechanism #1 I/O ports (address/data register pair). */
#define	PCI_MODE1_ENABLE	0x80000000UL
#define	PCI_MODE1_ADDRESS_REG	0x0cf8
#define	PCI_MODE1_DATA_REG	0x0cfc

/* Configuration Mechanism #2 I/O ports (enable/forward register pair). */
#define	PCI_MODE2_ENABLE_REG	0x0cf8
#define	PCI_MODE2_FORWARD_REG	0x0cfa

#define _tag(b, d, f) \
	{.mode1 = PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8)}
#define _qe(bus, dev, fcn, vend, prod) \
	{_tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)}
/*
 * Host bridges known to implement mode 1 even though the strong mode 1
 * probe in pci_mode_detect() fails on them; consulted before probing.
 */
const struct {
	pcitag_t tag;
	pcireg_t id;
} pcim1_quirk_tbl[] = {
	_qe(0, 0, 0, PCI_VENDOR_INVALID, 0x0000),	/* patchable */
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1),
	/* XXX Triflex2 not tested */
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2),
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4),
#if 0
	/* Triton needed for Connectix Virtual PC */
	_qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
	/* Connectix Virtual PC 5 has a 440BX */
	_qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
	/* Parallels Desktop for Mac */
	_qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO),
	_qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS),
	/* SIS 740 */
	_qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740),
	/* SIS 741 */
	_qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_741),
	/* VIA Technologies VX900 */
	_qe(0, 0, 0, PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VX900_HB)
#endif
};
#undef _tag
#undef _qe

/* arch/xen does not support MSI/MSI-X yet. */
#ifdef __HAVE_PCI_MSI_MSIX
#define PCI_QUIRK_DISABLE_MSI	1 /* Neither MSI nor MSI-X work */
#define PCI_QUIRK_DISABLE_MSIX	2 /* MSI-X does not work */
#define PCI_QUIRK_ENABLE_MSI_VM	3 /* Older chipset in VM where MSI and MSI-X works */

#define _dme(vend, prod) \
	{ PCI_QUIRK_DISABLE_MSI, PCI_ID_CODE(vend, prod) }
#define _dmxe(vend, prod) \
	{ PCI_QUIRK_DISABLE_MSIX, PCI_ID_CODE(vend, prod) }
#define _emve(vend, prod) \
	{ PCI_QUIRK_ENABLE_MSI_VM, PCI_ID_CODE(vend, prod) }
/*
 * Host bridges whose presence decides the bus-level MSI/MSI-X policy;
 * matched against the bridge ID in pci_attach_hook() via
 * pci_has_msi_quirk().
 */
const struct {
	int type;
	pcireg_t id;
} pci_msi_quirk_tbl[] = {
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCMC),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437MX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437VX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439HX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439TX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_AGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82440MX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_AGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_NOAGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX_AGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82820_MCH),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB),
	_dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_PCHB),
	_dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_PCHB),
	_dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC751_SC),
	_dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC761_SC),
	_dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC762_NB),

	_emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX),	/* QEMU */
	_emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX),	/* VMWare */
};
#undef _dme
#undef _dmxe
#undef _emve
#endif /* __HAVE_PCI_MSI_MSIX */
284 */ 285 struct x86_bus_dma_tag pci_bus_dma_tag = { 286 ._tag_needs_free = 0, 287 #if defined(_LP64) || defined(PAE) 288 ._bounce_thresh = PCI32_DMA_BOUNCE_THRESHOLD, 289 ._bounce_alloc_lo = ISA_DMA_BOUNCE_THRESHOLD, 290 ._bounce_alloc_hi = PCI32_DMA_BOUNCE_THRESHOLD, 291 #else 292 ._bounce_thresh = 0, 293 ._bounce_alloc_lo = 0, 294 ._bounce_alloc_hi = 0, 295 #endif 296 ._may_bounce = NULL, 297 }; 298 299 #ifdef _LP64 300 struct x86_bus_dma_tag pci_bus_dma64_tag = { 301 ._tag_needs_free = 0, 302 ._bounce_thresh = 0, 303 ._bounce_alloc_lo = 0, 304 ._bounce_alloc_hi = 0, 305 ._may_bounce = NULL, 306 }; 307 #endif 308 309 static struct pci_conf_lock cl0 = { 310 .cl_cpuno = 0UL 311 , .cl_sel = 0UL 312 }; 313 314 static struct pci_conf_lock * const cl = &cl0; 315 316 static struct genfb_colormap_callback gfb_cb; 317 static struct genfb_pmf_callback pmf_cb; 318 static struct genfb_mode_callback mode_cb; 319 #ifdef VGA_POST 320 static struct vga_post *vga_posth = NULL; 321 #endif 322 323 static void 324 pci_conf_lock(struct pci_conf_lock *ocl, uint32_t sel) 325 { 326 uint32_t cpuno; 327 328 KASSERT(sel != 0); 329 330 kpreempt_disable(); 331 cpuno = cpu_number() + 1; 332 /* If the kernel enters pci_conf_lock() through an interrupt 333 * handler, then the CPU may already hold the lock. 334 * 335 * If the CPU does not already hold the lock, spin until 336 * we can acquire it. 337 */ 338 if (cpuno == cl->cl_cpuno) { 339 ocl->cl_cpuno = cpuno; 340 } else { 341 #ifdef LOCKDEBUG 342 u_int spins = 0; 343 #endif 344 u_int count; 345 count = SPINLOCK_BACKOFF_MIN; 346 347 ocl->cl_cpuno = 0; 348 349 while (atomic_cas_32(&cl->cl_cpuno, 0, cpuno) != 0) { 350 SPINLOCK_BACKOFF(count); 351 #ifdef LOCKDEBUG 352 if (SPINLOCK_SPINOUT(spins)) { 353 panic("%s: cpu %" PRId32 354 " spun out waiting for cpu %" PRId32, 355 __func__, cpuno, cl->cl_cpuno); 356 } 357 #endif 358 } 359 } 360 361 /* Only one CPU can be here, so an interlocked atomic_swap(3) 362 * is not necessary. 
363 * 364 * Evaluating atomic_cas_32_ni()'s argument, cl->cl_sel, 365 * and applying atomic_cas_32_ni() is not an atomic operation, 366 * however, any interrupt that, in the middle of the 367 * operation, modifies cl->cl_sel, will also restore 368 * cl->cl_sel. So cl->cl_sel will have the same value when 369 * we apply atomic_cas_32_ni() as when we evaluated it, 370 * before. 371 */ 372 ocl->cl_sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, sel); 373 pci_conf_select(sel); 374 } 375 376 static void 377 pci_conf_unlock(struct pci_conf_lock *ocl) 378 { 379 atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, ocl->cl_sel); 380 pci_conf_select(ocl->cl_sel); 381 if (ocl->cl_cpuno != cl->cl_cpuno) 382 atomic_cas_32(&cl->cl_cpuno, cl->cl_cpuno, ocl->cl_cpuno); 383 kpreempt_enable(); 384 } 385 386 static uint32_t 387 pci_conf_selector(pcitag_t tag, int reg) 388 { 389 static const pcitag_t mode2_mask = { 390 .mode2 = { 391 .enable = 0xff 392 , .forward = 0xff 393 } 394 }; 395 396 switch (pci_mode) { 397 case 1: 398 return tag.mode1 | reg; 399 case 2: 400 return tag.mode1 & mode2_mask.mode1; 401 default: 402 panic("%s: mode %d not configured", __func__, pci_mode); 403 } 404 } 405 406 static unsigned int 407 pci_conf_port(pcitag_t tag, int reg) 408 { 409 switch (pci_mode) { 410 case 1: 411 return PCI_MODE1_DATA_REG; 412 case 2: 413 return tag.mode2.port | reg; 414 default: 415 panic("%s: mode %d not configured", __func__, pci_mode); 416 } 417 } 418 419 static void 420 pci_conf_select(uint32_t sel) 421 { 422 pcitag_t tag; 423 424 switch (pci_mode) { 425 case 1: 426 outl(PCI_MODE1_ADDRESS_REG, sel); 427 return; 428 case 2: 429 tag.mode1 = sel; 430 outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable); 431 if (tag.mode2.enable != 0) 432 outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward); 433 return; 434 default: 435 panic("%s: mode %d not configured", __func__, pci_mode); 436 } 437 } 438 439 static int 440 pci_mode_check(void) 441 { 442 pcireg_t x; 443 pcitag_t t; 444 int device; 445 const int maxdev = 
/*
 * Weak sanity check for the currently selected configuration mode:
 * scan bus 0 for a host bridge, or failing that any device from a
 * vendor known to make mode 1 chipsets.  Returns 0 if the mode looks
 * usable, -1 otherwise.  Called from pci_mode_detect() with pci_mode
 * tentatively set.
 */
static int
pci_mode_check(void)
{
	pcireg_t x;
	pcitag_t t;
	int device;
	const int maxdev = pci_bus_maxdevs(NULL, 0);

	for (device = 0; device < maxdev; device++) {
		t = pci_make_tag(NULL, 0, device, 0);
		x = pci_conf_read(NULL, t, PCI_CLASS_REG);
		if (PCI_CLASS(x) == PCI_CLASS_BRIDGE &&
		    PCI_SUBCLASS(x) == PCI_SUBCLASS_BRIDGE_HOST)
			return 0;
		x = pci_conf_read(NULL, t, PCI_ID_REG);
		switch (PCI_VENDOR(x)) {
		case PCI_VENDOR_COMPAQ:
		case PCI_VENDOR_INTEL:
		case PCI_VENDOR_VIATECH:
			return 0;
		}
	}
	return -1;
}

#ifdef __HAVE_PCI_MSI_MSIX
/*
 * Return 1 if (id, type) appears in pci_msi_quirk_tbl, 0 otherwise.
 */
static int
pci_has_msi_quirk(pcireg_t id, int type)
{
	int i;

	for (i = 0; i < __arraycount(pci_msi_quirk_tbl); i++) {
		if (id == pci_msi_quirk_tbl[i].id &&
		    type == pci_msi_quirk_tbl[i].type)
			return 1;
	}

	return 0;
}
#endif

/*
 * Machine-dependent hook run while attaching a PCI bus: reports the
 * configuration mode, lets MPBIOS/ACPI take note of the bus, maps the
 * MCFG window for extended configuration space, and decides the
 * MSI/MSI-X policy for the bus from the host-bridge quirk table.
 */
void
pci_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba)
{
#ifdef __HAVE_PCI_MSI_MSIX
	pci_chipset_tag_t pc = pba->pba_pc;
	pcitag_t tag;
	pcireg_t id, class;
	int i;
	bool havehb = false;
#endif

	if (pba->pba_bus == 0)
		aprint_normal(": configuration mode %d", pci_mode);
#ifdef MPBIOS
	mpbios_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0
	mpacpi_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
	acpimcfg_map_bus(self, pba->pba_pc, pba->pba_bus);
#endif

#ifdef __HAVE_PCI_MSI_MSIX
	/*
	 * In order to decide whether the system supports MSI we look
	 * at the host bridge, which should be device 0 on bus 0.
	 * It is better to not enable MSI on systems that
	 * support it than the other way around, so be conservative
	 * here.  So we don't enable MSI if we don't find a host
	 * bridge there.  We also deliberately don't enable MSI on
	 * chipsets from low-end manufacturers like VIA and SiS.
	 */
	for (i = 0; i <= 7; i++) {
		tag = pci_make_tag(pc, 0, 0, i);
		id = pci_conf_read(pc, tag, PCI_ID_REG);
		class = pci_conf_read(pc, tag, PCI_CLASS_REG);

		if (PCI_CLASS(class) == PCI_CLASS_BRIDGE &&
		    PCI_SUBCLASS(class) == PCI_SUBCLASS_BRIDGE_HOST) {
			havehb = true;
			break;
		}
	}
	if (havehb == false)
		return;

	/* VMware and KVM use old chipset, but they can use MSI/MSI-X */
	if ((cpu_feature[1] & CPUID2_RAZ)
	    && (pci_has_msi_quirk(id, PCI_QUIRK_ENABLE_MSI_VM))) {
		pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
		pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
	} else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSI)) {
		pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
		pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		aprint_verbose("\n");
		aprint_verbose_dev(self,
		    "This pci host supports neither MSI nor MSI-X.");
	} else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSIX)) {
		pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
		pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		aprint_verbose("\n");
		aprint_verbose_dev(self,
		    "This pci host does not support MSI-X.");
#if NACPICA > 0
	} else if (acpi_active &&
	    AcpiGbl_FADT.Header.Revision >= 4 &&
	    (AcpiGbl_FADT.BootFlags & ACPI_FADT_NO_MSI) != 0) {
		pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
		pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		aprint_verbose("\n");
		aprint_verbose_dev(self,
		    "MSI support disabled via ACPI IAPC_BOOT_ARCH flag.\n");
#endif
	} else {
		pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
		pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
	}

	/*
	 * Don't enable MSI on a HyperTransport bus.  In order to
	 * determine that bus 0 is a HyperTransport bus, we look at
	 * device 24 function 0, which is the HyperTransport
	 * host/primary interface integrated on most 64-bit AMD CPUs.
	 * If that device has a HyperTransport capability, bus 0 must
	 * be a HyperTransport bus and we disable MSI.
	 */
	if (24 < pci_bus_maxdevs(pc, 0)) {
		tag = pci_make_tag(pc, 0, 24, 0);
		if (pci_get_capability(pc, tag, PCI_CAP_LDT, NULL, NULL)) {
			pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
			pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		}
	}

#endif /* __HAVE_PCI_MSI_MSIX */
}
565 */ 566 if (24 < pci_bus_maxdevs(pc, 0)) { 567 tag = pci_make_tag(pc, 0, 24, 0); 568 if (pci_get_capability(pc, tag, PCI_CAP_LDT, NULL, NULL)) { 569 pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY; 570 pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY; 571 } 572 } 573 574 #endif /* __HAVE_PCI_MSI_MSIX */ 575 } 576 577 int 578 pci_bus_maxdevs(pci_chipset_tag_t pc, int busno) 579 { 580 /* 581 * Bus number is irrelevant. If Configuration Mechanism 2 is in 582 * use, can only have devices 0-15 on any bus. If Configuration 583 * Mechanism 1 is in use, can have devices 0-32 (i.e. the `normal' 584 * range). 585 */ 586 if (pci_mode == 2) 587 return (16); 588 else 589 return (32); 590 } 591 592 pcitag_t 593 pci_make_tag(pci_chipset_tag_t pc, int bus, int device, int function) 594 { 595 pci_chipset_tag_t ipc; 596 pcitag_t tag; 597 598 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) { 599 if ((ipc->pc_present & PCI_OVERRIDE_MAKE_TAG) == 0) 600 continue; 601 return (*ipc->pc_ov->ov_make_tag)(ipc->pc_ctx, 602 pc, bus, device, function); 603 } 604 605 switch (pci_mode) { 606 case 1: 607 if (bus >= 256 || device >= 32 || function >= 8) 608 panic("%s: bad request(%d, %d, %d)", __func__, 609 bus, device, function); 610 611 tag.mode1 = PCI_MODE1_ENABLE | 612 (bus << 16) | (device << 11) | (function << 8); 613 return tag; 614 case 2: 615 if (bus >= 256 || device >= 16 || function >= 8) 616 panic("%s: bad request(%d, %d, %d)", __func__, 617 bus, device, function); 618 619 tag.mode2.port = 0xc000 | (device << 8); 620 tag.mode2.enable = 0xf0 | (function << 1); 621 tag.mode2.forward = bus; 622 return tag; 623 default: 624 panic("%s: mode %d not configured", __func__, pci_mode); 625 } 626 } 627 628 void 629 pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag, 630 int *bp, int *dp, int *fp) 631 { 632 pci_chipset_tag_t ipc; 633 634 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) { 635 if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0) 636 continue; 637 (*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx, 
638 pc, tag, bp, dp, fp); 639 return; 640 } 641 642 switch (pci_mode) { 643 case 1: 644 if (bp != NULL) 645 *bp = (tag.mode1 >> 16) & 0xff; 646 if (dp != NULL) 647 *dp = (tag.mode1 >> 11) & 0x1f; 648 if (fp != NULL) 649 *fp = (tag.mode1 >> 8) & 0x7; 650 return; 651 case 2: 652 if (bp != NULL) 653 *bp = tag.mode2.forward & 0xff; 654 if (dp != NULL) 655 *dp = (tag.mode2.port >> 8) & 0xf; 656 if (fp != NULL) 657 *fp = (tag.mode2.enable >> 1) & 0x7; 658 return; 659 default: 660 panic("%s: mode %d not configured", __func__, pci_mode); 661 } 662 } 663 664 pcireg_t 665 pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg) 666 { 667 pci_chipset_tag_t ipc; 668 pcireg_t data; 669 struct pci_conf_lock ocl; 670 int dev; 671 672 KASSERT((reg & 0x3) == 0); 673 674 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) { 675 if ((ipc->pc_present & PCI_OVERRIDE_CONF_READ) == 0) 676 continue; 677 return (*ipc->pc_ov->ov_conf_read)(ipc->pc_ctx, pc, tag, reg); 678 } 679 680 pci_decompose_tag(pc, tag, NULL, &dev, NULL); 681 if (__predict_false(pci_mode == 2 && dev >= 16)) 682 return (pcireg_t) -1; 683 684 if (reg < 0) 685 return (pcireg_t) -1; 686 if (reg >= PCI_CONF_SIZE) { 687 #if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG) 688 if (reg >= PCI_EXTCONF_SIZE) 689 return (pcireg_t) -1; 690 acpimcfg_conf_read(pc, tag, reg, &data); 691 return data; 692 #else 693 return (pcireg_t) -1; 694 #endif 695 } 696 697 pci_conf_lock(&ocl, pci_conf_selector(tag, reg)); 698 data = inl(pci_conf_port(tag, reg)); 699 pci_conf_unlock(&ocl); 700 return data; 701 } 702 703 void 704 pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data) 705 { 706 pci_chipset_tag_t ipc; 707 struct pci_conf_lock ocl; 708 int dev; 709 710 KASSERT((reg & 0x3) == 0); 711 712 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) { 713 if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0) 714 continue; 715 (*ipc->pc_ov->ov_conf_write)(ipc->pc_ctx, pc, tag, reg, 716 data); 717 return; 718 } 719 720 
#ifdef XENPV
/*
 * 16-bit configuration write used by Xen PV; the register only needs
 * 2-byte alignment.  Paths that would require an override or extended
 * configuration space are unsupported and panic.
 */
void
pci_conf_write16(pci_chipset_tag_t pc, pcitag_t tag, int reg, uint16_t data)
{
	pci_chipset_tag_t ipc;
	struct pci_conf_lock ocl;
	int dev;

	KASSERT((reg & 0x1) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
			continue;
		panic("pci_conf_write16 and override");
	}

	pci_decompose_tag(pc, tag, NULL, &dev, NULL);
	if (__predict_false(pci_mode == 2 && dev >= 16)) {
		return;
	}

	if (reg < 0)
		return;
	if (reg >= PCI_CONF_SIZE) {
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
		if (reg >= PCI_EXTCONF_SIZE)
			return;
		panic("pci_conf_write16 and reg >= PCI_CONF_SIZE");
#endif
		return;
	}

	/* Select the containing dword, then write the 16-bit subword. */
	pci_conf_lock(&ocl, pci_conf_selector(tag, reg & ~0x3));
	outl(pci_conf_port(tag, reg & ~0x3) + (reg & 0x3), data);
	pci_conf_unlock(&ocl);
}
#endif /* XENPV */

/*
 * Force the configuration mode (1 or 2); may only change it if it has
 * not been detected or set already.
 */
void
pci_mode_set(int mode)
{
	KASSERT(pci_mode == -1 || pci_mode == mode);

	pci_mode = mode;
}

/*
 * Determine which configuration mechanism the host bridge implements
 * and cache the result in pci_mode (1, 2, or 0 for none).  Consults
 * the mode 1 quirk table first, then probes the mode 1 address
 * register, then falls back to a weak mode 2 check.
 */
int
pci_mode_detect(void)
{
	uint32_t sav, val;
	int i;
	pcireg_t idreg;

	if (pci_mode != -1)
		return pci_mode;

	/*
	 * We try to divine which configuration mode the host bridge wants.
	 */

	sav = inl(PCI_MODE1_ADDRESS_REG);

	pci_mode = 1; /* assume this for now */
	/*
	 * catch some known buggy implementations of mode 1
	 */
	for (i = 0; i < __arraycount(pcim1_quirk_tbl); i++) {
		pcitag_t t;

		if (PCI_VENDOR(pcim1_quirk_tbl[i].id) == PCI_VENDOR_INVALID)
			continue;
		t.mode1 = pcim1_quirk_tbl[i].tag.mode1;
		idreg = pci_conf_read(NULL, t, PCI_ID_REG); /* needs "pci_mode" */
		if (idreg == pcim1_quirk_tbl[i].id) {
#ifdef DEBUG
			printf("%s: known mode 1 PCI chipset (%08x)\n",
			    __func__, idreg);
#endif
			return (pci_mode);
		}
	}

#if 0
	extern char cpu_brand_string[];
	const char *reason, *system_vendor, *system_product;
	if (memcmp(cpu_brand_string, "QEMU", 4) == 0)
		/* PR 45671, https://bugs.launchpad.net/qemu/+bug/897771 */
		reason = "QEMU";
	else if ((system_vendor = pmf_get_platform("system-vendor")) != NULL &&
	    strcmp(system_vendor, "Xen") == 0 &&
	    (system_product = pmf_get_platform("system-product")) != NULL &&
	    strcmp(system_product, "HVM domU") == 0)
		reason = "Xen";
	else
		reason = NULL;

	if (reason) {
#ifdef DEBUG
		printf("%s: forcing PCI mode 1 for %s\n", __func__, reason);
#endif
		return (pci_mode);
	}
#endif
	/*
	 * Strong check for standard compliant mode 1:
	 * 1. bit 31 ("enable") can be set
	 * 2. byte/word access does not affect register
	 */
	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE);
	outb(PCI_MODE1_ADDRESS_REG + 3, 0);
	outw(PCI_MODE1_ADDRESS_REG + 2, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) {
#ifdef DEBUG
		printf("%s: mode 1 enable failed (%x)\n", __func__, val);
#endif
		/* Try out mode 1 to see if we can find a host bridge. */
		if (pci_mode_check() == 0) {
#ifdef DEBUG
			printf("%s: mode 1 functional, using\n", __func__);
#endif
			return (pci_mode);
		}
		goto not1;
	}
	outl(PCI_MODE1_ADDRESS_REG, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != 0)
		goto not1;
	return (pci_mode);
not1:
	outl(PCI_MODE1_ADDRESS_REG, sav);

	/*
	 * This mode 2 check is quite weak (and known to give false
	 * positives on some Compaq machines).
	 * However, this doesn't matter, because this is the
	 * last test, and simply no PCI devices will be found if
	 * this happens.
	 */
	outb(PCI_MODE2_ENABLE_REG, 0);
	outb(PCI_MODE2_FORWARD_REG, 0);
	if (inb(PCI_MODE2_ENABLE_REG) != 0 ||
	    inb(PCI_MODE2_FORWARD_REG) != 0)
		goto not2;
	return (pci_mode = 2);
not2:

	return (pci_mode = 0);
}

/*
 * Invoke `func' for every present device on buses 0..maxbus.
 */
void
pci_device_foreach(pci_chipset_tag_t pc, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	pci_device_foreach_min(pc, 0, maxbus, func, context);
}

/*
 * Invoke `func' for every present device on buses minbus..maxbus,
 * scanning extra functions only on multifunction (or quirk-listed)
 * devices.
 */
void
pci_device_foreach_min(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	const struct pci_quirkdata *qd;
	int bus, device, function, maxdevs, nfuncs;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (bus = minbus; bus <= maxbus; bus++) {
		maxdevs = pci_bus_maxdevs(pc, bus);
		for (device = 0; device < maxdevs; device++) {
			tag = pci_make_tag(pc, bus, device, 0);
			id = pci_conf_read(pc, tag, PCI_ID_REG);

			/* Invalid vendor ID value? */
			if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
				continue;
			/* XXX Not invalid, but we've done this ~forever. */
			if (PCI_VENDOR(id) == 0)
				continue;

			qd = pci_lookup_quirkdata(PCI_VENDOR(id),
			    PCI_PRODUCT(id));

			bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
			if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
			    (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
				nfuncs = 8;
			else
				nfuncs = 1;

			for (function = 0; function < nfuncs; function++) {
				tag = pci_make_tag(pc, bus, device, function);
				id = pci_conf_read(pc, tag, PCI_ID_REG);

				/* Invalid vendor ID value? */
				if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
					continue;
				/*
				 * XXX Not invalid, but we've done this
				 * ~forever.
				 */
				if (PCI_VENDOR(id) == 0)
					continue;
				(*func)(pc, tag, context);
			}
		}
	}
}
/*
 * Invoke `func' for every PCI-PCI or CardBus bridge on buses
 * minbus..maxbus; filtering is done by pci_bridge_hook().
 */
void
pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx)
{
	struct pci_bridge_hook_arg bridge_hook;

	bridge_hook.func = func;
	bridge_hook.arg = ctx;

	pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook,
	    &bridge_hook);
}

/*
 * pci_device_foreach_min() callback: forward only bridge devices
 * (PCI or CardBus subclasses) to the user's function.
 */
static void
pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct pci_bridge_hook_arg *bridge_hook = (void *)ctx;
	pcireg_t reg;

	reg = pci_conf_read(pc, tag, PCI_CLASS_REG);
	if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
	    (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI ||
	     PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) {
		(*bridge_hook->func)(pc, tag, bridge_hook->arg);
	}
}

/*
 * Map a PCI_OVERRIDE_* bit to the corresponding function pointer in
 * *ov, or NULL if the bit is unknown.  Used by pci_chipset_tag_create()
 * to validate that every advertised override is actually provided.
 */
static const void *
bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit)
{
	switch (bit) {
	case PCI_OVERRIDE_CONF_READ:
		return ov->ov_conf_read;
	case PCI_OVERRIDE_CONF_WRITE:
		return ov->ov_conf_write;
	case PCI_OVERRIDE_INTR_MAP:
		return ov->ov_intr_map;
	case PCI_OVERRIDE_INTR_STRING:
		return ov->ov_intr_string;
	case PCI_OVERRIDE_INTR_EVCNT:
		return ov->ov_intr_evcnt;
	case PCI_OVERRIDE_INTR_ESTABLISH:
		return ov->ov_intr_establish;
	case PCI_OVERRIDE_INTR_DISESTABLISH:
		return ov->ov_intr_disestablish;
	case PCI_OVERRIDE_MAKE_TAG:
		return ov->ov_make_tag;
	case PCI_OVERRIDE_DECOMPOSE_TAG:
		return ov->ov_decompose_tag;
	default:
		return NULL;
	}
}

/*
 * Free a chipset tag allocated by pci_chipset_tag_create().
 */
void
pci_chipset_tag_destroy(pci_chipset_tag_t pc)
{
	kmem_free(pc, sizeof(struct pci_chipset_tag));
}

/*
 * Create a chipset tag layered over `opc' whose `present' override
 * bits are served by *ov with context `ctx'.  Returns 0 and stores the
 * new tag in *pcp, or EINVAL if an advertised override is missing.
 */
int
pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present,
    const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp)
{
	uint64_t bit, bits, nbits;
	pci_chipset_tag_t pc;
	const void *fp;

	if (ov == NULL || present == 0)
		return EINVAL;

	pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP);
	pc->pc_super = opc;

	/* Verify each set bit has a non-NULL override function. */
	for (bits = present; bits != 0; bits = nbits) {
		nbits = bits & (bits - 1);	/* clear lowest set bit */
		bit = nbits ^ bits;		/* the bit just cleared */
		if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
#ifdef DEBUG
			printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
#endif
			goto einval;
		}
	}

	pc->pc_ov = ov;
	pc->pc_present = present;
	pc->pc_ctx = ctx;

	*pcp = pc;

	return 0;
einval:
	kmem_free(pc, sizeof(struct pci_chipset_tag));
	return EINVAL;
}

/*
 * genfb(4) colormap callback: load one palette entry through the VGA
 * DAC registers (6-bit DAC, hence the >> 2).
 */
static void
x86_genfb_set_mapreg(void *opaque, int index, int r, int g, int b)
{
	outb(IO_VGA + VGA_DAC_ADDRW, index);
	outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)r >> 2);
	outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)g >> 2);
	outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)b >> 2);
}

/*
 * genfb(4) mode callback: on switching back to EMUL mode, re-program
 * the VESA mode via the VGA POST code when available.
 */
static bool
x86_genfb_setmode(struct genfb_softc *sc, int newmode)
{
#if NGENFB > 0
# if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
	static int curmode = WSDISPLAYIO_MODE_EMUL;
# endif

	switch (newmode) {
	case WSDISPLAYIO_MODE_EMUL:
# if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
		if (curmode != newmode) {
			if (vga_posth != NULL && acpi_md_vesa_modenum != 0) {
				vga_post_set_vbe(vga_posth,
				    acpi_md_vesa_modenum);
			}
		}
# endif
		break;
	}

# if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
	curmode = newmode;
# endif
#endif
	return true;
}
defined(VGA_POST) && !defined(XENPV) 1068 if (curmode != newmode) { 1069 if (vga_posth != NULL && acpi_md_vesa_modenum != 0) { 1070 vga_post_set_vbe(vga_posth, 1071 acpi_md_vesa_modenum); 1072 } 1073 } 1074 # endif 1075 break; 1076 } 1077 1078 # if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV) 1079 curmode = newmode; 1080 # endif 1081 #endif 1082 return true; 1083 } 1084 1085 static bool 1086 x86_genfb_suspend(device_t dev, const pmf_qual_t *qual) 1087 { 1088 return true; 1089 } 1090 1091 static bool 1092 x86_genfb_resume(device_t dev, const pmf_qual_t *qual) 1093 { 1094 #if NGENFB > 0 1095 struct pci_genfb_softc *psc = device_private(dev); 1096 1097 #if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV) 1098 if (vga_posth != NULL && acpi_md_vbios_reset == 2) { 1099 vga_post_call(vga_posth); 1100 if (acpi_md_vesa_modenum != 0) 1101 vga_post_set_vbe(vga_posth, acpi_md_vesa_modenum); 1102 } 1103 #endif 1104 genfb_restore_palette(&psc->sc_gen); 1105 #endif 1106 1107 return true; 1108 } 1109 1110 static void 1111 populate_fbinfo(device_t dev, prop_dictionary_t dict) 1112 { 1113 #if NWSDISPLAY > 0 && NGENFB > 0 1114 struct rasops_info *ri = &x86_genfb_console_screen.scr_ri; 1115 #endif 1116 const void *fbptr = NULL; 1117 struct btinfo_framebuffer fbinfo; 1118 1119 1120 #if NWSDISPLAY > 0 && NGENFB > 0 && defined(XEN) && defined(DOM0OPS) 1121 if ((vm_guest == VM_GUEST_XENPVH || vm_guest == VM_GUEST_XENPV) && 1122 xendomain_is_dom0()) 1123 fbptr = xen_genfb_getbtinfo(); 1124 #endif 1125 if (fbptr == NULL) 1126 fbptr = lookup_bootinfo(BTINFO_FRAMEBUFFER); 1127 1128 if (fbptr == NULL) 1129 return; 1130 1131 memcpy(&fbinfo, fbptr, sizeof(fbinfo)); 1132 1133 if (fbinfo.physaddr != 0) { 1134 prop_dictionary_set_uint32(dict, "width", fbinfo.width); 1135 prop_dictionary_set_uint32(dict, "height", fbinfo.height); 1136 prop_dictionary_set_uint8(dict, "depth", fbinfo.depth); 1137 prop_dictionary_set_uint16(dict, "linebytes", fbinfo.stride); 1138 1139 
prop_dictionary_set_uint64(dict, "address", fbinfo.physaddr); 1140 #if NWSDISPLAY > 0 && NGENFB > 0 1141 if (ri->ri_bits != NULL) { 1142 prop_dictionary_set_uint64(dict, "virtual_address", 1143 ri->ri_hwbits != NULL ? 1144 (vaddr_t)ri->ri_hworigbits : 1145 (vaddr_t)ri->ri_origbits); 1146 } 1147 #endif 1148 } 1149 #if notyet 1150 prop_dictionary_set_bool(dict, "splash", 1151 (fbinfo.flags & BI_FB_SPLASH) != 0); 1152 #endif 1153 if (fbinfo.depth == 8) { 1154 gfb_cb.gcc_cookie = NULL; 1155 gfb_cb.gcc_set_mapreg = x86_genfb_set_mapreg; 1156 prop_dictionary_set_uint64(dict, "cmap_callback", 1157 (uint64_t)(uintptr_t)&gfb_cb); 1158 } 1159 if (fbinfo.physaddr != 0) { 1160 mode_cb.gmc_setmode = x86_genfb_setmode; 1161 prop_dictionary_set_uint64(dict, "mode_callback", 1162 (uint64_t)(uintptr_t)&mode_cb); 1163 } 1164 1165 #if NWSDISPLAY > 0 && NGENFB > 0 1166 if (device_is_a(dev, "genfb")) { 1167 prop_dictionary_set_bool(dict, "enable_shadowfb", 1168 ri->ri_hwbits != NULL); 1169 1170 x86_genfb_set_console_dev(dev); 1171 #ifdef DDB 1172 db_trap_callback = x86_genfb_ddb_trap_callback; 1173 #endif 1174 } 1175 #endif 1176 } 1177 1178 device_t 1179 device_pci_register(device_t dev, void *aux) 1180 { 1181 device_t parent = device_parent(dev); 1182 1183 device_pci_props_register(dev, aux); 1184 1185 /* 1186 * Handle network interfaces here, the attachment information is 1187 * not available driver-independently later. 1188 * 1189 * For disks, there is nothing useful available at attach time. 1190 */ 1191 if (device_class(dev) == DV_IFNET) { 1192 struct btinfo_netif *bin = lookup_bootinfo(BTINFO_NETIF); 1193 if (bin == NULL) 1194 return NULL; 1195 1196 /* 1197 * We don't check the driver name against the device name 1198 * passed by the boot ROM. The ROM should stay usable if 1199 * the driver becomes obsolete. The physical attachment 1200 * information (checked below) must be sufficient to 1201 * identify the device. 
1202 */ 1203 if (bin->bus == BI_BUS_PCI && device_is_a(parent, "pci")) { 1204 struct pci_attach_args *paa = aux; 1205 int b, d, f; 1206 1207 /* 1208 * Calculate BIOS representation of: 1209 * 1210 * <bus,device,function> 1211 * 1212 * and compare. 1213 */ 1214 pci_decompose_tag(paa->pa_pc, paa->pa_tag, &b, &d, &f); 1215 if (bin->addr.tag == ((b << 8) | (d << 3) | f)) 1216 return dev; 1217 1218 #ifndef XENPV 1219 /* 1220 * efiboot reports parent ppb bus/device/function. 1221 */ 1222 device_t grand = device_parent(parent); 1223 if (efi_probe() && grand && device_is_a(grand, "ppb")) { 1224 struct ppb_softc *ppb_sc = device_private(grand); 1225 pci_decompose_tag(ppb_sc->sc_pc, ppb_sc->sc_tag, 1226 &b, &d, &f); 1227 if (bin->addr.tag == ((b << 8) | (d << 3) | f)) 1228 return dev; 1229 } 1230 #endif 1231 } 1232 } 1233 if (parent && device_is_a(parent, "pci") && 1234 x86_found_console == false) { 1235 struct pci_attach_args *pa = aux; 1236 1237 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY) { 1238 prop_dictionary_t dict = device_properties(dev); 1239 /* 1240 * framebuffer drivers other than genfb can work 1241 * without the address property 1242 */ 1243 populate_fbinfo(dev, dict); 1244 1245 /* 1246 * If the bootloader requested console=pc and 1247 * specified a framebuffer, and if 1248 * x86_genfb_cnattach succeeded in setting it 1249 * up during consinit, then consinit will call 1250 * genfb_cnattach which makes genfb_is_console 1251 * return true. In this case, if it's the 1252 * first genfb we've seen, we will instruct the 1253 * genfb driver via the is_console property 1254 * that it has been selected as the console. 1255 * 1256 * If not all of that happened, then consinit 1257 * can't have selected a genfb console, so this 1258 * device is definitely not the console. 1259 * 1260 * XXX What happens if there's more than one 1261 * PCI display device, and the bootloader picks 1262 * the second one's framebuffer as the console 1263 * framebuffer address? 
Tough...but this has 1264 * probably never worked. 1265 */ 1266 #if NGENFB > 0 1267 prop_dictionary_set_bool(dict, "is_console", 1268 genfb_is_console()); 1269 #else 1270 prop_dictionary_set_bool(dict, "is_console", 1271 true); 1272 #endif 1273 1274 prop_dictionary_set_bool(dict, "clear-screen", false); 1275 #if NWSDISPLAY > 0 && NGENFB > 0 1276 prop_dictionary_set_uint16(dict, "cursor-row", 1277 x86_genfb_console_screen.scr_ri.ri_crow); 1278 #endif 1279 #if notyet 1280 prop_dictionary_set_bool(dict, "splash", 1281 (fbinfo->flags & BI_FB_SPLASH) != 0); 1282 #endif 1283 pmf_cb.gpc_suspend = x86_genfb_suspend; 1284 pmf_cb.gpc_resume = x86_genfb_resume; 1285 prop_dictionary_set_uint64(dict, 1286 "pmf_callback", (uint64_t)(uintptr_t)&pmf_cb); 1287 #ifdef VGA_POST 1288 vga_posth = vga_post_init(pa->pa_bus, pa->pa_device, 1289 pa->pa_function); 1290 #endif 1291 x86_found_console = true; 1292 return NULL; 1293 } 1294 } 1295 return NULL; 1296 } 1297 1298 #ifndef PUC_CNBUS 1299 #define PUC_CNBUS 0 1300 #endif 1301 1302 #if NCOM > 0 1303 int 1304 cpu_puc_cnprobe(struct consdev *cn, struct pci_attach_args *pa) 1305 { 1306 pci_mode_detect(); 1307 pa->pa_iot = x86_bus_space_io; 1308 pa->pa_memt = x86_bus_space_mem; 1309 pa->pa_pc = 0; 1310 pa->pa_tag = pci_make_tag(0, PUC_CNBUS, pci_bus_maxdevs(NULL, 0) - 1, 1311 0); 1312 1313 return 0; 1314 } 1315 #endif 1316