1 /*- 2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org> 3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org> 4 * Copyright (c) 2000, BSDi 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 * 28 * $FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $ 29 */ 30 31 #include "opt_acpi.h" 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/malloc.h> 36 #include <sys/module.h> 37 #include <sys/linker.h> 38 #include <sys/fcntl.h> 39 #include <sys/conf.h> 40 #include <sys/kernel.h> 41 #include <sys/queue.h> 42 #include <sys/sysctl.h> 43 #include <sys/endian.h> 44 #include <sys/machintr.h> 45 46 #include <machine/msi_machdep.h> 47 48 #include <vm/vm.h> 49 #include <vm/pmap.h> 50 #include <vm/vm_extern.h> 51 52 #include <sys/bus.h> 53 #include <sys/rman.h> 54 #include <sys/device.h> 55 56 #include <sys/pciio.h> 57 #include <bus/pci/pcireg.h> 58 #include <bus/pci/pcivar.h> 59 #include <bus/pci/pci_private.h> 60 61 #include <bus/u4b/controller/xhcireg.h> 62 #include <bus/u4b/controller/ehcireg.h> 63 #include <bus/u4b/controller/ohcireg.h> 64 #include <bus/u4b/controller/uhcireg.h> 65 66 #include "pcib_if.h" 67 #include "pci_if.h" 68 69 #ifdef __HAVE_ACPI 70 #include <contrib/dev/acpica/acpi.h> 71 #include "acpi_if.h" 72 #else 73 #define ACPI_PWR_FOR_SLEEP(x, y, z) 74 #endif 75 76 typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *); 77 78 static uint32_t pci_mapbase(unsigned mapreg); 79 static const char *pci_maptype(unsigned mapreg); 80 static int pci_mapsize(unsigned testval); 81 static int pci_maprange(unsigned mapreg); 82 static void pci_fixancient(pcicfgregs *cfg); 83 84 static int pci_porten(device_t pcib, int b, int s, int f); 85 static int pci_memen(device_t pcib, int b, int s, int f); 86 static void pci_assign_interrupt(device_t bus, device_t dev, 87 int force_route); 88 static int pci_add_map(device_t pcib, device_t bus, device_t dev, 89 int b, int s, int f, int reg, 90 struct resource_list *rl, int force, int prefetch); 91 static int pci_probe(device_t dev); 92 static int pci_attach(device_t dev); 93 static void pci_child_detached(device_t, device_t); 94 static void 
pci_load_vendor_data(void); 95 static int pci_describe_parse_line(char **ptr, int *vendor, 96 int *device, char **desc); 97 static char *pci_describe_device(device_t dev); 98 static int pci_modevent(module_t mod, int what, void *arg); 99 static void pci_hdrtypedata(device_t pcib, int b, int s, int f, 100 pcicfgregs *cfg); 101 static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg); 102 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, 103 int reg, uint32_t *data); 104 #if 0 105 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, 106 int reg, uint32_t data); 107 #endif 108 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg); 109 static void pci_disable_msi(device_t dev); 110 static void pci_enable_msi(device_t dev, uint64_t address, 111 uint16_t data); 112 static void pci_setup_msix_vector(device_t dev, u_int index, 113 uint64_t address, uint32_t data); 114 static void pci_mask_msix_vector(device_t dev, u_int index); 115 static void pci_unmask_msix_vector(device_t dev, u_int index); 116 static void pci_mask_msix_allvectors(device_t dev); 117 static struct msix_vector *pci_find_msix_vector(device_t dev, int rid); 118 static int pci_msi_blacklisted(void); 119 static void pci_resume_msi(device_t dev); 120 static void pci_resume_msix(device_t dev); 121 static int pcie_slotimpl(const pcicfgregs *); 122 static void pci_print_verbose_expr(const pcicfgregs *); 123 124 static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *); 125 static void pci_read_cap_ht(device_t, int, int, pcicfgregs *); 126 static void pci_read_cap_msi(device_t, int, int, pcicfgregs *); 127 static void pci_read_cap_msix(device_t, int, int, pcicfgregs *); 128 static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *); 129 static void pci_read_cap_subvendor(device_t, int, int, 130 pcicfgregs *); 131 static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *); 132 static void pci_read_cap_express(device_t, int, int, pcicfgregs *); 133 134 static 
device_method_t pci_methods[] = { 135 /* Device interface */ 136 DEVMETHOD(device_probe, pci_probe), 137 DEVMETHOD(device_attach, pci_attach), 138 DEVMETHOD(device_detach, bus_generic_detach), 139 DEVMETHOD(device_shutdown, bus_generic_shutdown), 140 DEVMETHOD(device_suspend, pci_suspend), 141 DEVMETHOD(device_resume, pci_resume), 142 143 /* Bus interface */ 144 DEVMETHOD(bus_print_child, pci_print_child), 145 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch), 146 DEVMETHOD(bus_read_ivar, pci_read_ivar), 147 DEVMETHOD(bus_write_ivar, pci_write_ivar), 148 DEVMETHOD(bus_driver_added, pci_driver_added), 149 DEVMETHOD(bus_child_detached, pci_child_detached), 150 DEVMETHOD(bus_setup_intr, pci_setup_intr), 151 DEVMETHOD(bus_teardown_intr, pci_teardown_intr), 152 153 DEVMETHOD(bus_get_resource_list,pci_get_resource_list), 154 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), 155 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), 156 DEVMETHOD(bus_delete_resource, pci_delete_resource), 157 DEVMETHOD(bus_alloc_resource, pci_alloc_resource), 158 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), 159 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), 160 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), 161 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method), 162 DEVMETHOD(bus_child_location_str, pci_child_location_str_method), 163 164 /* PCI interface */ 165 DEVMETHOD(pci_read_config, pci_read_config_method), 166 DEVMETHOD(pci_write_config, pci_write_config_method), 167 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method), 168 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method), 169 DEVMETHOD(pci_enable_io, pci_enable_io_method), 170 DEVMETHOD(pci_disable_io, pci_disable_io_method), 171 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method), 172 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method), 173 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method), 174 
DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method), 175 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method), 176 DEVMETHOD(pci_find_extcap, pci_find_extcap_method), 177 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method), 178 DEVMETHOD(pci_release_msi, pci_release_msi_method), 179 DEVMETHOD(pci_alloc_msix_vector, pci_alloc_msix_vector_method), 180 DEVMETHOD(pci_release_msix_vector, pci_release_msix_vector_method), 181 DEVMETHOD(pci_msi_count, pci_msi_count_method), 182 DEVMETHOD(pci_msix_count, pci_msix_count_method), 183 184 DEVMETHOD_END 185 }; 186 187 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0); 188 189 static devclass_t pci_devclass; 190 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL); 191 MODULE_VERSION(pci, 1); 192 193 static char *pci_vendordata; 194 static size_t pci_vendordata_size; 195 196 197 static const struct pci_read_cap { 198 int cap; 199 pci_read_cap_t read_cap; 200 } pci_read_caps[] = { 201 { PCIY_PMG, pci_read_cap_pmgt }, 202 { PCIY_HT, pci_read_cap_ht }, 203 { PCIY_MSI, pci_read_cap_msi }, 204 { PCIY_MSIX, pci_read_cap_msix }, 205 { PCIY_VPD, pci_read_cap_vpd }, 206 { PCIY_SUBVENDOR, pci_read_cap_subvendor }, 207 { PCIY_PCIX, pci_read_cap_pcix }, 208 { PCIY_EXPRESS, pci_read_cap_express }, 209 { 0, NULL } /* required last entry */ 210 }; 211 212 struct pci_quirk { 213 uint32_t devid; /* Vendor/device of the card */ 214 int type; 215 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */ 216 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */ 217 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */ 218 int arg1; 219 int arg2; 220 }; 221 222 struct pci_quirk pci_quirks[] = { 223 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. 
*/ 224 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 }, 225 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 }, 226 /* As does the Serverworks OSB4 (the SMBus mapping register) */ 227 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 }, 228 229 /* 230 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge 231 * or the CMIC-SL (AKA ServerWorks GC_LE). 232 */ 233 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 234 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 235 236 /* 237 * MSI doesn't work on earlier Intel chipsets including 238 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855. 239 */ 240 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 241 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 242 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 243 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 244 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 245 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 246 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 247 248 /* 249 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX 250 * bridge. 251 */ 252 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 }, 253 254 /* 255 * Atheros AR8161/AR8162/E2200/E2400/E2500 Ethernet controllers have 256 * a bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit 257 * of the command register is set. 
258 */ 259 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, 260 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, 261 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, 262 { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, 263 { 0xE0B11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, 264 265 { 0 } 266 }; 267 268 /* map register information */ 269 #define PCI_MAPMEM 0x01 /* memory map */ 270 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */ 271 #define PCI_MAPPORT 0x04 /* port map */ 272 273 #define PCI_MSIX_RID2VEC(rid) ((rid) - 1) /* rid -> MSI-X vector # */ 274 #define PCI_MSIX_VEC2RID(vec) ((vec) + 1) /* MSI-X vector # -> rid */ 275 276 struct devlist pci_devq; 277 uint32_t pci_generation; 278 uint32_t pci_numdevs = 0; 279 static int pcie_chipset, pcix_chipset; 280 281 /* sysctl vars */ 282 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters"); 283 284 static int pci_enable_io_modes = 1; 285 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes); 286 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW, 287 &pci_enable_io_modes, 1, 288 "Enable I/O and memory bits in the config register. Some BIOSes do not" 289 " enable these bits correctly. We'd like to do this all the time, but" 290 " there are some peripherals that this causes problems with."); 291 292 static int pci_do_power_nodriver = 0; 293 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver); 294 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW, 295 &pci_do_power_nodriver, 0, 296 "Place a function into D3 state when no driver attaches to it. 0 means" 297 " disable. 1 means conservatively place devices into D3 state. 2 means" 298 " aggressively place devices into D3 state. 
3 means put absolutely" 299 " everything in D3 state."); 300 301 static int pci_do_power_resume = 1; 302 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume); 303 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW, 304 &pci_do_power_resume, 1, 305 "Transition from D3 -> D0 on resume."); 306 307 static int pci_do_msi = 1; 308 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi); 309 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1, 310 "Enable support for MSI interrupts"); 311 312 static int pci_do_msix = 1; 313 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix); 314 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1, 315 "Enable support for MSI-X interrupts"); 316 317 static int pci_honor_msi_blacklist = 1; 318 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist); 319 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD, 320 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI"); 321 322 #if defined(__x86_64__) 323 static int pci_usb_takeover = 1; 324 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover); 325 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RD, 326 &pci_usb_takeover, 1, 327 "Enable early takeover of USB controllers. 
Disable this if you depend on" 328 " BIOS emulation of USB devices, that is you use USB devices (like" 329 " keyboard or mouse) but do not load USB drivers"); 330 #endif 331 332 static int pci_msi_cpuid; 333 334 static int 335 pci_has_quirk(uint32_t devid, int quirk) 336 { 337 const struct pci_quirk *q; 338 339 for (q = &pci_quirks[0]; q->devid; q++) { 340 if (q->devid == devid && q->type == quirk) 341 return (1); 342 } 343 return (0); 344 } 345 346 /* Find a device_t by bus/slot/function in domain 0 */ 347 348 device_t 349 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func) 350 { 351 352 return (pci_find_dbsf(0, bus, slot, func)); 353 } 354 355 /* Find a device_t by domain/bus/slot/function */ 356 357 device_t 358 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func) 359 { 360 struct pci_devinfo *dinfo; 361 362 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { 363 if ((dinfo->cfg.domain == domain) && 364 (dinfo->cfg.bus == bus) && 365 (dinfo->cfg.slot == slot) && 366 (dinfo->cfg.func == func)) { 367 return (dinfo->cfg.dev); 368 } 369 } 370 371 return (NULL); 372 } 373 374 /* Find a device_t by vendor/device ID */ 375 376 device_t 377 pci_find_device(uint16_t vendor, uint16_t device) 378 { 379 struct pci_devinfo *dinfo; 380 381 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { 382 if ((dinfo->cfg.vendor == vendor) && 383 (dinfo->cfg.device == device)) { 384 return (dinfo->cfg.dev); 385 } 386 } 387 388 return (NULL); 389 } 390 391 device_t 392 pci_find_class(uint8_t class, uint8_t subclass) 393 { 394 struct pci_devinfo *dinfo; 395 396 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { 397 if (dinfo->cfg.baseclass == class && 398 dinfo->cfg.subclass == subclass) { 399 return (dinfo->cfg.dev); 400 } 401 } 402 403 return (NULL); 404 } 405 406 device_t 407 pci_iterate_class(struct pci_devinfo **dinfop, uint8_t class, uint8_t subclass) 408 { 409 struct pci_devinfo *dinfo; 410 411 if (*dinfop) 412 dinfo = STAILQ_NEXT(*dinfop, pci_links); 413 else 414 dinfo = 
STAILQ_FIRST(&pci_devq); 415 416 while (dinfo) { 417 if (dinfo->cfg.baseclass == class && 418 dinfo->cfg.subclass == subclass) { 419 *dinfop = dinfo; 420 return (dinfo->cfg.dev); 421 } 422 dinfo = STAILQ_NEXT(dinfo, pci_links); 423 } 424 *dinfop = NULL; 425 return (NULL); 426 } 427 428 /* return base address of memory or port map */ 429 430 static uint32_t 431 pci_mapbase(uint32_t mapreg) 432 { 433 434 if (PCI_BAR_MEM(mapreg)) 435 return (mapreg & PCIM_BAR_MEM_BASE); 436 else 437 return (mapreg & PCIM_BAR_IO_BASE); 438 } 439 440 /* return map type of memory or port map */ 441 442 static const char * 443 pci_maptype(unsigned mapreg) 444 { 445 446 if (PCI_BAR_IO(mapreg)) 447 return ("I/O Port"); 448 if (mapreg & PCIM_BAR_MEM_PREFETCH) 449 return ("Prefetchable Memory"); 450 return ("Memory"); 451 } 452 453 /* return log2 of map size decoded for memory or port map */ 454 455 static int 456 pci_mapsize(uint32_t testval) 457 { 458 int ln2size; 459 460 testval = pci_mapbase(testval); 461 ln2size = 0; 462 if (testval != 0) { 463 while ((testval & 1) == 0) 464 { 465 ln2size++; 466 testval >>= 1; 467 } 468 } 469 return (ln2size); 470 } 471 472 /* return log2 of address range supported by map register */ 473 474 static int 475 pci_maprange(unsigned mapreg) 476 { 477 int ln2range = 0; 478 479 if (PCI_BAR_IO(mapreg)) 480 ln2range = 32; 481 else 482 switch (mapreg & PCIM_BAR_MEM_TYPE) { 483 case PCIM_BAR_MEM_32: 484 ln2range = 32; 485 break; 486 case PCIM_BAR_MEM_1MB: 487 ln2range = 20; 488 break; 489 case PCIM_BAR_MEM_64: 490 ln2range = 64; 491 break; 492 } 493 return (ln2range); 494 } 495 496 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... 
*/

/*
 * Pre-PCI 2.0 devices could leave the header type field zeroed; patch
 * up the header type for PCI-PCI bridges so later code can rely on it.
 */
static void
pci_fixancient(pcicfgregs *cfg)
{
	if (cfg->hdrtype != 0)
		return;

	/* PCI to PCI bridges use header type 1 */
	if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
		cfg->hdrtype = 1;
}

/* extract header type specific config data */

static void
pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
{
#define REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	switch (cfg->hdrtype) {
	case 0:
		/* Plain device: subvendor IDs live in the standard header. */
		cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
		cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
		cfg->nummaps = PCI_MAXMAPS_0;
		break;
	case 1:
		/* PCI-PCI bridge: no subvendor registers in the header. */
		cfg->nummaps = PCI_MAXMAPS_1;
		break;
	case 2:
		/* CardBus bridge layout. */
		cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
		cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
		cfg->nummaps = PCI_MAXMAPS_2;
		break;
	}
#undef REG
}

/* read configuration header into pcicfgregs structure */
struct pci_devinfo *
pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
{
#define REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	pcicfgregs *cfg = NULL;
	struct pci_devinfo *devlist_entry;
	struct devlist *devlist_head;

	devlist_head = &pci_devq;

	devlist_entry = NULL;

	/* An all-ones vendor/device read means no device is present. */
	if (REG(PCIR_DEVVENDOR, 4) != -1) {
		devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

		cfg = &devlist_entry->cfg;

		cfg->domain = d;
		cfg->bus = b;
		cfg->slot = s;
		cfg->func = f;
		cfg->vendor = REG(PCIR_VENDOR, 2);
		cfg->device = REG(PCIR_DEVICE, 2);
		cfg->cmdreg = REG(PCIR_COMMAND, 2);
		cfg->statreg = REG(PCIR_STATUS, 2);
		cfg->baseclass = REG(PCIR_CLASS, 1);
		cfg->subclass = REG(PCIR_SUBCLASS, 1);
		cfg->progif = REG(PCIR_PROGIF, 1);
		cfg->revid = REG(PCIR_REVID, 1);
		cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
		cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
		cfg->lattimer = REG(PCIR_LATTIMER, 1);
		cfg->intpin = REG(PCIR_INTPIN, 1);
		cfg->intline = REG(PCIR_INTLINE, 1);

		cfg->mingnt = REG(PCIR_MINGNT, 1);
		cfg->maxlat = REG(PCIR_MAXLAT, 1);

		/* Multi-function flag shares the header type register. */
		cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
		cfg->hdrtype &= ~PCIM_MFDEV;

		pci_fixancient(cfg);
		pci_hdrtypedata(pcib, b, s, f, cfg);

		pci_read_capabilities(pcib, cfg);

		STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);

		/* Mirror the config data into the pciio conf record. */
		devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
		devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
		devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
		devlist_entry->conf.pc_sel.pc_func = cfg->func;
		devlist_entry->conf.pc_hdr = cfg->hdrtype;

		devlist_entry->conf.pc_subvendor = cfg->subvendor;
		devlist_entry->conf.pc_subdevice = cfg->subdevice;
		devlist_entry->conf.pc_vendor = cfg->vendor;
		devlist_entry->conf.pc_device = cfg->device;

		devlist_entry->conf.pc_class = cfg->baseclass;
		devlist_entry->conf.pc_subclass = cfg->subclass;
		devlist_entry->conf.pc_progif = cfg->progif;
		devlist_entry->conf.pc_revid = cfg->revid;

		pci_numdevs++;
		pci_generation++;
	}
	return (devlist_entry);
#undef REG
}

/*
 * Sanitize a capability-list "next pointer".  Returns non-zero when
 * the (possibly masked) pointer is valid and the walk should continue,
 * zero when the list has terminated or the pointer is bogus.
 */
static int
pci_fixup_nextptr(int *nextptr0)
{
	int nextptr = *nextptr0;

	/* "Next pointer" is only one byte */
	KASSERT(nextptr <= 0xff, ("Illegal next pointer %d", nextptr));

	if (nextptr & 0x3) {
		/*
		 * PCI local bus spec 3.0:
		 *
		 * "... The bottom two bits of all pointers are reserved
		 * and must be implemented as 00b although software must
		 * mask them to allow for future uses of these bits ..."
		 */
		if (bootverbose) {
			kprintf("Illegal PCI extended capability "
				"offset, fixup 0x%02x -> 0x%02x\n",
				nextptr, nextptr & ~0x3);
		}
		nextptr &= ~0x3;
	}
	*nextptr0 = nextptr;

	/* Capabilities start at 0x40; a pointer of 0 terminates the list. */
	if (nextptr < 0x40) {
		if (nextptr != 0) {
			kprintf("Illegal PCI extended capability "
				"offset 0x%02x", nextptr);
		}
		return 0;
	}
	return 1;
}

/*
 * Parse the power-management capability: record the capability word
 * and the config offsets of the status and PMCSR registers.
 */
static void
pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define REG(n, w) \
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_pp *pp = &cfg->pp;

	/* Only honor the first power-management capability found. */
	if (pp->pp_cap)
		return;

	pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
	pp->pp_status = ptr + PCIR_POWER_STATUS;
	pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;

	if ((nextptr - ptr) > PCIR_POWER_DATA) {
		/*
		 * XXX
		 * We should write to data_select and read back from
		 * data_scale to determine whether data register is
		 * implemented.
		 */
#ifdef foo
		pp->pp_data = ptr + PCIR_POWER_DATA;
#else
		pp->pp_data = 0;
#endif
	}

#undef REG
}

/*
 * Parse a HyperTransport capability (x86_64 only): note HT slave
 * blocks and record the MSI mapping window, warning when a bridge
 * uses a non-default window address.
 */
static void
pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#if defined(__x86_64__)

#define REG(n, w) \
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_ht *ht = &cfg->ht;
	uint64_t addr;
	uint32_t val;

	/* Determine HT-specific capability type. */
	val = REG(ptr + PCIR_HT_COMMAND, 2);

	if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
		cfg->ht.ht_slave = ptr;

	if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
		return;

	if (!(val & PCIM_HTCMD_MSI_FIXED)) {
		/* Sanity check the mapping window. */
		addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
		addr <<= 32;
		addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
		if (addr != MSI_X86_ADDR_BASE) {
			device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
			    "has non-default MSI window 0x%llx\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    (long long)addr);
		}
	} else {
		addr = MSI_X86_ADDR_BASE;
	}

	ht->ht_msimap = ptr;
	ht->ht_msictrl = val;
	ht->ht_msiaddr = addr;

#undef REG

#endif	/* __x86_64__ */
}

/*
 * Parse the MSI capability: record its location, control word and the
 * maximum message count encoded in the control word.
 */
static void
pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define REG(n, w) \
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_msi *msi = &cfg->msi;

	msi->msi_location = ptr;
	msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
	/* The MMC field encodes the message count as a power of two. */
	msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);

#undef REG
}

/*
 * Parse the MSI-X capability: record the control word, message count
 * and the BAR/offset pairs of the vector table and pending-bit array.
 */
static void
pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define REG(n, w) \
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_msix *msix = &cfg->msix;
	uint32_t val;

	msix->msix_location = ptr;
	msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
	/* The table size field is N-1 encoded. */
	msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;

	val = REG(ptr + PCIR_MSIX_TABLE, 4);
	msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
	msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;

	val = REG(ptr + PCIR_MSIX_PBA, 4);
	msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
	msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;

	TAILQ_INIT(&msix->msix_vectors);

#undef REG
}

/* Record the config offset of the VPD capability for later use. */
static void
pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
	cfg->vpd.vpd_reg = ptr;
}

/*
 * Parse the subvendor capability found on PCI-PCI bridges; plain
 * devices carry subvendor IDs in the standard header instead.
 */
static void
pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define 
REG(n, w) \
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	/* Should always be true. */
	if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
		uint32_t val;

		val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
		cfg->subvendor = val & 0xffff;
		cfg->subdevice = val >> 16;
	}

#undef REG
}

/* Note the PCI-X capability and flag a probable PCI-X chipset. */
static void
pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
	/*
	 * Assume we have a PCI-X chipset if we have
	 * at least one PCI-PCI bridge with a PCI-X
	 * capability.  Note that some systems with
	 * PCI-express or HT chipsets might match on
	 * this check as well.
	 */
	if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
		pcix_chipset = 1;

	cfg->pcix.pcix_ptr = ptr;
}

/*
 * Return non-zero when the PCIe "slot implemented" bit is both
 * meaningful for this port type and set.
 */
static int
pcie_slotimpl(const pcicfgregs *cfg)
{
	const struct pcicfg_expr *expr = &cfg->expr;
	uint16_t port_type;

	/*
	 * - Slot implemented bit is meaningful iff current port is
	 *   root port or down stream port.
	 * - Testing for root port or down stream port is meaningful
	 *   iff PCI configure has type 1 header.
	 */

	if (cfg->hdrtype != 1)
		return 0;

	port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
	if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
		return 0;

	if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
		return 0;

	return 1;
}

/*
 * Parse the PCI Express capability: record its offset, capability
 * word and (when a slot is implemented) the slot capabilities.
 */
static void
pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define REG(n, w) \
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_expr *expr = &cfg->expr;

	/*
	 * Assume we have a PCI-express chipset if we have
	 * at least one PCI-express device.
	 */
	pcie_chipset = 1;

	expr->expr_ptr = ptr;
	expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);

	/*
	 * Read slot capabilities.  Slot capabilities exists iff
	 * current port's slot is implemented
	 */
	if (pcie_slotimpl(cfg))
		expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);

#undef REG
}

/*
 * Walk the device's capability list and dispatch each entry to its
 * parser via the pci_read_caps table.
 */
static void
pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
{
#define REG(n, w)	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
#define WREG(n, v, w)	PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)

	uint32_t val;
	int nextptr, ptrptr;

	if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
		/* No capabilities */
		return;
	}

	/* The capability pointer offset depends on the header type. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case 0:
	case 1:
		ptrptr = PCIR_CAP_PTR;
		break;
	case 2:
		ptrptr = PCIR_CAP_PTR_2;	/* cardbus capabilities ptr */
		break;
	default:
		return;				/* no capabilities support */
	}
	nextptr = REG(ptrptr, 1);	/* sanity check? */

	/*
	 * Read capability entries.
	 */
	while (pci_fixup_nextptr(&nextptr)) {
		const struct pci_read_cap *rc;
		int ptr = nextptr;

		/* Find the next entry */
		nextptr = REG(ptr + PCICAP_NEXTPTR, 1);

		/* Process this entry */
		val = REG(ptr + PCICAP_ID, 1);
		for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
			if (rc->cap == val) {
				rc->read_cap(pcib, ptr, nextptr, cfg);
				break;
			}
		}
	}

#if defined(__x86_64__)
	/*
	 * Enable the MSI mapping window for all HyperTransport
	 * slaves.  PCI-PCI bridges have their windows enabled via
	 * PCIB_MAP_MSI().
	 */
	if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
	    !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
		device_printf(pcib,
	    "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
		    2);
	}
#endif

	/* REG and WREG use carry through to next functions */
}

/*
 * PCI Vital Product Data
 */

#define PCI_VPD_TIMEOUT	1000000

/*
 * Read one 32-bit word of VPD at 4-byte-aligned offset 'reg' into
 * *data.  Returns 0 on success or ENXIO when the device does not
 * complete the transfer within PCI_VPD_TIMEOUT polls.
 */
static int
pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);

	/* Bit 15 of the address register is set when the data is ready. */
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}
	*data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));

	return (0);
}

#if 0
/* Write one 32-bit word of VPD at offset 'reg'; currently unused. */
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
	/* Bit 15 clears when the write has completed. */
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif

#undef PCI_VPD_TIMEOUT

/* Cursor state for the byte-at-a-time VPD reader below. */
struct vpd_readstate {
	device_t	pcib;		/* bridge used for config access */
	pcicfgregs	*cfg;		/* device whose VPD is being read */
	uint32_t	val;		/* most recent 32-bit word read */
	int		bytesinval;	/* unread bytes remaining in val */
	int		off;		/* next VPD offset to fetch */
	uint8_t		cksum;		/* running sum of bytes returned */
};

/*
 * Return the next VPD byte in *data, refilling the 32-bit read buffer
 * when it runs dry.  Returns 0 on success, ENXIO on a VPD read error.
 */
static int
vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
{
	uint32_t reg;
	uint8_t byte;

	if (vrs->bytesinval == 0) {
		if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
			return (ENXIO);
		vrs->val = le32toh(reg);
		vrs->off += 4;
		byte = vrs->val & 0xff;
		vrs->bytesinval = 3;
	} else {
		vrs->val = vrs->val >> 8;
		byte = vrs->val & 0xff;
		vrs->bytesinval--;
	}

	vrs->cksum += byte;
	*data = byte;
	return (0);
}

/* Report whether this PCIe device's port implements a slot. */
int
pcie_slot_implemented(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);

	return pcie_slotimpl(&dinfo->cfg);
}

/*
 * Program the PCIe max read request size in the device control
 * register.  Panics when called on a non-PCIe device or with an
 * out-of-range size.
 */
void
pcie_set_max_readrq(device_t dev, uint16_t rqsize)
{
	uint8_t expr_ptr;
	uint16_t val;

	rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
	if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
		panic("%s: invalid max read request size 0x%02x",
		      device_get_nameunit(dev), rqsize);
	}

	expr_ptr = pci_get_pciecap_ptr(dev);
	if (!expr_ptr)
		panic("%s: not PCIe device", device_get_nameunit(dev));

	val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
	if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
		if (bootverbose)
			device_printf(dev, "adjust device control 0x%04x", val);

		val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
		val |= rqsize;
		pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);

		if (bootverbose)
			kprintf(" -> 0x%04x\n", val);
	}
}

/*
 * Return the currently programmed PCIe max read request size field.
 * Panics when called on a non-PCIe device.
 */
uint16_t
pcie_get_max_readrq(device_t dev)
{
	uint8_t expr_ptr;
	uint16_t val;

	expr_ptr = pci_get_pciecap_ptr(dev);
	if (!expr_ptr)
		panic("%s: not PCIe device", device_get_nameunit(dev));

	val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
	return (val & PCIEM_DEVCTL_MAX_READRQ_MASK);
}

static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;
	int name;
	int remain;
	int i;
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;
	int dflen;
	uint8_t byte;
	uint8_t byte2;

	/* init 
vpd reader */ 1065 vrs.bytesinval = 0; 1066 vrs.off = 0; 1067 vrs.pcib = pcib; 1068 vrs.cfg = cfg; 1069 vrs.cksum = 0; 1070 1071 state = 0; 1072 name = remain = i = 0; /* shut up stupid gcc */ 1073 alloc = off = 0; /* shut up stupid gcc */ 1074 dflen = 0; /* shut up stupid gcc */ 1075 cksumvalid = -1; 1076 while (state >= 0) { 1077 if (vpd_nextbyte(&vrs, &byte)) { 1078 state = -2; 1079 break; 1080 } 1081 #if 0 1082 kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \ 1083 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val, 1084 vrs.off, vrs.bytesinval, byte, state, remain, name, i); 1085 #endif 1086 switch (state) { 1087 case 0: /* item name */ 1088 if (byte & 0x80) { 1089 if (vpd_nextbyte(&vrs, &byte2)) { 1090 state = -2; 1091 break; 1092 } 1093 remain = byte2; 1094 if (vpd_nextbyte(&vrs, &byte2)) { 1095 state = -2; 1096 break; 1097 } 1098 remain |= byte2 << 8; 1099 if (remain > (0x7f*4 - vrs.off)) { 1100 state = -1; 1101 kprintf( 1102 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n", 1103 cfg->domain, cfg->bus, cfg->slot, 1104 cfg->func, remain); 1105 } 1106 name = byte & 0x7f; 1107 } else { 1108 remain = byte & 0x7; 1109 name = (byte >> 3) & 0xf; 1110 } 1111 switch (name) { 1112 case 0x2: /* String */ 1113 cfg->vpd.vpd_ident = kmalloc(remain + 1, 1114 M_DEVBUF, M_WAITOK); 1115 i = 0; 1116 state = 1; 1117 break; 1118 case 0xf: /* End */ 1119 state = -1; 1120 break; 1121 case 0x10: /* VPD-R */ 1122 alloc = 8; 1123 off = 0; 1124 cfg->vpd.vpd_ros = kmalloc(alloc * 1125 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, 1126 M_WAITOK | M_ZERO); 1127 state = 2; 1128 break; 1129 case 0x11: /* VPD-W */ 1130 alloc = 8; 1131 off = 0; 1132 cfg->vpd.vpd_w = kmalloc(alloc * 1133 sizeof(*cfg->vpd.vpd_w), M_DEVBUF, 1134 M_WAITOK | M_ZERO); 1135 state = 5; 1136 break; 1137 default: /* Invalid data, abort */ 1138 state = -1; 1139 break; 1140 } 1141 break; 1142 1143 case 1: /* Identifier String */ 1144 cfg->vpd.vpd_ident[i++] = byte; 1145 remain--; 1146 if (remain == 0) { 1147 
cfg->vpd.vpd_ident[i] = '\0'; 1148 state = 0; 1149 } 1150 break; 1151 1152 case 2: /* VPD-R Keyword Header */ 1153 if (off == alloc) { 1154 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros, 1155 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros), 1156 M_DEVBUF, M_WAITOK | M_ZERO); 1157 } 1158 cfg->vpd.vpd_ros[off].keyword[0] = byte; 1159 if (vpd_nextbyte(&vrs, &byte2)) { 1160 state = -2; 1161 break; 1162 } 1163 cfg->vpd.vpd_ros[off].keyword[1] = byte2; 1164 if (vpd_nextbyte(&vrs, &byte2)) { 1165 state = -2; 1166 break; 1167 } 1168 dflen = byte2; 1169 if (dflen == 0 && 1170 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV", 1171 2) == 0) { 1172 /* 1173 * if this happens, we can't trust the rest 1174 * of the VPD. 1175 */ 1176 kprintf( 1177 "pci%d:%d:%d:%d: bad keyword length: %d\n", 1178 cfg->domain, cfg->bus, cfg->slot, 1179 cfg->func, dflen); 1180 cksumvalid = 0; 1181 state = -1; 1182 break; 1183 } else if (dflen == 0) { 1184 cfg->vpd.vpd_ros[off].value = kmalloc(1 * 1185 sizeof(*cfg->vpd.vpd_ros[off].value), 1186 M_DEVBUF, M_WAITOK); 1187 cfg->vpd.vpd_ros[off].value[0] = '\x00'; 1188 } else 1189 cfg->vpd.vpd_ros[off].value = kmalloc( 1190 (dflen + 1) * 1191 sizeof(*cfg->vpd.vpd_ros[off].value), 1192 M_DEVBUF, M_WAITOK); 1193 remain -= 3; 1194 i = 0; 1195 /* keep in sync w/ state 3's transistions */ 1196 if (dflen == 0 && remain == 0) 1197 state = 0; 1198 else if (dflen == 0) 1199 state = 2; 1200 else 1201 state = 3; 1202 break; 1203 1204 case 3: /* VPD-R Keyword Value */ 1205 cfg->vpd.vpd_ros[off].value[i++] = byte; 1206 if (strncmp(cfg->vpd.vpd_ros[off].keyword, 1207 "RV", 2) == 0 && cksumvalid == -1) { 1208 if (vrs.cksum == 0) 1209 cksumvalid = 1; 1210 else { 1211 if (bootverbose) 1212 kprintf( 1213 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n", 1214 cfg->domain, cfg->bus, 1215 cfg->slot, cfg->func, 1216 vrs.cksum); 1217 cksumvalid = 0; 1218 state = -1; 1219 break; 1220 } 1221 } 1222 dflen--; 1223 remain--; 1224 /* keep in sync w/ state 2's transistions */ 1225 if (dflen == 0) 
1226 cfg->vpd.vpd_ros[off++].value[i++] = '\0'; 1227 if (dflen == 0 && remain == 0) { 1228 cfg->vpd.vpd_rocnt = off; 1229 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros, 1230 off * sizeof(*cfg->vpd.vpd_ros), 1231 M_DEVBUF, M_WAITOK | M_ZERO); 1232 state = 0; 1233 } else if (dflen == 0) 1234 state = 2; 1235 break; 1236 1237 case 4: 1238 remain--; 1239 if (remain == 0) 1240 state = 0; 1241 break; 1242 1243 case 5: /* VPD-W Keyword Header */ 1244 if (off == alloc) { 1245 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w, 1246 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w), 1247 M_DEVBUF, M_WAITOK | M_ZERO); 1248 } 1249 cfg->vpd.vpd_w[off].keyword[0] = byte; 1250 if (vpd_nextbyte(&vrs, &byte2)) { 1251 state = -2; 1252 break; 1253 } 1254 cfg->vpd.vpd_w[off].keyword[1] = byte2; 1255 if (vpd_nextbyte(&vrs, &byte2)) { 1256 state = -2; 1257 break; 1258 } 1259 cfg->vpd.vpd_w[off].len = dflen = byte2; 1260 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval; 1261 cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) * 1262 sizeof(*cfg->vpd.vpd_w[off].value), 1263 M_DEVBUF, M_WAITOK); 1264 remain -= 3; 1265 i = 0; 1266 /* keep in sync w/ state 6's transistions */ 1267 if (dflen == 0 && remain == 0) 1268 state = 0; 1269 else if (dflen == 0) 1270 state = 5; 1271 else 1272 state = 6; 1273 break; 1274 1275 case 6: /* VPD-W Keyword Value */ 1276 cfg->vpd.vpd_w[off].value[i++] = byte; 1277 dflen--; 1278 remain--; 1279 /* keep in sync w/ state 5's transistions */ 1280 if (dflen == 0) 1281 cfg->vpd.vpd_w[off++].value[i++] = '\0'; 1282 if (dflen == 0 && remain == 0) { 1283 cfg->vpd.vpd_wcnt = off; 1284 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w, 1285 off * sizeof(*cfg->vpd.vpd_w), 1286 M_DEVBUF, M_WAITOK | M_ZERO); 1287 state = 0; 1288 } else if (dflen == 0) 1289 state = 5; 1290 break; 1291 1292 default: 1293 kprintf("pci%d:%d:%d:%d: invalid state: %d\n", 1294 cfg->domain, cfg->bus, cfg->slot, cfg->func, 1295 state); 1296 state = -1; 1297 break; 1298 } 1299 } 1300 1301 if (cksumvalid == 0 || state < -1) { 1302 
/* read-only data bad, clean up */ 1303 if (cfg->vpd.vpd_ros != NULL) { 1304 for (off = 0; cfg->vpd.vpd_ros[off].value; off++) 1305 kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF); 1306 kfree(cfg->vpd.vpd_ros, M_DEVBUF); 1307 cfg->vpd.vpd_ros = NULL; 1308 } 1309 } 1310 if (state < -1) { 1311 /* I/O error, clean up */ 1312 kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n", 1313 cfg->domain, cfg->bus, cfg->slot, cfg->func); 1314 if (cfg->vpd.vpd_ident != NULL) { 1315 kfree(cfg->vpd.vpd_ident, M_DEVBUF); 1316 cfg->vpd.vpd_ident = NULL; 1317 } 1318 if (cfg->vpd.vpd_w != NULL) { 1319 for (off = 0; cfg->vpd.vpd_w[off].value; off++) 1320 kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF); 1321 kfree(cfg->vpd.vpd_w, M_DEVBUF); 1322 cfg->vpd.vpd_w = NULL; 1323 } 1324 } 1325 cfg->vpd.vpd_cached = 1; 1326 #undef REG 1327 #undef WREG 1328 } 1329 1330 int 1331 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr) 1332 { 1333 struct pci_devinfo *dinfo = device_get_ivars(child); 1334 pcicfgregs *cfg = &dinfo->cfg; 1335 1336 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) 1337 pci_read_vpd(device_get_parent(dev), cfg); 1338 1339 *identptr = cfg->vpd.vpd_ident; 1340 1341 if (*identptr == NULL) 1342 return (ENXIO); 1343 1344 return (0); 1345 } 1346 1347 int 1348 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, 1349 const char **vptr) 1350 { 1351 struct pci_devinfo *dinfo = device_get_ivars(child); 1352 pcicfgregs *cfg = &dinfo->cfg; 1353 int i; 1354 1355 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) 1356 pci_read_vpd(device_get_parent(dev), cfg); 1357 1358 for (i = 0; i < cfg->vpd.vpd_rocnt; i++) 1359 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, 1360 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { 1361 *vptr = cfg->vpd.vpd_ros[i].value; 1362 } 1363 1364 if (i != cfg->vpd.vpd_rocnt) 1365 return (0); 1366 1367 *vptr = NULL; 1368 return (ENXIO); 1369 } 1370 1371 /* 1372 * Return the offset in configuration space of the requested extended 
 * capability entry or 0 if the specified capability was not found.
 */
int
pci_find_extcap_method(device_t dev, device_t child, int capability,
    int *capreg)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	u_int32_t status;
	u_int8_t ptr;

	/*
	 * Check the CAP_LIST bit of the PCI status register first.
	 */
	status = pci_read_config(child, PCIR_STATUS, 2);
	if (!(status & PCIM_STATUS_CAPPRESENT))
		return (ENXIO);

	/*
	 * Determine the start pointer of the capabilities list,
	 * which depends on the config header type.
	 */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case 0:
	case 1:
		ptr = PCIR_CAP_PTR;
		break;
	case 2:
		ptr = PCIR_CAP_PTR_2;
		break;
	default:
		/* XXX: panic? */
		return (ENXIO);		/* no extended capabilities support */
	}
	ptr = pci_read_config(child, ptr, 1);

	/*
	 * Traverse the capabilities list until the requested id is
	 * found or the list terminates (next pointer == 0).
	 */
	while (ptr != 0) {
		if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
			if (capreg != NULL)
				*capreg = ptr;
			return (0);
		}
		ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
	}

	return (ENOENT);
}

/*
 * Support for MSI-X message interrupts.
 */
/*
 * Write one MSI-X table entry (message address + data) for 'index'.
 * Each table entry is 16 bytes: addr-lo, addr-hi, data, vector control.
 */
static void
pci_setup_msix_vector(device_t dev, u_int index, uint64_t address,
    uint32_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset;

	KASSERT(msix->msix_msgnum > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16;
	bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
	bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
	bus_write_4(msix->msix_table_res, offset + 8, data);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(dev, address);
}

/*
 * Set the per-vector mask bit in the MSI-X table entry's vector
 * control word (offset 12 within the 16-byte entry).
 */
static void
pci_mask_msix_vector(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;

	KASSERT(msix->msix_msgnum > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	/* Only write back if the mask bit actually changes. */
	if (!(val & PCIM_MSIX_VCTRL_MASK)) {
		val |= PCIM_MSIX_VCTRL_MASK;
		bus_write_4(msix->msix_table_res, offset, val);
	}
}

/*
 * Clear the per-vector mask bit in the MSI-X table entry's vector
 * control word.
 */
static void
pci_unmask_msix_vector(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;

	KASSERT(msix->msix_msgnum > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	/* Only write back if the mask bit actually changes. */
	if (val & PCIM_MSIX_VCTRL_MASK) {
		val &= ~PCIM_MSIX_VCTRL_MASK;
		bus_write_4(msix->msix_table_res, offset, val);
	}
}

/*
 * Return non-zero if the pending bit for 'index' is set in the MSI-X
 * Pending Bit Array (one bit per vector, packed into 32-bit words).
 */
int
pci_pending_msix_vector(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, bit;

	KASSERT(msix->msix_table_res != NULL && msix->msix_pba_res != NULL,
	    ("MSI-X is not setup yet"));

	KASSERT(msix->msix_msgnum > index, ("bogus index"));
	offset = msix->msix_pba_offset + (index / 32) * 4;
	bit = 1 << index % 32;
	return (bus_read_4(msix->msix_pba_res, offset) & bit);
}

/*
 * Restore MSI-X registers and table during resume.  If MSI-X is
 * enabled then walk the virtual table to restore the actual MSI-X
 * table.
 */
static void
pci_resume_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;

	if (msix->msix_table_res != NULL) {
		const struct msix_vector *mv;

		/* Start from a fully masked table. */
		pci_mask_msix_allvectors(dev);

		TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
			u_int vector;

			/* Skip vectors that were never programmed. */
			if (mv->mv_address == 0)
				continue;

			vector = PCI_MSIX_RID2VEC(mv->mv_rid);
			pci_setup_msix_vector(dev, vector,
			    mv->mv_address, mv->mv_data);
			pci_unmask_msix_vector(dev, vector);
		}
	}
	/* Restore the saved MSI-X control register. */
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}

/*
 * Attempt to allocate one MSI-X message at the specified vector on cpuid.
 *
 * After this function returns, the MSI-X's rid will be saved in rid0.
 */
int
pci_alloc_msix_vector_method(device_t dev, device_t child, u_int vector,
    int *rid0, int cpuid)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_vector *mv;
	struct resource_list_entry *rle;
	int error, irq, rid;

	KASSERT(msix->msix_table_res != NULL &&
	    msix->msix_pba_res != NULL, ("MSI-X is not setup yet"));
	KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d", cpuid));
	KASSERT(vector < msix->msix_msgnum,
	    ("invalid MSI-X vector %u, total %d", vector, msix->msix_msgnum));

	if (bootverbose) {
		device_printf(child,
		    "attempting to allocate MSI-X #%u vector (%d supported)\n",
		    vector, msix->msix_msgnum);
	}

	/* Set rid according to vector number */
	rid = PCI_MSIX_VEC2RID(vector);

	/* Vector has already been allocated */
	mv = pci_find_msix_vector(child, rid);
	if (mv != NULL)
		return EBUSY;

	/* Allocate a message. */
	error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq, cpuid);
	if (error)
		return error;
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, rid,
	    irq, irq, 1, cpuid);

	if (bootverbose) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
		device_printf(child, "using IRQ %lu for MSI-X on cpu%d\n",
		    rle->start, cpuid);
	}

	/* Update counts of alloc'd messages. */
	msix->msix_alloc++;

	/* Track the vector; address/data are filled in at setup time. */
	mv = kmalloc(sizeof(*mv), M_DEVBUF, M_WAITOK | M_ZERO);
	mv->mv_rid = rid;
	TAILQ_INSERT_TAIL(&msix->msix_vectors, mv, mv_link);

	*rid0 = rid;
	return 0;
}

/*
 * Release one previously allocated (and already torn down) MSI-X
 * vector identified by its resource id.
 */
int
pci_release_msix_vector_method(device_t dev, device_t child, int rid)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	struct msix_vector *mv;
	int irq, cpuid;

	KASSERT(msix->msix_table_res != NULL &&
	    msix->msix_pba_res != NULL, ("MSI-X is not setup yet"));
	KASSERT(msix->msix_alloc > 0, ("No MSI-X allocated"));
	KASSERT(rid > 0, ("invalid rid %d", rid));

	mv = pci_find_msix_vector(child, rid);
	KASSERT(mv != NULL, ("MSI-X rid %d is not allocated", rid));
	KASSERT(mv->mv_address == 0, ("MSI-X rid %d not teardown", rid));

	/* Make sure resource is no longer allocated. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
	KASSERT(rle != NULL, ("missing MSI-X resource, rid %d", rid));
	KASSERT(rle->res == NULL,
	    ("MSI-X resource is still allocated, rid %d", rid));

	irq = rle->start;
	cpuid = rle->cpuid;

	/* Free the resource list entries. */
	resource_list_delete(&dinfo->resources, SYS_RES_IRQ, rid);

	/* Release the IRQ.
 */
	PCIB_RELEASE_MSIX(device_get_parent(dev), child, irq, cpuid);

	TAILQ_REMOVE(&msix->msix_vectors, mv, mv_link);
	kfree(mv, M_DEVBUF);

	msix->msix_alloc--;
	return (0);
}

/*
 * Return the max supported MSI-X messages this device supports.
 * Basically, assuming the MD code can alloc messages, this function
 * should return the maximum value that pci_alloc_msix() can return.
 * Thus, it is subject to the tunables, etc.
 */
int
pci_msix_count_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;

	if (pci_do_msix && msix->msix_location != 0)
		return (msix->msix_msgnum);
	return (0);
}

/*
 * Prepare MSI-X for use: verify no conflicting MSI/legacy setup,
 * locate the already-mapped table and PBA BAR resources, record them
 * and mask all vectors.  Returns 0 on success or an errno.
 */
int
pci_setup_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct resource *table_res, *pba_res;

	KASSERT(cfg->msix.msix_table_res == NULL &&
	    cfg->msix.msix_pba_res == NULL, ("MSI-X has been setup yet"));

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated MSIs? */
	if (cfg->msi.msi_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || cfg->msix.msix_msgnum == 0 ||
	    !pci_do_msix)
		return (ENODEV);

	KASSERT(cfg->msix.msix_alloc == 0 &&
	    TAILQ_EMPTY(&cfg->msix.msix_vectors),
	    ("MSI-X vector has been allocated"));

	/* Make sure the appropriate BARs are mapped. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	table_res = rle->res;
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	/* When PBA shares the table BAR, rle still points at it. */
	pba_res = rle->res;

	cfg->msix.msix_table_res = table_res;
	cfg->msix.msix_pba_res = pba_res;

	pci_mask_msix_allvectors(dev);

	return 0;
}

/*
 * Undo pci_setup_msix(); all vectors must already have been released.
 */
void
pci_teardown_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;

	KASSERT(msix->msix_table_res != NULL &&
	    msix->msix_pba_res != NULL, ("MSI-X is not setup yet"));
	KASSERT(msix->msix_alloc == 0 && TAILQ_EMPTY(&msix->msix_vectors),
	    ("MSI-X vector is still allocated"));

	pci_mask_msix_allvectors(dev);

	msix->msix_table_res = NULL;
	msix->msix_pba_res = NULL;
}

/*
 * Turn on the MSI-X enable bit in the capability's control register.
 */
void
pci_enable_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;

	KASSERT(msix->msix_table_res != NULL &&
	    msix->msix_pba_res != NULL, ("MSI-X is not setup yet"));

	/* Update control register to enable MSI-X.
 */
	msix->msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}

/*
 * Clear the MSI-X enable bit in the capability's control register and
 * drop any HyperTransport MSI mapping.
 */
void
pci_disable_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;

	KASSERT(msix->msix_table_res != NULL &&
	    msix->msix_pba_res != NULL, ("MSI-X is not setup yet"));

	/* Disable MSI -> HT mapping. */
	pci_ht_map_msi(dev, 0);

	/* Update control register to disable MSI-X. */
	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}

/*
 * Mask every vector in the MSI-X table.
 */
static void
pci_mask_msix_allvectors(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	u_int i;

	for (i = 0; i < dinfo->cfg.msix.msix_msgnum; ++i)
		pci_mask_msix_vector(dev, i);
}

/*
 * Look up the tracked MSI-X vector with the given resource id, or
 * NULL if none is allocated under that rid.
 */
static struct msix_vector *
pci_find_msix_vector(device_t dev, int rid)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_vector *mv;

	TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
		if (mv->mv_rid == rid)
			return mv;
	}
	return NULL;
}

/*
 * HyperTransport MSI mapping control
 */
void
pci_ht_map_msi(device_t dev, uint64_t addr)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_ht *ht = &dinfo->cfg.ht;

	/* Nothing to do without an HT MSI mapping capability. */
	if (!ht->ht_msimap)
		return;

	/* Enable only when the address falls in the mapping window. */
	if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
	    ht->ht_msiaddr >> 20 == addr >> 20) {
		/* Enable MSI -> HT mapping. */
		ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
		    ht->ht_msictrl, 2);
	}

	if (!addr && (ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
		/* Disable MSI -> HT mapping. */
		ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
		    ht->ht_msictrl, 2);
	}
}

/*
 * Support for MSI message signalled interrupts.
 */
/*
 * Program the MSI address/data registers and set the MSI enable bit.
 * The high address register and 64-bit data offset are used only when
 * the capability advertises 64-bit support.
 */
static void
pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Write data and address values. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
	    address & 0xffffffff, 4);
	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
		    address >> 32, 4);
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
		    data, 2);
	} else
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
		    2);

	/* Enable MSI in the control register. */
	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(dev, address);
}

/*
 * Clear the MSI enable bit and drop any HyperTransport MSI mapping.
 */
static void
pci_disable_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Disable MSI -> HT mapping. */
	pci_ht_map_msi(dev, 0);

	/* Disable MSI in the control register. */
	msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}

/*
 * Restore MSI registers during resume.  If MSI is enabled then
 * restore the data and address registers in addition to the control
 * register.
 */
static void
pci_resume_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	uint64_t address;
	uint16_t data;

	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
		/* Re-program the saved message address/data. */
		address = msi->msi_addr;
		data = msi->msi_data;
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
		    address & 0xffffffff, 4);
		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_DATA_64BIT, data, 2);
		} else
			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
			    data, 2);
	}
	/* Always restore the saved control register. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}

/*
 * Returns true if the specified device is blacklisted because MSI
 * doesn't work.
 */
int
pci_msi_device_blacklisted(device_t dev)
{
	struct pci_quirk *q;

	if (!pci_honor_msi_blacklist)
		return (0);

	/* Scan the quirk table for a DISABLE_MSI entry for this devid. */
	for (q = &pci_quirks[0]; q->devid; q++) {
		if (q->devid == pci_get_devid(dev) &&
		    q->type == PCI_QUIRK_DISABLE_MSI)
			return (1);
	}
	return (0);
}

/*
 * Determine if MSI is blacklisted globally on this system.  Currently,
 * we just check for blacklisted chipsets as represented by the
 * host-PCI bridge at device 0:0:0.  In the future, it may become
 * necessary to check other system attributes, such as the kenv values
 * that give the motherboard manufacturer and model number.
 */
static int
pci_msi_blacklisted(void)
{
	device_t dev;

	if (!pci_honor_msi_blacklist)
		return (0);

	/*
	 * Always assume that MSI-X works in virtual machines.  This is
	 * for example needed for most (or all) qemu based setups, since
	 * the emulated chipsets tend to be very old.
	 */
	if (vmm_guest != VMM_GUEST_NONE)
		return (0);

	/* Blacklist all non-PCI-express and non-PCI-X chipsets. */
	if (!(pcie_chipset || pcix_chipset))
		return (1);

	dev = pci_find_bsf(0, 0, 0);
	if (dev != NULL)
		return (pci_msi_device_blacklisted(dev));
	return (0);
}

/*
 * Attempt to allocate count MSI messages on start_cpuid.
 *
 * If start_cpuid < 0, then the MSI messages' target CPU will be
 * selected automatically.
 *
 * If the caller explicitly specified the MSI messages' target CPU,
 * i.e. start_cpuid >= 0, then we will try to allocate the count MSI
 * messages on the specified CPU, if the allocation fails due to MD
 * does not have enough vectors (EMSGSIZE), then we will try next
 * available CPU, until the allocation fails on all CPUs.
 *
 * EMSGSIZE will be returned, if all available CPUs does not have
 * enough vectors for the requested amount of MSI messages.  Caller
 * should either reduce the amount of MSI messages to be requested,
 * or simply giving up using MSI.
 *
 * The available SYS_RES_IRQ resources' rids, which are >= 1, are
 * returned in 'rid' array, if the allocation succeeds.
 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *rid, int count,
    int start_cpuid)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int error, i, irqs[32], cpuid = 0;
	uint16_t ctrl;

	/* MSI hardware supports power-of-2 message counts up to 32. */
	KASSERT(count != 0 && count <= 32 && powerof2(count),
	    ("invalid MSI count %d", count));
	KASSERT(start_cpuid < ncpus, ("invalid cpuid %d", start_cpuid));

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages?
 */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_table_res != NULL)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || cfg->msi.msi_msgnum == 0 ||
	    !pci_do_msi)
		return (ENODEV);

	KASSERT(count <= cfg->msi.msi_msgnum, ("large MSI count %d, max %d",
	    count, cfg->msi.msi_msgnum));

	if (bootverbose) {
		device_printf(child,
		    "attempting to allocate %d MSI vector%s (%d supported)\n",
		    count, count > 1 ? "s" : "", cfg->msi.msi_msgnum);
	}

	/* Round-robin the starting CPU when the caller does not care. */
	if (start_cpuid < 0)
		start_cpuid = atomic_fetchadd_int(&pci_msi_cpuid, 1) % ncpus;

	/*
	 * Try each CPU in turn starting at start_cpuid; move on only
	 * when the MD code has run out of vectors on that CPU.
	 */
	error = EINVAL;
	for (i = 0; i < ncpus; ++i) {
		cpuid = (start_cpuid + i) % ncpus;

		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, count,
		    cfg->msi.msi_msgnum, irqs, cpuid);
		if (error == 0)
			break;
		else if (error != EMSGSIZE)
			return error;
	}
	if (error)
		return error;

	/*
	 * We now have N messages mapped onto SYS_RES_IRQ resources in
	 * the irqs[] array, so add new resources starting at rid 1.
	 */
	for (i = 0; i < count; i++) {
		rid[i] = i + 1;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1, cpuid);
	}

	if (bootverbose) {
		if (count == 1) {
			device_printf(child, "using IRQ %d on cpu%d for MSI\n",
			    irqs[0], cpuid);
		} else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < count; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					kprintf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				kprintf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				kprintf("-%d", irqs[count - 1]);
			kprintf(" for MSI on cpu%d\n", cpuid);
		}
	}

	/* Update control register with count (log2-encoded in MME). */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	ctrl |= (ffs(count) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = count;
	cfg->msi.msi_handlers = 0;
	return (0);
}

/* Release the MSI messages associated with this device. */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int i, irqs[32], cpuid = -1;

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		if (i == 0) {
			cpuid = rle->cpuid;
			KASSERT(cpuid >= 0 && cpuid < ncpus,
			    ("invalid MSI target cpuid %d", cpuid));
		} else {
			/* All messages must target the same CPU. */
			KASSERT(rle->cpuid == cpuid,
			    ("MSI targets different cpus, "
			     "was cpu%d, now cpu%d", cpuid, rle->cpuid));
		}
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count.
 */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs,
	    cpuid);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}

/*
 * Return the max supported MSI messages this device supports.
 * Basically, assuming the MD code can alloc messages, this function
 * should return the maximum value that pci_alloc_msi() can return.
 * Thus, it is subject to the tunables, etc.
 */
int
pci_msi_count_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	if (pci_do_msi && msi->msi_location != 0)
		return (msi->msi_msgnum);
	return (0);
}

/* kfree pcicfgregs structure and all depending data structures */

int
pci_freecfg(struct pci_devinfo *dinfo)
{
	struct devlist *devlist_head;
	int i;

	devlist_head = &pci_devq;

	if (dinfo->cfg.vpd.vpd_reg) {
		/*
		 * NOTE(review): assumes vpd_ident/vpd_ros/vpd_w are all
		 * valid whenever vpd_reg is set; after a failed VPD read
		 * some of these may have been freed and NULLed — confirm
		 * kfree(NULL) is acceptable here.
		 */
		kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
			kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
		kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
			kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
		kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
	}
	STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
	kfree(dinfo, M_DEVBUF);

	/* increment the generation count */
	pci_generation++;

	/* we're losing one device */
	pci_numdevs--;
	return (0);
}

/*
 * PCI power management
 */
int
pci_set_powerstate_method(device_t dev, device_t child, int state)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	uint16_t status;
	int oldstate, highest, delay;

	if (cfg->pp.pp_cap == 0)
		return (EOPNOTSUPP);

	/*
	 * Optimize a no state change request away.  While it would be OK to
	 * write to the hardware in theory, some devices have shown odd
	 * behavior when going from D3 -> D3.
	 */
	oldstate = pci_get_powerstate(child);
	if (oldstate == state)
		return (0);

	/*
	 * The PCI power management specification states that after a state
	 * transition between PCI power states, system software must
	 * guarantee a minimal delay before the function accesses the device.
	 * Compute the worst case delay that we need to guarantee before we
	 * access the device.  Many devices will be responsive much more
	 * quickly than this delay, but there are some that don't respond
	 * instantly to state changes.  Transitions to/from D3 state require
	 * 10ms, while D2 requires 200us, and D0/1 require none.  The delay
	 * is done below with DELAY rather than a sleeper function because
	 * this function can be called from contexts where we cannot sleep.
	 */
	highest = (oldstate > state) ?
	    oldstate : state;
	if (highest == PCI_POWERSTATE_D3)
		delay = 10000;
	else if (highest == PCI_POWERSTATE_D2)
		delay = 200;
	else
		delay = 0;
	/* Preserve everything but the power state field. */
	status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
	    & ~PCIM_PSTAT_DMASK;
	switch (state) {
	case PCI_POWERSTATE_D0:
		status |= PCIM_PSTAT_D0;
		break;
	case PCI_POWERSTATE_D1:
		/* D1 is optional; the capability must advertise it. */
		if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
			return (EOPNOTSUPP);
		status |= PCIM_PSTAT_D1;
		break;
	case PCI_POWERSTATE_D2:
		/* D2 is optional; the capability must advertise it. */
		if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
			return (EOPNOTSUPP);
		status |= PCIM_PSTAT_D2;
		break;
	case PCI_POWERSTATE_D3:
		status |= PCIM_PSTAT_D3;
		break;
	default:
		return (EINVAL);
	}

	if (bootverbose)
		kprintf(
		    "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
		    dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
		    dinfo->cfg.func, oldstate, state);

	PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
	if (delay)
		DELAY(delay);
	return (0);
}

/*
 * Read the device's current power state from the PM status register;
 * devices without a PM capability are always reported as D0.
 */
int
pci_get_powerstate_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	uint16_t status;
	int result;

	if (cfg->pp.pp_cap != 0) {
		status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
		switch (status & PCIM_PSTAT_DMASK) {
		case PCIM_PSTAT_D0:
			result = PCI_POWERSTATE_D0;
			break;
		case PCIM_PSTAT_D1:
			result = PCI_POWERSTATE_D1;
			break;
		case PCIM_PSTAT_D2:
			result = PCI_POWERSTATE_D2;
			break;
		case PCIM_PSTAT_D3:
			result = PCI_POWERSTATE_D3;
			break;
		default:
			result = PCI_POWERSTATE_UNKNOWN;
			break;
		}
	} else {
		/* No support, device is always at D0 */
		result = PCI_POWERSTATE_D0;
	}
	return (result);
}

/*
 * Some convenience functions for PCI device drivers.
2282 */ 2283 2284 static __inline void 2285 pci_set_command_bit(device_t dev, device_t child, uint16_t bit) 2286 { 2287 uint16_t command; 2288 2289 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); 2290 command |= bit; 2291 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); 2292 } 2293 2294 static __inline void 2295 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit) 2296 { 2297 uint16_t command; 2298 2299 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); 2300 command &= ~bit; 2301 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); 2302 } 2303 2304 int 2305 pci_enable_busmaster_method(device_t dev, device_t child) 2306 { 2307 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); 2308 return (0); 2309 } 2310 2311 int 2312 pci_disable_busmaster_method(device_t dev, device_t child) 2313 { 2314 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); 2315 return (0); 2316 } 2317 2318 int 2319 pci_enable_io_method(device_t dev, device_t child, int space) 2320 { 2321 uint16_t command; 2322 uint16_t bit; 2323 char *error; 2324 2325 bit = 0; 2326 error = NULL; 2327 2328 switch(space) { 2329 case SYS_RES_IOPORT: 2330 bit = PCIM_CMD_PORTEN; 2331 error = "port"; 2332 break; 2333 case SYS_RES_MEMORY: 2334 bit = PCIM_CMD_MEMEN; 2335 error = "memory"; 2336 break; 2337 default: 2338 return (EINVAL); 2339 } 2340 pci_set_command_bit(dev, child, bit); 2341 /* Some devices seem to need a brief stall here, what do to? 
*/ 2342 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); 2343 if (command & bit) 2344 return (0); 2345 device_printf(child, "failed to enable %s mapping!\n", error); 2346 return (ENXIO); 2347 } 2348 2349 int 2350 pci_disable_io_method(device_t dev, device_t child, int space) 2351 { 2352 uint16_t command; 2353 uint16_t bit; 2354 char *error; 2355 2356 bit = 0; 2357 error = NULL; 2358 2359 switch(space) { 2360 case SYS_RES_IOPORT: 2361 bit = PCIM_CMD_PORTEN; 2362 error = "port"; 2363 break; 2364 case SYS_RES_MEMORY: 2365 bit = PCIM_CMD_MEMEN; 2366 error = "memory"; 2367 break; 2368 default: 2369 return (EINVAL); 2370 } 2371 pci_clear_command_bit(dev, child, bit); 2372 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); 2373 if (command & bit) { 2374 device_printf(child, "failed to disable %s mapping!\n", error); 2375 return (ENXIO); 2376 } 2377 return (0); 2378 } 2379 2380 /* 2381 * New style pci driver. Parent device is either a pci-host-bridge or a 2382 * pci-pci-bridge. Both kinds are represented by instances of pcib. 
2383 */ 2384 2385 void 2386 pci_print_verbose(struct pci_devinfo *dinfo) 2387 { 2388 2389 if (bootverbose) { 2390 pcicfgregs *cfg = &dinfo->cfg; 2391 2392 kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", 2393 cfg->vendor, cfg->device, cfg->revid); 2394 kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n", 2395 cfg->domain, cfg->bus, cfg->slot, cfg->func); 2396 kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", 2397 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, 2398 cfg->mfdev); 2399 kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n", 2400 cfg->cmdreg, cfg->statreg, cfg->cachelnsz); 2401 kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n", 2402 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, 2403 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); 2404 if (cfg->intpin > 0) 2405 kprintf("\tintpin=%c, irq=%d\n", 2406 cfg->intpin +'a' -1, cfg->intline); 2407 if (cfg->pp.pp_cap) { 2408 uint16_t status; 2409 2410 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2); 2411 kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n", 2412 cfg->pp.pp_cap & PCIM_PCAP_SPEC, 2413 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", 2414 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", 2415 status & PCIM_PSTAT_DMASK); 2416 } 2417 if (cfg->msi.msi_location) { 2418 int ctrl; 2419 2420 ctrl = cfg->msi.msi_ctrl; 2421 kprintf("\tMSI supports %d message%s%s%s\n", 2422 cfg->msi.msi_msgnum, 2423 (cfg->msi.msi_msgnum == 1) ? "" : "s", 2424 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "", 2425 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":""); 2426 } 2427 if (cfg->msix.msix_location) { 2428 kprintf("\tMSI-X supports %d message%s ", 2429 cfg->msix.msix_msgnum, 2430 (cfg->msix.msix_msgnum == 1) ? 
"" : "s"); 2431 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) 2432 kprintf("in map 0x%x\n", 2433 cfg->msix.msix_table_bar); 2434 else 2435 kprintf("in maps 0x%x and 0x%x\n", 2436 cfg->msix.msix_table_bar, 2437 cfg->msix.msix_pba_bar); 2438 } 2439 pci_print_verbose_expr(cfg); 2440 } 2441 } 2442 2443 static void 2444 pci_print_verbose_expr(const pcicfgregs *cfg) 2445 { 2446 const struct pcicfg_expr *expr = &cfg->expr; 2447 const char *port_name; 2448 uint16_t port_type; 2449 2450 if (!bootverbose) 2451 return; 2452 2453 if (expr->expr_ptr == 0) /* No PCI Express capability */ 2454 return; 2455 2456 kprintf("\tPCI Express ver.%d cap=0x%04x", 2457 expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap); 2458 2459 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE; 2460 2461 switch (port_type) { 2462 case PCIE_END_POINT: 2463 port_name = "DEVICE"; 2464 break; 2465 case PCIE_LEG_END_POINT: 2466 port_name = "LEGDEV"; 2467 break; 2468 case PCIE_ROOT_PORT: 2469 port_name = "ROOT"; 2470 break; 2471 case PCIE_UP_STREAM_PORT: 2472 port_name = "UPSTREAM"; 2473 break; 2474 case PCIE_DOWN_STREAM_PORT: 2475 port_name = "DOWNSTRM"; 2476 break; 2477 case PCIE_PCIE2PCI_BRIDGE: 2478 port_name = "PCIE2PCI"; 2479 break; 2480 case PCIE_PCI2PCIE_BRIDGE: 2481 port_name = "PCI2PCIE"; 2482 break; 2483 case PCIE_ROOT_END_POINT: 2484 port_name = "ROOTDEV"; 2485 break; 2486 case PCIE_ROOT_EVT_COLL: 2487 port_name = "ROOTEVTC"; 2488 break; 2489 default: 2490 port_name = NULL; 2491 break; 2492 } 2493 if ((port_type == PCIE_ROOT_PORT || 2494 port_type == PCIE_DOWN_STREAM_PORT) && 2495 !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL)) 2496 port_name = NULL; 2497 if (port_name != NULL) 2498 kprintf("[%s]", port_name); 2499 2500 if (pcie_slotimpl(cfg)) { 2501 kprintf(", slotcap=0x%08x", expr->expr_slotcap); 2502 if (expr->expr_slotcap & PCIEM_SLOTCAP_HP_CAP) 2503 kprintf("[HOTPLUG]"); 2504 } 2505 kprintf("\n"); 2506 } 2507 2508 static int 2509 pci_porten(device_t pcib, int b, int s, int f) 2510 { 2511 
return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2) 2512 & PCIM_CMD_PORTEN) != 0; 2513 } 2514 2515 static int 2516 pci_memen(device_t pcib, int b, int s, int f) 2517 { 2518 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2) 2519 & PCIM_CMD_MEMEN) != 0; 2520 } 2521 2522 /* 2523 * Add a resource based on a pci map register. Return 1 if the map 2524 * register is a 32bit map register or 2 if it is a 64bit register. 2525 */ 2526 static int 2527 pci_add_map(device_t pcib, device_t bus, device_t dev, 2528 int b, int s, int f, int reg, struct resource_list *rl, int force, 2529 int prefetch) 2530 { 2531 uint32_t map; 2532 uint16_t old_cmd; 2533 pci_addr_t base; 2534 pci_addr_t start, end, count; 2535 uint8_t ln2size; 2536 uint8_t ln2range; 2537 uint32_t testval; 2538 uint16_t cmd; 2539 int type; 2540 int barlen; 2541 struct resource *res; 2542 2543 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4); 2544 2545 /* Disable access to device memory */ 2546 old_cmd = 0; 2547 if (PCI_BAR_MEM(map)) { 2548 old_cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2); 2549 cmd = old_cmd & ~PCIM_CMD_MEMEN; 2550 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2); 2551 } 2552 2553 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4); 2554 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4); 2555 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4); 2556 2557 /* Restore memory access mode */ 2558 if (PCI_BAR_MEM(map)) { 2559 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, old_cmd, 2); 2560 } 2561 2562 if (PCI_BAR_MEM(map)) { 2563 type = SYS_RES_MEMORY; 2564 if (map & PCIM_BAR_MEM_PREFETCH) 2565 prefetch = 1; 2566 } else 2567 type = SYS_RES_IOPORT; 2568 ln2size = pci_mapsize(testval); 2569 ln2range = pci_maprange(testval); 2570 base = pci_mapbase(map); 2571 barlen = ln2range == 64 ? 2 : 1; 2572 2573 /* 2574 * For I/O registers, if bottom bit is set, and the next bit up 2575 * isn't clear, we know we have a BAR that doesn't conform to the 2576 * spec, so ignore it. 
Also, sanity check the size of the data 2577 * areas to the type of memory involved. Memory must be at least 2578 * 16 bytes in size, while I/O ranges must be at least 4. 2579 */ 2580 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0) 2581 return (barlen); 2582 if ((type == SYS_RES_MEMORY && ln2size < 4) || 2583 (type == SYS_RES_IOPORT && ln2size < 2)) 2584 return (barlen); 2585 2586 if (ln2range == 64) 2587 /* Read the other half of a 64bit map register */ 2588 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32; 2589 if (bootverbose) { 2590 kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d", 2591 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size); 2592 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) 2593 kprintf(", port disabled\n"); 2594 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) 2595 kprintf(", memory disabled\n"); 2596 else 2597 kprintf(", enabled\n"); 2598 } 2599 2600 /* 2601 * If base is 0, then we have problems. It is best to ignore 2602 * such entries for the moment. These will be allocated later if 2603 * the driver specifically requests them. However, some 2604 * removable busses look better when all resources are allocated, 2605 * so allow '0' to be overridden. 2606 * 2607 * Similarly treat maps whose values is the same as the test value 2608 * read back. These maps have had all f's written to them by the 2609 * BIOS in an attempt to disable the resources. 2610 */ 2611 if (!force && (base == 0 || map == testval)) 2612 return (barlen); 2613 if ((u_long)base != base) { 2614 device_printf(bus, 2615 "pci%d:%d:%d:%d bar %#x too many address bits", 2616 pci_get_domain(dev), b, s, f, reg); 2617 return (barlen); 2618 } 2619 2620 /* 2621 * This code theoretically does the right thing, but has 2622 * undesirable side effects in some cases where peripherals 2623 * respond oddly to having these bits enabled. 
Let the user 2624 * be able to turn them off (since pci_enable_io_modes is 1 by 2625 * default). 2626 */ 2627 if (pci_enable_io_modes) { 2628 /* Turn on resources that have been left off by a lazy BIOS */ 2629 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) { 2630 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2); 2631 cmd |= PCIM_CMD_PORTEN; 2632 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2); 2633 } 2634 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) { 2635 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2); 2636 cmd |= PCIM_CMD_MEMEN; 2637 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2); 2638 } 2639 } else { 2640 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) 2641 return (barlen); 2642 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) 2643 return (barlen); 2644 } 2645 2646 count = 1 << ln2size; 2647 if (base == 0 || base == pci_mapbase(testval)) { 2648 start = 0; /* Let the parent decide. */ 2649 end = ~0ULL; 2650 } else { 2651 start = base; 2652 end = base + (1 << ln2size) - 1; 2653 } 2654 resource_list_add(rl, type, reg, start, end, count, -1); 2655 2656 /* 2657 * Try to allocate the resource for this BAR from our parent 2658 * so that this resource range is already reserved. The 2659 * driver for this device will later inherit this resource in 2660 * pci_alloc_resource(). 2661 */ 2662 res = resource_list_alloc(rl, bus, dev, type, ®, start, end, count, 2663 prefetch ? RF_PREFETCHABLE : 0, -1); 2664 if (res == NULL) { 2665 /* 2666 * If the allocation fails, delete the resource list 2667 * entry to force pci_alloc_resource() to allocate 2668 * resources from the parent. 2669 */ 2670 resource_list_delete(rl, type, reg); 2671 #ifdef PCI_BAR_CLEAR 2672 /* Clear the BAR */ 2673 start = 0; 2674 #else /* !PCI_BAR_CLEAR */ 2675 /* 2676 * Don't clear BAR here. Some BIOS lists HPET as a 2677 * PCI function, clearing the BAR causes HPET timer 2678 * stop ticking. 
2679 */ 2680 if (bootverbose) { 2681 kprintf("pci:%d:%d:%d: resource reservation failed " 2682 "%#jx - %#jx\n", b, s, f, 2683 (intmax_t)start, (intmax_t)end); 2684 } 2685 return (barlen); 2686 #endif /* PCI_BAR_CLEAR */ 2687 } else { 2688 start = rman_get_start(res); 2689 } 2690 pci_write_config(dev, reg, start, 4); 2691 if (ln2range == 64) 2692 pci_write_config(dev, reg + 4, start >> 32, 4); 2693 return (barlen); 2694 } 2695 2696 /* 2697 * For ATA devices we need to decide early what addressing mode to use. 2698 * Legacy demands that the primary and secondary ATA ports sits on the 2699 * same addresses that old ISA hardware did. This dictates that we use 2700 * those addresses and ignore the BAR's if we cannot set PCI native 2701 * addressing mode. 2702 */ 2703 static void 2704 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b, 2705 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask) 2706 { 2707 int rid, type, progif; 2708 #if 0 2709 /* if this device supports PCI native addressing use it */ 2710 progif = pci_read_config(dev, PCIR_PROGIF, 1); 2711 if ((progif & 0x8a) == 0x8a) { 2712 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) && 2713 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) { 2714 kprintf("Trying ATA native PCI addressing mode\n"); 2715 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1); 2716 } 2717 } 2718 #endif 2719 progif = pci_read_config(dev, PCIR_PROGIF, 1); 2720 type = SYS_RES_IOPORT; 2721 if (progif & PCIP_STORAGE_IDE_MODEPRIM) { 2722 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force, 2723 prefetchmask & (1 << 0)); 2724 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force, 2725 prefetchmask & (1 << 1)); 2726 } else { 2727 rid = PCIR_BAR(0); 2728 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8, -1); 2729 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8, 2730 0, -1); 2731 rid = PCIR_BAR(1); 2732 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1, -1); 2733 
resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1, 2734 0, -1); 2735 } 2736 if (progif & PCIP_STORAGE_IDE_MODESEC) { 2737 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force, 2738 prefetchmask & (1 << 2)); 2739 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force, 2740 prefetchmask & (1 << 3)); 2741 } else { 2742 rid = PCIR_BAR(2); 2743 resource_list_add(rl, type, rid, 0x170, 0x177, 8, -1); 2744 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8, 2745 0, -1); 2746 rid = PCIR_BAR(3); 2747 resource_list_add(rl, type, rid, 0x376, 0x376, 1, -1); 2748 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1, 2749 0, -1); 2750 } 2751 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force, 2752 prefetchmask & (1 << 4)); 2753 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force, 2754 prefetchmask & (1 << 5)); 2755 } 2756 2757 static void 2758 pci_assign_interrupt(device_t bus, device_t dev, int force_route) 2759 { 2760 struct pci_devinfo *dinfo = device_get_ivars(dev); 2761 pcicfgregs *cfg = &dinfo->cfg; 2762 char tunable_name[64]; 2763 int irq; 2764 2765 /* Has to have an intpin to have an interrupt. */ 2766 if (cfg->intpin == 0) 2767 return; 2768 2769 /* Let the user override the IRQ with a tunable. 
*/ 2770 irq = PCI_INVALID_IRQ; 2771 ksnprintf(tunable_name, sizeof(tunable_name), 2772 "hw.pci%d.%d.%d.%d.INT%c.irq", 2773 cfg->domain, cfg->bus, cfg->slot, cfg->func, cfg->intpin + 'A' - 1); 2774 if (TUNABLE_INT_FETCH(tunable_name, &irq)) { 2775 if (irq >= 255 || irq <= 0) { 2776 irq = PCI_INVALID_IRQ; 2777 } else { 2778 if (machintr_legacy_intr_find(irq, 2779 INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW) < 0) { 2780 device_printf(dev, 2781 "hw.pci%d.%d.%d.%d.INT%c.irq=%d, invalid\n", 2782 cfg->domain, cfg->bus, cfg->slot, cfg->func, 2783 cfg->intpin + 'A' - 1, irq); 2784 irq = PCI_INVALID_IRQ; 2785 } else { 2786 BUS_CONFIG_INTR(bus, dev, irq, 2787 INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW); 2788 } 2789 } 2790 } 2791 2792 /* 2793 * If we didn't get an IRQ via the tunable, then we either use the 2794 * IRQ value in the intline register or we ask the bus to route an 2795 * interrupt for us. If force_route is true, then we only use the 2796 * value in the intline register if the bus was unable to assign an 2797 * IRQ. 2798 */ 2799 if (!PCI_INTERRUPT_VALID(irq)) { 2800 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) 2801 irq = PCI_ASSIGN_INTERRUPT(bus, dev); 2802 if (!PCI_INTERRUPT_VALID(irq)) 2803 irq = cfg->intline; 2804 } 2805 2806 /* If after all that we don't have an IRQ, just bail. */ 2807 if (!PCI_INTERRUPT_VALID(irq)) 2808 return; 2809 2810 /* Update the config register if it changed. */ 2811 if (irq != cfg->intline) { 2812 cfg->intline = irq; 2813 pci_write_config(dev, PCIR_INTLINE, irq, 1); 2814 } 2815 2816 /* Add this IRQ as rid 0 interrupt resource. */ 2817 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1, 2818 machintr_legacy_intr_cpuid(irq)); 2819 } 2820 2821 /* Perform early OHCI takeover from SMM. 
 */
static void
ohci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t ctl;
	int rid;
	int i;

	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	ctl = bus_read_4(res, OHCI_CONTROL);
	if (ctl & OHCI_IR) {
		/* SMM owns the controller: request an ownership change. */
		if (bootverbose)
			kprintf("ohci early: "
			    "SMM active, request owner change\n");
		bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
		/* Poll up to ~100ms for SMM to drop the IR bit. */
		for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
			DELAY(1000);
			ctl = bus_read_4(res, OHCI_CONTROL);
		}
		/* SMM never let go: force a host-controller reset. */
		if (ctl & OHCI_IR) {
			if (bootverbose)
				kprintf("ohci early: "
				    "SMM does not respond, resetting\n");
			bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
		}
		/* Disable interrupts */
		bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
	}

	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}

/* Perform early UHCI takeover from SMM. */
static void
uhci_early_takeover(device_t self)
{
	struct resource *res;
	int rid;

	/*
	 * Set the PIRQD enable bit and switch off all the others.  We don't
	 * want legacy support to interfere with us XXX Does this also mean
	 * that the BIOS won't touch the keyboard anymore if it is connected
	 * to the ports of the root hub?
	 */
	pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);

	/* Disable interrupts */
	rid = PCI_UHCI_BASE_REG;
	res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (res != NULL) {
		bus_write_2(res, UHCI_INTR, 0);
		bus_release_resource(self, SYS_RES_IOPORT, rid, res);
	}
}

/* Perform early EHCI takeover from SMM. */
static void
ehci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t cparams;
	uint32_t eec;
	uint32_t eecp;
	uint32_t bios_sem;
	uint32_t offs;
	int rid;
	int i;

	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	cparams = bus_read_4(res, EHCI_HCCPARAMS);

	/* Synchronise with the BIOS if it owns the controller. */
	for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
	    eecp = EHCI_EECP_NEXT(eec)) {
		eec = pci_read_config(self, eecp, 4);
		/* Only the USB legacy-support capability matters here. */
		if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
			continue;
		}
		bios_sem = pci_read_config(self, eecp +
		    EHCI_LEGSUP_BIOS_SEM, 1);
		/* BIOS semaphore clear: BIOS does not own this controller. */
		if (bios_sem == 0) {
			continue;
		}
		if (bootverbose)
			kprintf("ehci early: "
			    "SMM active, request owner change\n");

		/* Raise the OS-ownership semaphore. */
		pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);

		/* Poll up to ~100ms for the BIOS to release its semaphore. */
		for (i = 0; (i < 100) && (bios_sem != 0); i++) {
			DELAY(1000);
			bios_sem = pci_read_config(self, eecp +
			    EHCI_LEGSUP_BIOS_SEM, 1);
		}

		if (bios_sem != 0) {
			if (bootverbose)
				kprintf("ehci early: "
				    "SMM does not respond\n");
		}
		/* Disable interrupts */
		offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
		bus_write_4(res, offs + EHCI_USBINTR, 0);
	}
	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}

/* Perform early XHCI takeover from SMM.
*/ 2939 static void 2940 xhci_early_takeover(device_t self) 2941 { 2942 struct resource *res; 2943 uint32_t cparams; 2944 uint32_t eec; 2945 uint32_t eecp; 2946 uint32_t bios_sem; 2947 uint32_t offs; 2948 int rid; 2949 int i; 2950 2951 rid = PCIR_BAR(0); 2952 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); 2953 if (res == NULL) 2954 return; 2955 2956 cparams = bus_read_4(res, XHCI_HCSPARAMS0); 2957 2958 eec = -1; 2959 2960 /* Synchronise with the BIOS if it owns the controller. */ 2961 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec); 2962 eecp += XHCI_XECP_NEXT(eec) << 2) { 2963 eec = bus_read_4(res, eecp); 2964 2965 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY) 2966 continue; 2967 2968 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); 2969 2970 if (bios_sem == 0) { 2971 if (bootverbose) 2972 kprintf("xhci early: xhci is not owned by SMM\n"); 2973 2974 continue; 2975 } 2976 2977 if (bootverbose) 2978 kprintf("xhci early: " 2979 "SMM active, request owner change\n"); 2980 2981 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1); 2982 2983 /* wait a maximum of 5 seconds */ 2984 2985 for (i = 0; (i < 5000) && (bios_sem != 0); i++) { 2986 DELAY(1000); 2987 2988 bios_sem = bus_read_1(res, eecp + 2989 XHCI_XECP_BIOS_SEM); 2990 } 2991 2992 if (bios_sem != 0) { 2993 if (bootverbose) { 2994 kprintf("xhci early: " 2995 "SMM does not respond\n"); 2996 kprintf("xhci early: " 2997 "taking xhci by force\n"); 2998 } 2999 bus_write_1(res, eecp + XHCI_XECP_BIOS_SEM, 0x00); 3000 } else { 3001 if (bootverbose) 3002 kprintf("xhci early: " 3003 "handover successful\n"); 3004 } 3005 3006 /* Disable interrupts */ 3007 offs = bus_read_1(res, XHCI_CAPLENGTH); 3008 bus_write_4(res, offs + XHCI_USBCMD, 0); 3009 bus_read_4(res, offs + XHCI_USBSTS); 3010 } 3011 bus_release_resource(self, SYS_RES_MEMORY, rid, res); 3012 } 3013 3014 void 3015 pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask) 3016 { 3017 
struct pci_devinfo *dinfo = device_get_ivars(dev); 3018 pcicfgregs *cfg = &dinfo->cfg; 3019 struct resource_list *rl = &dinfo->resources; 3020 struct pci_quirk *q; 3021 int b, i, f, s; 3022 3023 b = cfg->bus; 3024 s = cfg->slot; 3025 f = cfg->func; 3026 3027 /* ATA devices needs special map treatment */ 3028 if ((pci_get_class(dev) == PCIC_STORAGE) && 3029 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) && 3030 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) || 3031 (!pci_read_config(dev, PCIR_BAR(0), 4) && 3032 !pci_read_config(dev, PCIR_BAR(2), 4))) ) 3033 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask); 3034 else 3035 for (i = 0; i < cfg->nummaps;) 3036 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i), 3037 rl, force, prefetchmask & (1 << i)); 3038 3039 /* 3040 * Add additional, quirked resources. 3041 */ 3042 for (q = &pci_quirks[0]; q->devid; q++) { 3043 if (q->devid == ((cfg->device << 16) | cfg->vendor) 3044 && q->type == PCI_QUIRK_MAP_REG) 3045 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl, 3046 force, 0); 3047 } 3048 3049 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) { 3050 /* 3051 * Try to re-route interrupts. Sometimes the BIOS or 3052 * firmware may leave bogus values in these registers. 3053 * If the re-route fails, then just stick with what we 3054 * have. 
3055 */ 3056 pci_assign_interrupt(bus, dev, 1); 3057 } 3058 3059 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS && 3060 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) { 3061 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI) 3062 xhci_early_takeover(dev); 3063 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI) 3064 ehci_early_takeover(dev); 3065 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI) 3066 ohci_early_takeover(dev); 3067 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI) 3068 uhci_early_takeover(dev); 3069 } 3070 } 3071 3072 void 3073 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size) 3074 { 3075 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) 3076 device_t pcib = device_get_parent(dev); 3077 struct pci_devinfo *dinfo; 3078 int maxslots; 3079 int s, f, pcifunchigh; 3080 uint8_t hdrtype; 3081 3082 KASSERT(dinfo_size >= sizeof(struct pci_devinfo), 3083 ("dinfo_size too small")); 3084 maxslots = PCIB_MAXSLOTS(pcib); 3085 for (s = 0; s <= maxslots; s++) { 3086 pcifunchigh = 0; 3087 f = 0; 3088 DELAY(1); 3089 hdrtype = REG(PCIR_HDRTYPE, 1); 3090 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) 3091 continue; 3092 if (hdrtype & PCIM_MFDEV) 3093 pcifunchigh = PCI_FUNCMAX; 3094 for (f = 0; f <= pcifunchigh; f++) { 3095 dinfo = pci_read_device(pcib, domain, busno, s, f, 3096 dinfo_size); 3097 if (dinfo != NULL) { 3098 pci_add_child(dev, dinfo); 3099 } 3100 } 3101 } 3102 #undef REG 3103 } 3104 3105 void 3106 pci_add_child(device_t bus, struct pci_devinfo *dinfo) 3107 { 3108 device_t pcib; 3109 3110 pcib = device_get_parent(bus); 3111 dinfo->cfg.dev = device_add_child(bus, NULL, -1); 3112 device_set_ivars(dinfo->cfg.dev, dinfo); 3113 resource_list_init(&dinfo->resources); 3114 pci_cfg_save(dinfo->cfg.dev, dinfo, 0); 3115 pci_cfg_restore(dinfo->cfg.dev, dinfo); 3116 pci_print_verbose(dinfo); 3117 pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0); 3118 } 3119 3120 static int 3121 pci_probe(device_t dev) 
3122 { 3123 device_set_desc(dev, "PCI bus"); 3124 3125 /* Allow other subclasses to override this driver. */ 3126 return (-1000); 3127 } 3128 3129 static int 3130 pci_attach(device_t dev) 3131 { 3132 int busno, domain; 3133 3134 /* 3135 * Since there can be multiple independantly numbered PCI 3136 * busses on systems with multiple PCI domains, we can't use 3137 * the unit number to decide which bus we are probing. We ask 3138 * the parent pcib what our domain and bus numbers are. 3139 */ 3140 domain = pcib_get_domain(dev); 3141 busno = pcib_get_bus(dev); 3142 if (bootverbose) 3143 device_printf(dev, "domain=%d, physical bus=%d\n", 3144 domain, busno); 3145 3146 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo)); 3147 3148 return (bus_generic_attach(dev)); 3149 } 3150 3151 int 3152 pci_suspend(device_t dev) 3153 { 3154 int dstate, error, i, numdevs; 3155 device_t acpi_dev, child, *devlist; 3156 struct pci_devinfo *dinfo; 3157 3158 /* 3159 * Save the PCI configuration space for each child and set the 3160 * device in the appropriate power state for this sleep state. 3161 */ 3162 acpi_dev = NULL; 3163 if (pci_do_power_resume) 3164 acpi_dev = devclass_get_device(devclass_find("acpi"), 0); 3165 device_get_children(dev, &devlist, &numdevs); 3166 for (i = 0; i < numdevs; i++) { 3167 child = devlist[i]; 3168 dinfo = (struct pci_devinfo *) device_get_ivars(child); 3169 pci_cfg_save(child, dinfo, 0); 3170 } 3171 3172 /* Suspend devices before potentially powering them down. */ 3173 error = bus_generic_suspend(dev); 3174 if (error) { 3175 kfree(devlist, M_TEMP); 3176 return (error); 3177 } 3178 3179 /* 3180 * Always set the device to D3. If ACPI suggests a different 3181 * power state, use it instead. If ACPI is not present, the 3182 * firmware is responsible for managing device power. Skip 3183 * children who aren't attached since they are powered down 3184 * separately. Only manage type 0 devices for now. 
3185 */ 3186 for (i = 0; acpi_dev && i < numdevs; i++) { 3187 child = devlist[i]; 3188 dinfo = (struct pci_devinfo *) device_get_ivars(child); 3189 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) { 3190 dstate = PCI_POWERSTATE_D3; 3191 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate); 3192 pci_set_powerstate(child, dstate); 3193 } 3194 } 3195 kfree(devlist, M_TEMP); 3196 return (0); 3197 } 3198 3199 int 3200 pci_resume(device_t dev) 3201 { 3202 int i, numdevs; 3203 device_t acpi_dev, child, *devlist; 3204 struct pci_devinfo *dinfo; 3205 3206 /* 3207 * Set each child to D0 and restore its PCI configuration space. 3208 */ 3209 acpi_dev = NULL; 3210 if (pci_do_power_resume) 3211 acpi_dev = devclass_get_device(devclass_find("acpi"), 0); 3212 device_get_children(dev, &devlist, &numdevs); 3213 for (i = 0; i < numdevs; i++) { 3214 /* 3215 * Notify ACPI we're going to D0 but ignore the result. If 3216 * ACPI is not present, the firmware is responsible for 3217 * managing device power. Only manage type 0 devices for now. 3218 */ 3219 child = devlist[i]; 3220 dinfo = (struct pci_devinfo *) device_get_ivars(child); 3221 if (acpi_dev && device_is_attached(child) && 3222 dinfo->cfg.hdrtype == 0) { 3223 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL); 3224 pci_set_powerstate(child, PCI_POWERSTATE_D0); 3225 } 3226 3227 /* Now the device is powered up, restore its config space. 
*/ 3228 pci_cfg_restore(child, dinfo); 3229 } 3230 kfree(devlist, M_TEMP); 3231 return (bus_generic_resume(dev)); 3232 } 3233 3234 static void 3235 pci_load_vendor_data(void) 3236 { 3237 caddr_t vendordata, info; 3238 3239 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) { 3240 info = preload_search_info(vendordata, MODINFO_ADDR); 3241 pci_vendordata = *(char **)info; 3242 info = preload_search_info(vendordata, MODINFO_SIZE); 3243 pci_vendordata_size = *(size_t *)info; 3244 /* terminate the database */ 3245 pci_vendordata[pci_vendordata_size] = '\n'; 3246 } 3247 } 3248 3249 void 3250 pci_driver_added(device_t dev, driver_t *driver) 3251 { 3252 int numdevs; 3253 device_t *devlist; 3254 device_t child; 3255 struct pci_devinfo *dinfo; 3256 int i; 3257 3258 if (bootverbose) 3259 device_printf(dev, "driver added\n"); 3260 DEVICE_IDENTIFY(driver, dev); 3261 device_get_children(dev, &devlist, &numdevs); 3262 for (i = 0; i < numdevs; i++) { 3263 child = devlist[i]; 3264 if (device_get_state(child) != DS_NOTPRESENT) 3265 continue; 3266 dinfo = device_get_ivars(child); 3267 pci_print_verbose(dinfo); 3268 if (bootverbose) 3269 kprintf("pci%d:%d:%d:%d: reprobing on driver added\n", 3270 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot, 3271 dinfo->cfg.func); 3272 pci_cfg_restore(child, dinfo); 3273 if (device_probe_and_attach(child) != 0) 3274 pci_cfg_save(child, dinfo, 1); 3275 } 3276 kfree(devlist, M_TEMP); 3277 } 3278 3279 static void 3280 pci_child_detached(device_t parent __unused, device_t child) 3281 { 3282 /* Turn child's power off */ 3283 pci_cfg_save(child, device_get_ivars(child), 1); 3284 } 3285 3286 int 3287 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, 3288 driver_intr_t *intr, void *arg, void **cookiep, 3289 lwkt_serialize_t serializer, const char *desc) 3290 { 3291 int rid, error; 3292 void *cookie; 3293 3294 error = bus_generic_setup_intr(dev, child, irq, flags, intr, 3295 arg, &cookie, serializer, desc); 
	if (error)
		return (error);

	/* If this is not a direct child, just bail out. */
	if (device_get_parent(child) != dev) {
		*cookiep = cookie;
		return(0);
	}

	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Make sure that INTx is enabled */
		pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		/* A non-zero rid is an MSI or MSI-X message resource. */
		struct pci_devinfo *dinfo = device_get_ivars(child);
		uint64_t addr;
		uint32_t data;

		/*
		 * Check to see if the interrupt is MSI or MSI-X.
		 * Ask our parent to map the MSI and give
		 * us the address and data register values.
		 * If we fail for some reason, teardown the
		 * interrupt handler.
		 */
		if (dinfo->cfg.msi.msi_alloc > 0) {
			struct pcicfg_msi *msi = &dinfo->cfg.msi;

			/*
			 * Map the message only on first handler setup;
			 * later handlers just bump the refcount.
			 */
			if (msi->msi_addr == 0) {
				KASSERT(msi->msi_handlers == 0,
			    ("MSI has handlers, but vectors not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data,
				    rman_get_cpuid(irq));
				if (error)
					goto bad;
				msi->msi_addr = addr;
				msi->msi_data = data;
				pci_enable_msi(child, addr, data);
			}
			msi->msi_handlers++;
		} else {
			struct msix_vector *mv;
			u_int vector;

			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI-X or MSI rid %d allocated", rid));

			mv = pci_find_msix_vector(child, rid);
			KASSERT(mv != NULL,
			    ("MSI-X rid %d is not allocated", rid));
			KASSERT(mv->mv_address == 0,
			    ("MSI-X rid %d has been setup", rid));

			error = PCIB_MAP_MSI(device_get_parent(dev),
			    child, rman_get_start(irq), &addr, &data,
			    rman_get_cpuid(irq));
			if (error)
				goto bad;
			mv->mv_address = addr;
			mv->mv_data = data;

			/* Program and unmask this one MSI-X vector. */
			vector = PCI_MSIX_RID2VEC(rid);
			pci_setup_msix_vector(child, vector,
			    mv->mv_address, mv->mv_data);
			pci_unmask_msix_vector(child, vector);
		}

		/*
		 * Make sure that INTx is disabled if we are using MSI/MSI-X,
		 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
		 * in which case we "enable" INTx so MSI/MSI-X actually works.
		 */
		if (!pci_has_quirk(pci_get_devid(child),
		    PCI_QUIRK_MSI_INTX_BUG))
			pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
		else
			pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
	bad:
		if (error) {
			(void)bus_generic_teardown_intr(dev, child, irq,
			    cookie);
			return (error);
		}
	}
	*cookiep = cookie;
	return (0);
}

/*
 * Tear down an interrupt handler on a PCI child.  For direct children,
 * masks INTx or unwinds the MSI/MSI-X bookkeeping done at setup time
 * before handing the actual teardown to the generic bus code.
 */
int
pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie)
{
	int rid, error;

	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
		return (EINVAL);

	/* If this isn't a direct child, just bail out */
	if (device_get_parent(child) != dev)
		return(bus_generic_teardown_intr(dev, child, irq, cookie));

	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Mask INTx */
		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		struct pci_devinfo *dinfo = device_get_ivars(child);

		/*
		 * Check to see if the interrupt is MSI or MSI-X.  If so,
		 * decrement the appropriate handlers count and mask the
		 * MSI-X message, or disable MSI messages if the count
		 * drops to 0.
		 */
		if (dinfo->cfg.msi.msi_alloc > 0) {
			struct pcicfg_msi *msi = &dinfo->cfg.msi;

			KASSERT(rid <= msi->msi_alloc,
			    ("MSI-X index too high"));
			KASSERT(msi->msi_handlers > 0,
			    ("MSI rid %d is not setup", rid));

			msi->msi_handlers--;
			/* Last handler gone: turn MSI off entirely. */
			if (msi->msi_handlers == 0)
				pci_disable_msi(child);
		} else {
			struct msix_vector *mv;

			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X rid %d allocated", rid));

			mv = pci_find_msix_vector(child, rid);
			KASSERT(mv != NULL,
			    ("MSI-X rid %d is not allocated", rid));
			KASSERT(mv->mv_address != 0,
			    ("MSI-X rid %d has not been setup", rid));

			/* Mask the vector and forget its mapping. */
			pci_mask_msix_vector(child, PCI_MSIX_RID2VEC(rid));
			mv->mv_address = 0;
			mv->mv_data = 0;
		}
	}
	error = bus_generic_teardown_intr(dev, child, irq, cookie);
	if (rid > 0)
		KASSERT(error == 0,
		    ("%s: generic teardown failed for MSI/MSI-X", __func__));
	return (error);
}

/*
 * Print a one-line description of a child device: its resources
 * (I/O ports, memory, IRQ), flags and slot/function address.
 * Returns the number of characters printed.
 */
int
pci_print_child(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	int retval = 0;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	retval += bus_print_child_header(dev, child);

	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
	if (device_get_flags(dev))
		retval += kprintf(" flags %#x", device_get_flags(dev));

	retval += kprintf(" at device %d.%d", pci_get_slot(child),
	    pci_get_function(child));

	retval += bus_print_child_footer(dev, child);

	return (retval);
}

/*
 * Class/subclass -> human-readable description table used by
 * pci_probe_nomatch() when no driver attaches.  A subclass of -1
 * is the generic fallback entry for the whole class.
 */
static struct
{
	int	class;
	int	subclass;
	char	*desc;
} pci_nomatch_tab[] = {
	{PCIC_OLD,		-1,			"old"},
	{PCIC_OLD,		PCIS_OLD_NONVGA,	"non-VGA display device"},
	{PCIC_OLD,		PCIS_OLD_VGA,		"VGA-compatible display device"},
	{PCIC_STORAGE,		-1,			"mass storage"},
	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	"SCSI"},
	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	"ATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	"floppy disk"},
	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	"IPI"},
	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	"RAID"},
	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	"ATA (ADMA)"},
	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	"SATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	"SAS"},
	{PCIC_NETWORK,		-1,			"network"},
	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	"ethernet"},
	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	"token ring"},
	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	"fddi"},
	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	"ATM"},
	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	"ISDN"},
	{PCIC_DISPLAY,		-1,			"display"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	"VGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	"XGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	"3D"},
	{PCIC_MULTIMEDIA,	-1,			"multimedia"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	"video"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	"audio"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	"telephony"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	"HDA"},
	{PCIC_MEMORY,		-1,			"memory"},
	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	"RAM"},
	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	"flash"},
	{PCIC_BRIDGE,		-1,			"bridge"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	"HOST-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	"PCI-ISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	"PCI-EISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	"PCI-MCA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	"PCI-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	"PCI-PCMCIA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	"PCI-NuBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	"PCI-CardBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	"PCI-RACEway"},
	{PCIC_SIMPLECOMM,	-1,			"simple comms"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	"UART"},	/* could detect 16550 */
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	"parallel port"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	"multiport serial"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	"generic modem"},
	{PCIC_BASEPERIPH,	-1,			"base peripheral"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	"interrupt controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	"DMA controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	"timer"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	"realtime clock"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	"PCI hot-plug controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	"SD host controller"},
	{PCIC_INPUTDEV,		-1,			"input device"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	"keyboard"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,"digitizer"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	"mouse"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	"scanner"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	"gameport"},
	{PCIC_DOCKING,		-1,			"docking station"},
	{PCIC_PROCESSOR,	-1,			"processor"},
	{PCIC_SERIALBUS,	-1,			"serial bus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	"FireWire"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	"AccessBus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	"SSA"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	"USB"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	"Fibre Channel"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	"SMBus"},
	{PCIC_WIRELESS,		-1,			"wireless controller"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	"iRDA"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	"IR"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	"RF"},
	{PCIC_INTELLIIO,	-1,			"intelligent I/O controller"},
	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	"I2O"},
	{PCIC_SATCOM,		-1,			"satellite communication"},
	{PCIC_SATCOM,		PCIS_SATCOM_TV,		"sat TV"},
	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	"sat audio"},
	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	"sat voice"},
	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	"sat data"},
	{PCIC_CRYPTO,		-1,			"encrypt/decrypt"},
3557 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"}, 3558 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"}, 3559 {PCIC_DASP, -1, "dasp"}, 3560 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"}, 3561 {0, 0, NULL} 3562 }; 3563 3564 void 3565 pci_probe_nomatch(device_t dev, device_t child) 3566 { 3567 int i; 3568 char *cp, *scp, *device; 3569 3570 /* 3571 * Look for a listing for this device in a loaded device database. 3572 */ 3573 if ((device = pci_describe_device(child)) != NULL) { 3574 device_printf(dev, "<%s>", device); 3575 kfree(device, M_DEVBUF); 3576 } else { 3577 /* 3578 * Scan the class/subclass descriptions for a general 3579 * description. 3580 */ 3581 cp = "unknown"; 3582 scp = NULL; 3583 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) { 3584 if (pci_nomatch_tab[i].class == pci_get_class(child)) { 3585 if (pci_nomatch_tab[i].subclass == -1) { 3586 cp = pci_nomatch_tab[i].desc; 3587 } else if (pci_nomatch_tab[i].subclass == 3588 pci_get_subclass(child)) { 3589 scp = pci_nomatch_tab[i].desc; 3590 } 3591 } 3592 } 3593 device_printf(dev, "<%s%s%s>", 3594 cp ? cp : "", 3595 ((cp != NULL) && (scp != NULL)) ? ", " : "", 3596 scp ? scp : ""); 3597 } 3598 kprintf(" (vendor 0x%04x, dev 0x%04x) at device %d.%d", 3599 pci_get_vendor(child), pci_get_device(child), 3600 pci_get_slot(child), pci_get_function(child)); 3601 if (pci_get_intpin(child) > 0) { 3602 int irq; 3603 3604 irq = pci_get_irq(child); 3605 if (PCI_INTERRUPT_VALID(irq)) 3606 kprintf(" irq %d", irq); 3607 } 3608 kprintf("\n"); 3609 3610 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1); 3611 } 3612 3613 /* 3614 * Parse the PCI device database, if loaded, and return a pointer to a 3615 * description of the device. 3616 * 3617 * The database is flat text formatted as follows: 3618 * 3619 * Any line not in a valid format is ignored. 3620 * Lines are terminated with newline '\n' characters. 
3621 * 3622 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then 3623 * the vendor name. 3624 * 3625 * A DEVICE line is entered immediately below the corresponding VENDOR ID. 3626 * - devices cannot be listed without a corresponding VENDOR line. 3627 * A DEVICE line consists of a TAB, the 4 digit (hex) device code, 3628 * another TAB, then the device name. 3629 */ 3630 3631 /* 3632 * Assuming (ptr) points to the beginning of a line in the database, 3633 * return the vendor or device and description of the next entry. 3634 * The value of (vendor) or (device) inappropriate for the entry type 3635 * is set to -1. Returns nonzero at the end of the database. 3636 * 3637 * Note that this is slightly unrobust in the face of corrupt data; 3638 * we attempt to safeguard against this by spamming the end of the 3639 * database with a newline when we initialise. 3640 */ 3641 static int 3642 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc) 3643 { 3644 char *cp = *ptr; 3645 int left; 3646 3647 *device = -1; 3648 *vendor = -1; 3649 **desc = '\0'; 3650 for (;;) { 3651 left = pci_vendordata_size - (cp - pci_vendordata); 3652 if (left <= 0) { 3653 *ptr = cp; 3654 return(1); 3655 } 3656 3657 /* vendor entry? */ 3658 if (*cp != '\t' && 3659 ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2) 3660 break; 3661 /* device entry? 
*/ 3662 if (*cp == '\t' && 3663 ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2) 3664 break; 3665 3666 /* skip to next line */ 3667 while (*cp != '\n' && left > 0) { 3668 cp++; 3669 left--; 3670 } 3671 if (*cp == '\n') { 3672 cp++; 3673 left--; 3674 } 3675 } 3676 /* skip to next line */ 3677 while (*cp != '\n' && left > 0) { 3678 cp++; 3679 left--; 3680 } 3681 if (*cp == '\n' && left > 0) 3682 cp++; 3683 *ptr = cp; 3684 return(0); 3685 } 3686 3687 static char * 3688 pci_describe_device(device_t dev) 3689 { 3690 int vendor, device; 3691 char *desc, *vp, *dp, *line; 3692 3693 desc = vp = dp = NULL; 3694 3695 /* 3696 * If we have no vendor data, we can't do anything. 3697 */ 3698 if (pci_vendordata == NULL) 3699 goto out; 3700 3701 /* 3702 * Scan the vendor data looking for this device 3703 */ 3704 line = pci_vendordata; 3705 if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL) 3706 goto out; 3707 for (;;) { 3708 if (pci_describe_parse_line(&line, &vendor, &device, &vp)) 3709 goto out; 3710 if (vendor == pci_get_vendor(dev)) 3711 break; 3712 } 3713 if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL) 3714 goto out; 3715 for (;;) { 3716 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) { 3717 *dp = 0; 3718 break; 3719 } 3720 if (vendor != -1) { 3721 *dp = 0; 3722 break; 3723 } 3724 if (device == pci_get_device(dev)) 3725 break; 3726 } 3727 if (dp[0] == '\0') 3728 ksnprintf(dp, 80, "0x%x", pci_get_device(dev)); 3729 if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) != 3730 NULL) 3731 ksprintf(desc, "%s, %s", vp, dp); 3732 out: 3733 if (vp != NULL) 3734 kfree(vp, M_DEVBUF); 3735 if (dp != NULL) 3736 kfree(dp, M_DEVBUF); 3737 return(desc); 3738 } 3739 3740 int 3741 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 3742 { 3743 struct pci_devinfo *dinfo; 3744 pcicfgregs *cfg; 3745 3746 dinfo = device_get_ivars(child); 3747 cfg = &dinfo->cfg; 3748 3749 switch (which) { 3750 case PCI_IVAR_ETHADDR: 3751 /* 3752 * The generic 
accessor doesn't deal with failure, so 3753 * we set the return value, then return an error. 3754 */ 3755 *((uint8_t **) result) = NULL; 3756 return (EINVAL); 3757 case PCI_IVAR_SUBVENDOR: 3758 *result = cfg->subvendor; 3759 break; 3760 case PCI_IVAR_SUBDEVICE: 3761 *result = cfg->subdevice; 3762 break; 3763 case PCI_IVAR_VENDOR: 3764 *result = cfg->vendor; 3765 break; 3766 case PCI_IVAR_DEVICE: 3767 *result = cfg->device; 3768 break; 3769 case PCI_IVAR_DEVID: 3770 *result = (cfg->device << 16) | cfg->vendor; 3771 break; 3772 case PCI_IVAR_CLASS: 3773 *result = cfg->baseclass; 3774 break; 3775 case PCI_IVAR_SUBCLASS: 3776 *result = cfg->subclass; 3777 break; 3778 case PCI_IVAR_PROGIF: 3779 *result = cfg->progif; 3780 break; 3781 case PCI_IVAR_REVID: 3782 *result = cfg->revid; 3783 break; 3784 case PCI_IVAR_INTPIN: 3785 *result = cfg->intpin; 3786 break; 3787 case PCI_IVAR_IRQ: 3788 *result = cfg->intline; 3789 break; 3790 case PCI_IVAR_DOMAIN: 3791 *result = cfg->domain; 3792 break; 3793 case PCI_IVAR_BUS: 3794 *result = cfg->bus; 3795 break; 3796 case PCI_IVAR_SLOT: 3797 *result = cfg->slot; 3798 break; 3799 case PCI_IVAR_FUNCTION: 3800 *result = cfg->func; 3801 break; 3802 case PCI_IVAR_CMDREG: 3803 *result = cfg->cmdreg; 3804 break; 3805 case PCI_IVAR_CACHELNSZ: 3806 *result = cfg->cachelnsz; 3807 break; 3808 case PCI_IVAR_MINGNT: 3809 *result = cfg->mingnt; 3810 break; 3811 case PCI_IVAR_MAXLAT: 3812 *result = cfg->maxlat; 3813 break; 3814 case PCI_IVAR_LATTIMER: 3815 *result = cfg->lattimer; 3816 break; 3817 case PCI_IVAR_PCIXCAP_PTR: 3818 *result = cfg->pcix.pcix_ptr; 3819 break; 3820 case PCI_IVAR_PCIECAP_PTR: 3821 *result = cfg->expr.expr_ptr; 3822 break; 3823 case PCI_IVAR_VPDCAP_PTR: 3824 *result = cfg->vpd.vpd_reg; 3825 break; 3826 default: 3827 return (ENOENT); 3828 } 3829 return (0); 3830 } 3831 3832 int 3833 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) 3834 { 3835 struct pci_devinfo *dinfo; 3836 3837 dinfo = 
device_get_ivars(child); 3838 3839 switch (which) { 3840 case PCI_IVAR_INTPIN: 3841 dinfo->cfg.intpin = value; 3842 return (0); 3843 case PCI_IVAR_ETHADDR: 3844 case PCI_IVAR_SUBVENDOR: 3845 case PCI_IVAR_SUBDEVICE: 3846 case PCI_IVAR_VENDOR: 3847 case PCI_IVAR_DEVICE: 3848 case PCI_IVAR_DEVID: 3849 case PCI_IVAR_CLASS: 3850 case PCI_IVAR_SUBCLASS: 3851 case PCI_IVAR_PROGIF: 3852 case PCI_IVAR_REVID: 3853 case PCI_IVAR_IRQ: 3854 case PCI_IVAR_DOMAIN: 3855 case PCI_IVAR_BUS: 3856 case PCI_IVAR_SLOT: 3857 case PCI_IVAR_FUNCTION: 3858 return (EINVAL); /* disallow for now */ 3859 3860 default: 3861 return (ENOENT); 3862 } 3863 } 3864 #ifdef notyet 3865 #include "opt_ddb.h" 3866 #ifdef DDB 3867 #include <ddb/ddb.h> 3868 #include <sys/cons.h> 3869 3870 /* 3871 * List resources based on pci map registers, used for within ddb 3872 */ 3873 3874 DB_SHOW_COMMAND(pciregs, db_pci_dump) 3875 { 3876 struct pci_devinfo *dinfo; 3877 struct devlist *devlist_head; 3878 struct pci_conf *p; 3879 const char *name; 3880 int i, error, none_count; 3881 3882 none_count = 0; 3883 /* get the head of the device queue */ 3884 devlist_head = &pci_devq; 3885 3886 /* 3887 * Go through the list of devices and print out devices 3888 */ 3889 for (error = 0, i = 0, 3890 dinfo = STAILQ_FIRST(devlist_head); 3891 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit; 3892 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { 3893 3894 /* Populate pd_name and pd_unit */ 3895 name = NULL; 3896 if (dinfo->cfg.dev) 3897 name = device_get_name(dinfo->cfg.dev); 3898 3899 p = &dinfo->conf; 3900 db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x " 3901 "chip=0x%08x rev=0x%02x hdr=0x%02x\n", 3902 (name && *name) ? name : "none", 3903 (name && *name) ? 
(int)device_get_unit(dinfo->cfg.dev) : 3904 none_count++, 3905 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev, 3906 p->pc_sel.pc_func, (p->pc_class << 16) | 3907 (p->pc_subclass << 8) | p->pc_progif, 3908 (p->pc_subdevice << 16) | p->pc_subvendor, 3909 (p->pc_device << 16) | p->pc_vendor, 3910 p->pc_revid, p->pc_hdr); 3911 } 3912 } 3913 #endif /* DDB */ 3914 #endif 3915 3916 static struct resource * 3917 pci_alloc_map(device_t dev, device_t child, int type, int *rid, 3918 u_long start, u_long end, u_long count, u_int flags) 3919 { 3920 struct pci_devinfo *dinfo = device_get_ivars(child); 3921 struct resource_list *rl = &dinfo->resources; 3922 struct resource_list_entry *rle; 3923 struct resource *res; 3924 pci_addr_t map, testval; 3925 int mapsize; 3926 3927 /* 3928 * Weed out the bogons, and figure out how large the BAR/map 3929 * is. Bars that read back 0 here are bogus and unimplemented. 3930 * Note: atapci in legacy mode are special and handled elsewhere 3931 * in the code. If you have a atapci device in legacy mode and 3932 * it fails here, that other code is broken. 3933 */ 3934 res = NULL; 3935 map = pci_read_config(child, *rid, 4); 3936 pci_write_config(child, *rid, 0xffffffff, 4); 3937 testval = pci_read_config(child, *rid, 4); 3938 if (pci_maprange(testval) == 64) 3939 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32; 3940 if (pci_mapbase(testval) == 0) 3941 goto out; 3942 3943 /* 3944 * Restore the original value of the BAR. We may have reprogrammed 3945 * the BAR of the low-level console device and when booting verbose, 3946 * we need the console device addressable. 
	 */
	pci_write_config(child, *rid, map, 4);

	/* The BAR's type must agree with what the caller asked for. */
	if (PCI_BAR_MEM(testval)) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an memio\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	}
	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	mapsize = pci_mapsize(testval);
	count = 1UL << mapsize;
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
	if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
		flags |= RF_PREFETCHABLE;

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate bar for that resource.
	 */
	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
	    start, end, count, flags, -1);
	if (res == NULL) {
		device_printf(child,
		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
		    count, *rid, type, start, end);
		goto out;
	}
	resource_list_add(rl, type, *rid, start, end, count, -1);
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		panic("pci_alloc_map: unexpectedly can't find resource.");
	rle->res = res;
	rle->start = rman_get_start(res);
	rle->end = rman_get_end(res);
	rle->count = count;
	if (bootverbose)
		device_printf(child,
		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
		    count, *rid, type, rman_get_start(res));
	map = rman_get_start(res);
out:;
	/* Program the BAR (both halves for a 64-bit BAR). */
	pci_write_config(child, *rid, map, 4);
	if (pci_maprange(testval) == 64)
		pci_write_config(child, *rid + 4, map >> 32, 4);
	return (res);
}


/*
 * Allocate a resource on behalf of a child device, performing lazy
 * BAR sizing and interrupt routing for direct children.
 */
struct resource *
pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags, int cpuid)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	pcicfgregs *cfg = &dinfo->cfg;

	/*
	 * Perform lazy resource allocation
	 */
	if (device_get_parent(child) == dev) {
		switch (type) {
		case SYS_RES_IRQ:
			/*
			 * Can't alloc legacy interrupt once MSI messages
			 * have been allocated.
			 */
			if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
			    cfg->msix.msix_alloc > 0))
				return (NULL);
			/*
			 * If the child device doesn't have an
			 * interrupt routed and is deserving of an
			 * interrupt, try to assign it one.
			 */
			if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
			    (cfg->intpin != 0))
				pci_assign_interrupt(dev, child, 0);
			break;
		case SYS_RES_IOPORT:
		case SYS_RES_MEMORY:
			if (*rid < PCIR_BAR(cfg->nummaps)) {
				/*
				 * Enable the I/O mode.  We should
				 * also be assigning resources too
				 * when none are present.  The
				 * resource_list_alloc kind of sorta does
				 * this...
				 */
				if (PCI_ENABLE_IO(dev, child, type))
					return (NULL);
			}
			/* No entry yet: size and allocate the BAR lazily. */
			rle = resource_list_find(rl, type, *rid);
			if (rle == NULL)
				return (pci_alloc_map(dev, child, type, rid,
				    start, end, count, flags));
			break;
		}
		/*
		 * If we've already allocated the resource, then
		 * return it now.  But first we may need to activate
		 * it, since we don't allocate the resource as active
		 * above.  Normally this would be done down in the
		 * nexus, but since we short-circuit that path we have
		 * to do its job here.  Not sure if we should kfree the
		 * resource if it fails to activate.
		 */
		rle = resource_list_find(rl, type, *rid);
		if (rle != NULL && rle->res != NULL) {
			if (bootverbose)
				device_printf(child,
			    "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
				    rman_get_size(rle->res), *rid, type,
				    rman_get_start(rle->res));
			if ((flags & RF_ACTIVE) &&
			    bus_generic_activate_resource(dev, child, type,
			    *rid, rle->res) != 0)
				return (NULL);
			return (rle->res);
		}
	}
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags, cpuid));
}

/*
 * Delete a resource entry for a direct child, releasing the underlying
 * resource if it is no longer in use by the child.
 */
void
pci_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	if (device_get_parent(child) != dev)
		return;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	if (rle) {
		if (rle->res) {
			/* Refuse to yank a resource the child still owns. */
			if (rman_get_device(rle->res) != dev ||
			    rman_get_flags(rle->res) & RF_ACTIVE) {
				device_printf(dev, "delete_resource: "
				    "Resource still owned by child, oops. "
				    "(type=%d, rid=%d, addr=%lx)\n",
				    rle->type, rle->rid,
				    rman_get_start(rle->res));
				return;
			}
			bus_release_resource(dev, type, rid, rle->res);
		}
		resource_list_delete(rl, type, rid);
	}
	/*
	 * Why do we turn off the PCI configuration BAR when we delete a
	 * resource?
	 * -- imp
	 */
	pci_write_config(child, rid, 0, 4);
	BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
}

/*
 * Return the resource list of a PCI child, or NULL if it has no ivars.
 */
struct resource_list *
pci_get_resource_list (device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);

	if (dinfo == NULL)
		return (NULL);

	return (&dinfo->resources);
}

/*
 * Read a child's config-space register via the parent bridge.
 */
uint32_t
pci_read_config_method(device_t dev, device_t child, int reg, int width)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;

	return (PCIB_READ_CONFIG(device_get_parent(dev),
	    cfg->bus, cfg->slot, cfg->func, reg, width));
}

/*
 * Write a child's config-space register via the parent bridge.
 */
void
pci_write_config_method(device_t dev, device_t child, int reg,
    uint32_t val, int width)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;

	PCIB_WRITE_CONFIG(device_get_parent(dev),
	    cfg->bus, cfg->slot, cfg->func, reg, val, width);
}

/*
 * Format the child's bus location ("slot=%d function=%d") into buf.
 */
int
pci_child_location_str_method(device_t dev, device_t child, char *buf,
    size_t buflen)
{

	ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
	    pci_get_function(child));
	return (0);
}

/*
 * Format the child's plug-and-play identification string into buf.
 */
int
pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
    size_t buflen)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;
	ksnprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
	    "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
	    cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
	    cfg->progif);
	return (0);
}

/*
 * Ask the parent bridge to route an interrupt for the child's intpin.
 */
int
pci_assign_interrupt_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;

	return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
	    cfg->intpin));
}

/*
 * Module event handler: set up the device queue, /dev/pci node and
 * vendor database on load; destroy the device node on unload.
 */
static int
pci_modevent(module_t mod, int what, void *arg)
{
	static struct cdev *pci_cdev;

	switch (what) {
	case MOD_LOAD:
		STAILQ_INIT(&pci_devq);
		pci_generation = 0;
		pci_cdev = make_dev(&pci_ops, 0, UID_ROOT, GID_WHEEL, 0644,
		    "pci");
		pci_load_vendor_data();
		break;

	case MOD_UNLOAD:
		destroy_dev(pci_cdev);
		break;
	}

	return (0);
}

/*
 * Restore a device's saved config-space state (BARs, command register,
 * interrupt routing, latency settings and MSI/MSI-X configuration),
 * typically after a power-state transition or resume.
 */
void
pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
{
	int i;

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges,
	 * which we know need special treatment.  Type 2 devices are
	 * cardbus bridges which also require special treatment.
	 * Other types are unknown, and we err on the side of safety
	 * by ignoring them.
	 */
	if (dinfo->cfg.hdrtype != 0)
		return;

	/*
	 * Restore the device to full power mode.  We must do this
	 * before we restore the registers because moving from D3 to
	 * D0 will cause the chip's BARs and some other registers to
	 * be reset to some unknown power on reset values.  Cut down
	 * the noise on boot by doing nothing if we are already in
	 * state D0.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	}
	for (i = 0; i < dinfo->cfg.nummaps; i++)
		pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
	pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);

	/* Restore MSI and MSI-X configurations if they are present. */
	if (dinfo->cfg.msi.msi_location != 0)
		pci_resume_msi(dev);
	if (dinfo->cfg.msix.msix_location != 0)
		pci_resume_msix(dev);
}

/*
 * Save a device's config-space state so it can be restored later, and
 * optionally power the device down if no driver is attached (subject to
 * the pci_do_power_nodriver policy).
 */
void
pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
{
	int i;
	uint32_t cls;
	int ps;

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges, which
	 * we know need special treatment.  Type 2 devices are cardbus bridges
	 * which also require special treatment.  Other types are unknown, and
	 * we err on the side of safety by ignoring them.  Powering down
	 * bridges should not be undertaken lightly.
	 */
	if (dinfo->cfg.hdrtype != 0)
		return;
	for (i = 0; i < dinfo->cfg.nummaps; i++)
		dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
	dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);

	/*
	 * Some drivers apparently write to these registers w/o updating our
	 * cached copy.  No harm happens if we update the copy, so do so here
	 * so we can restore them.
	 * The COMMAND register is modified by the
	 * bus w/o updating the cache.  This should represent the normally
	 * writable portion of the 'defined' part of type 0 headers.  In
	 * theory we also need to save/restore the PCI capability structures
	 * we know about, but apart from power we don't know any that are
	 * writable.
	 */
	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);

	/*
	 * don't set the state for display devices, base peripherals and
	 * memory devices since bad things happen when they are powered down.
	 * We should (a) have drivers that can easily detach and (b) use
	 * generic drivers for these devices so that some device actually
	 * attaches.  We need to make sure that when we implement (a) we don't
	 * power the device down on a reattach.
	 */
	cls = pci_get_class(dev);
	if (!setstate)
		return;
	switch (pci_do_power_nodriver)
	{
	case 0:		/* NO powerdown at all */
		return;
	case 1:		/* Conservative about what to power down */
		if (cls == PCIC_STORAGE)
			return;
		/*FALLTHROUGH*/
	case 2:		/* Agressive about what to power down */
		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
		    cls == PCIC_BASEPERIPH)
			return;
		/*FALLTHROUGH*/
	case 3:		/* Power down everything */
		break;
	}
	/*
	 * PCI spec says we can only go into D3 state from D0 state.
	 * Transition from D[12] into D0 before going to D3 state.
	 */
	ps = pci_get_powerstate(dev);
	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
}

/*
 * Allocate a single interrupt for a device, preferring MSI when enabled
 * (optionally overridden by the "msi.enable"/"msi.cpu" tunables).  On
 * return *rid0 and *flags0 are set for the subsequent resource
 * allocation; the return value is the interrupt type actually chosen.
 */
int
pci_alloc_1intr(device_t dev, int msi_enable, int *rid0, u_int *flags0)
{
	int rid, type;
	u_int flags;

	rid = 0;
	type = PCI_INTR_TYPE_LEGACY;
	flags = RF_SHAREABLE | RF_ACTIVE;

	msi_enable = device_getenv_int(dev, "msi.enable", msi_enable);
	if (msi_enable) {
		int cpu;

		cpu = device_getenv_int(dev, "msi.cpu", -1);
		if (cpu >= ncpus)
			cpu = ncpus - 1;

		if (pci_alloc_msi(dev, &rid, 1, cpu) == 0) {
			/* MSI is exclusive to this device. */
			flags &= ~RF_SHAREABLE;
			type = PCI_INTR_TYPE_MSI;
		}
	}

	*rid0 = rid;
	*flags0 = flags;

	return type;
}

/* Wrapper APIs suitable for device driver use. */
void
pci_save_state(device_t dev)
{
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(dev);
	pci_cfg_save(dev, dinfo, 0);
}

void
pci_restore_state(device_t dev)
{
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(dev);
	pci_cfg_restore(dev, dinfo);
}