/*	$NetBSD: pci.h,v 1.19 2015/06/24 19:46:30 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_PCI_H_
#define _LINUX_PCI_H_

#ifdef _KERNEL_OPT
#if defined(i386) || defined(amd64)
#include "acpica.h"
#else	/* !(i386 || amd64) */
#define	NACPICA	0
#endif	/* i386 || amd64 */
#endif

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <machine/limits.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>

#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>

#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/kernel.h>

struct pci_bus {
	u_int		number;
};

struct pci_device_id {
	uint32_t	vendor;
	uint32_t	device;
	uint32_t	subvendor;
	uint32_t	subdevice;
	uint32_t	class;
	uint32_t	class_mask;
	unsigned long	driver_data;
};

#define	PCI_ANY_ID		((pcireg_t)-1)

#define	PCI_BASE_CLASS_DISPLAY	PCI_CLASS_DISPLAY

#define	PCI_CLASS_DISPLAY_VGA						\
	((PCI_CLASS_DISPLAY << 8) | PCI_SUBCLASS_DISPLAY_VGA)
#define	PCI_CLASS_BRIDGE_ISA						\
	((PCI_CLASS_BRIDGE << 8) | PCI_SUBCLASS_BRIDGE_ISA)
CTASSERT(PCI_CLASS_BRIDGE_ISA == 0x0601);

/* XXX This is getting silly...  */
#define	PCI_VENDOR_ID_ASUSTEK	PCI_VENDOR_ASUSTEK
#define	PCI_VENDOR_ID_ATI	PCI_VENDOR_ATI
#define	PCI_VENDOR_ID_DELL	PCI_VENDOR_DELL
#define	PCI_VENDOR_ID_IBM	PCI_VENDOR_IBM
#define	PCI_VENDOR_ID_HP	PCI_VENDOR_HP
#define	PCI_VENDOR_ID_INTEL	PCI_VENDOR_INTEL
#define	PCI_VENDOR_ID_NVIDIA	PCI_VENDOR_NVIDIA
#define	PCI_VENDOR_ID_SONY	PCI_VENDOR_SONY
#define	PCI_VENDOR_ID_VIA	PCI_VENDOR_VIATECH

#define	PCI_DEVICE_ID_ATI_RADEON_QY	PCI_PRODUCT_ATI_RADEON_RV100_QY
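
/*
 * Illustrative sketch only (hypothetical table, not taken from any real
 * driver): Linux-style drivers describe the hardware they support with
 * an array of struct pci_device_id, using PCI_ANY_ID as a wildcard.
 * The device ID 0x1234 below is made up.
 *
 *	static const struct pci_device_id example_ids[] = {
 *		{ PCI_VENDOR_ID_INTEL, 0x1234, PCI_ANY_ID, PCI_ANY_ID,
 *		  0, 0, 0 },
 *		{ 0, 0, 0, 0, 0, 0, 0 },
 *	};
 */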

#define	PCI_DEVFN(DEV, FN)						\
	(__SHIFTIN((DEV), __BITS(3, 7)) | __SHIFTIN((FN), __BITS(0, 2)))
#define	PCI_SLOT(DEVFN)		__SHIFTOUT((DEVFN), __BITS(3, 7))
#define	PCI_FUNC(DEVFN)		__SHIFTOUT((DEVFN), __BITS(0, 2))

#define	PCI_NUM_RESOURCES	((PCI_MAPREG_END - PCI_MAPREG_START) / 4)
#define	DEVICE_COUNT_RESOURCE	PCI_NUM_RESOURCES

#define	PCI_CAP_ID_AGP	PCI_CAP_AGP

typedef int pci_power_t;

#define	PCI_D0		0
#define	PCI_D1		1
#define	PCI_D2		2
#define	PCI_D3hot	3
#define	PCI_D3cold	4

#define	__pci_iomem

struct pci_dev {
	struct pci_attach_args	pd_pa;
	int			pd_kludges;	/* Gotta lose 'em...  */
#define	NBPCI_KLUDGE_GET_MUMBLE	0x01
#define	NBPCI_KLUDGE_MAP_ROM	0x02
	bus_space_tag_t		pd_rom_bst;
	bus_space_handle_t	pd_rom_bsh;
	bus_size_t		pd_rom_size;
	bus_space_handle_t	pd_rom_found_bsh;
	bus_size_t		pd_rom_found_size;
	void			*pd_rom_vaddr;
	device_t		pd_dev;
	struct drm_device	*pd_drm_dev;	/* XXX Nouveau kludge!  */
	struct {
		pcireg_t		type;
		bus_addr_t		addr;
		bus_size_t		size;
		int			flags;
		bus_space_tag_t		bst;
		bus_space_handle_t	bsh;
		void __pci_iomem	*kva;
	}			pd_resources[PCI_NUM_RESOURCES];
	struct pci_conf_state	*pd_saved_state;
	struct acpi_devnode	*pd_ad;
	struct device		dev;		/* XXX Don't believe me!  */
	struct pci_bus		*bus;
	uint32_t		devfn;
	uint16_t		vendor;
	uint16_t		device;
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	uint8_t			revision;
	uint32_t		class;
	bool			msi_enabled;
};

static inline device_t
pci_dev_dev(struct pci_dev *pdev)
{
	return pdev->pd_dev;
}

/* XXX Nouveau kludge!  Don't believe me!  */
static inline struct pci_dev *
to_pci_dev(struct device *dev)
{

	return container_of(dev, struct pci_dev, dev);
}

/* XXX Nouveau kludge!  */
static inline struct drm_device *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drm_dev;
}

static inline void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->bus = kmem_zalloc(sizeof(struct pci_bus), KM_NOSLEEP);
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL);
	    /* 24-bit class code: base class, subclass, interface */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
	}
}
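
/*
 * Illustrative sketch only, assuming a hypothetical example_softc with
 * an embedded struct pci_dev sc_pci_dev: a native attach function
 * wraps its pci_attach_args for the Linux-style code roughly like so.
 *
 *	static void
 *	example_attach(device_t parent, device_t self, void *aux)
 *	{
 *		const struct pci_attach_args *const pa = aux;
 *		struct example_softc *const sc = device_private(self);
 *
 *		linux_pci_dev_init(&sc->sc_pci_dev, self, pa, 0);
 *		...
 *	}
 */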

static inline int
pci_find_capability(struct pci_dev *pdev, int cap)
{
	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

static inline int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{
	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

static inline int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{
	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

static inline int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

static inline int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{
	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

static inline void
pci_rmw_config(struct pci_dev *pdev, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	pci_read_config_dword(pdev, reg32, &value32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_write_config_dword(pdev, reg32, value32);
}

static inline int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{
	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev, reg, 2, value);
	return 0;
}

static inline int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{
	pci_rmw_config(pdev, reg, 1, value);
	return 0;
}
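
/*
 * Worked example of the read/modify/write scheme above (the register
 * offset 0x52 is arbitrary): pci_write_config_byte(pdev, 0x52, 0xab)
 * reads the dword at 0x50, clears bits 23:16 (mask 0xff shifted left by
 * 8 * (0x52 & 3) = 16), ORs in 0xab << 16, and writes the dword back.
 * The sub-dword reads above are synthesized the same way, by shifting
 * down the containing aligned dword.
 */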

/*
 * XXX pci msi
 */
static inline int
pci_enable_msi(struct pci_dev *pdev)
{
	return -ENOSYS;
}

static inline void
pci_disable_msi(struct pci_dev *pdev __unused)
{
	KASSERT(pdev->msi_enabled);
}

static inline void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

static inline void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

#define	PCIBIOS_MIN_MEM	0x100000	/* XXX bogus x86 kludge bollocks */

static inline bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

static inline int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static inline int			/* XXX inline?  */
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

static inline struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static inline int			/* XXX inline?  */
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

static inline void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev, sizeof(*pdev));
}

static inline struct pci_dev *
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}
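
/*
 * Given the KASSERTs above, the only call patterns these kludges
 * support are the two i915drm uses (the variable names here are
 * illustrative):
 *
 *	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
 *	...
 *	pci_dev_put(bridge);
 *
 *	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
 *	...
 *	pci_dev_put(pch);
 *
 * Both return NULL if no matching device is found.
 */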

#define	__pci_rom_iomem

static inline void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;

	return 0;
#else
	return ENXIO;
#endif
}

static inline void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0 &&
	    pci_map_rom_md(pdev) != 0)
		return NULL;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		return NULL;
	}

	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;
}

static inline void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}
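
/*
 * Illustrative use of the ROM helpers above (error handling trimmed,
 * variable names hypothetical):
 *
 *	size_t romsize;
 *	void __pci_rom_iomem *rom;
 *
 *	rom = pci_map_rom(pdev, &romsize);
 *	if (rom != NULL) {
 *		... parse at most romsize bytes at rom ...
 *		pci_unmap_rom(pdev, rom);
 *	}
 */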

static inline int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

static inline void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

static inline bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

static inline bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

static inline bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

static inline int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

static inline void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error) {
		/* Horrible hack: try asking the fake AGP device.  */
		if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
			&pdev->pd_resources[i].bsh))
			return NULL;
	}
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);

	return pdev->pd_resources[i].kva;
}

static inline void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}
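
/*
 * Illustrative use of the BAR helpers above (the BAR index 0 is
 * arbitrary and the names are hypothetical):
 *
 *	void __pci_iomem *regs;
 *
 *	regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	...
 *	pci_iounmap(pdev, regs);
 */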

static inline void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

static inline void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

static inline bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

static inline bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

#endif	/* _LINUX_PCI_H_ */