/*	$NetBSD: pci.h,v 1.24 2017/08/31 23:47:50 maya Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_PCI_H_
#define _LINUX_PCI_H_

#ifdef _KERNEL_OPT
#if defined(i386) || defined(amd64)
#include "acpica.h"
#else	/* !(i386 || amd64) */
#define NACPICA	0
#endif	/* i386 || amd64 */
#endif

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <machine/limits.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#else
struct acpi_devnode;
#endif

#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/kernel.h>

struct pci_bus {
	u_int		number;
};

struct pci_device_id {
	uint32_t	vendor;
	uint32_t	device;
	uint32_t	subvendor;
	uint32_t	subdevice;
	uint32_t	class;
	uint32_t	class_mask;
	unsigned long	driver_data;
};

#define PCI_ANY_ID		((pcireg_t)-1)

#define PCI_BASE_CLASS_DISPLAY	PCI_CLASS_DISPLAY

#define PCI_CLASS_DISPLAY_VGA \
	((PCI_CLASS_DISPLAY << 8) | PCI_SUBCLASS_DISPLAY_VGA)
#define PCI_CLASS_BRIDGE_ISA \
	((PCI_CLASS_BRIDGE << 8) | PCI_SUBCLASS_BRIDGE_ISA)
CTASSERT(PCI_CLASS_BRIDGE_ISA == 0x0601);
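
/*
 * The composite class codes above use Linux's (base class << 8 | subclass)
 * layout rather than NetBSD's PCI_CLASS_REG encoding; the CTASSERT
 * double-checks the ISA bridge value against Linux's 0x0601.
 */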

/* XXX This is getting silly... */
#define PCI_VENDOR_ID_ASUSTEK	PCI_VENDOR_ASUSTEK
#define PCI_VENDOR_ID_ATI	PCI_VENDOR_ATI
#define PCI_VENDOR_ID_DELL	PCI_VENDOR_DELL
#define PCI_VENDOR_ID_IBM	PCI_VENDOR_IBM
#define PCI_VENDOR_ID_HP	PCI_VENDOR_HP
#define PCI_VENDOR_ID_INTEL	PCI_VENDOR_INTEL
#define PCI_VENDOR_ID_NVIDIA	PCI_VENDOR_NVIDIA
#define PCI_VENDOR_ID_SONY	PCI_VENDOR_SONY
#define PCI_VENDOR_ID_VIA	PCI_VENDOR_VIATECH

#define PCI_DEVICE_ID_ATI_RADEON_QY	PCI_PRODUCT_ATI_RADEON_RV100_QY

#define PCI_DEVFN(DEV, FN) \
	(__SHIFTIN((DEV), __BITS(3, 7)) | __SHIFTIN((FN), __BITS(0, 2)))
#define PCI_SLOT(DEVFN)		__SHIFTOUT((DEVFN), __BITS(3, 7))
#define PCI_FUNC(DEVFN)		__SHIFTOUT((DEVFN), __BITS(0, 2))

#define PCI_NUM_RESOURCES	((PCI_MAPREG_END - PCI_MAPREG_START) / 4)
#define DEVICE_COUNT_RESOURCE	PCI_NUM_RESOURCES

#define PCI_CAP_ID_AGP		PCI_CAP_AGP

typedef int pci_power_t;

#define PCI_D0		0
#define PCI_D1		1
#define PCI_D2		2
#define PCI_D3hot	3
#define PCI_D3cold	4

#define __pci_iomem

struct pci_dev {
	struct pci_attach_args	pd_pa;
	int			pd_kludges;	/* Gotta lose 'em...  */
#define NBPCI_KLUDGE_GET_MUMBLE	0x01
#define NBPCI_KLUDGE_MAP_ROM	0x02
	bus_space_tag_t		pd_rom_bst;
	bus_space_handle_t	pd_rom_bsh;
	bus_size_t		pd_rom_size;
	bus_space_handle_t	pd_rom_found_bsh;
	bus_size_t		pd_rom_found_size;
	void			*pd_rom_vaddr;
	device_t		pd_dev;
	struct drm_device	*pd_drm_dev;	/* XXX Nouveau kludge!  */
	struct {
		pcireg_t		type;
		bus_addr_t		addr;
		bus_size_t		size;
		int			flags;
		bus_space_tag_t		bst;
		bus_space_handle_t	bsh;
		void __pci_iomem	*kva;
	}			pd_resources[PCI_NUM_RESOURCES];
	struct pci_conf_state	*pd_saved_state;
	struct acpi_devnode	*pd_ad;
	struct pci_bus		*bus;
	uint32_t		devfn;
	uint16_t		vendor;
	uint16_t		device;
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	uint8_t			revision;
	uint32_t		class;
	bool			msi_enabled;
	pci_intr_handle_t	*intr_handles;
};

static inline device_t
pci_dev_dev(struct pci_dev *pdev)
{
	return pdev->pd_dev;
}

/* XXX Nouveau kludge!  */
static inline struct drm_device *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drm_dev;
}

static inline void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->bus = kmem_zalloc(sizeof(struct pci_bus), KM_NOSLEEP);
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */
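
	/*
	 * Record the address, size, and flags of each BAR up front so
	 * that the pci_resource_*() and pci_iomap() helpers below can
	 * answer from this cache instead of re-reading config space.
	 */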

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
	}
}

static inline int
pci_find_capability(struct pci_dev *pdev, int cap)
{
	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

static inline int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{
	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

static inline int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{
	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

static inline int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

static inline int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{
	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

static inline void
pci_rmw_config(struct pci_dev *pdev, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	pci_read_config_dword(pdev, reg32, &value32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_write_config_dword(pdev, reg32, value32);
}

static inline int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{
	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev, reg, 2, value);
	return 0;
}

static inline int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{
	pci_rmw_config(pdev, reg, 1, value);
	return 0;
}

static inline int
pci_enable_msi(struct pci_dev *pdev)
{
#ifdef notyet
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
#else
	return -ENOSYS;
#endif
}

static inline void
pci_disable_msi(struct pci_dev *pdev __unused)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->intr_handles, 1);
		pdev->intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

static inline void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}
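
/*
 * Example (a sketch only; names like "sc" and "regs" are hypothetical,
 * not taken from any in-tree driver): a typical attach path built on
 * the helpers in this file would do roughly
 *
 *	linux_pci_dev_init(&sc->sc_pci_dev, self, pa, 0);
 *	pci_set_master(&sc->sc_pci_dev);
 *	regs = pci_iomap(&sc->sc_pci_dev, 0,
 *	    pci_resource_len(&sc->sc_pci_dev, 0));
 *
 * with pci_iounmap() and pci_clear_master() undoing it on detach.
 */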

static inline void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

#define PCIBIOS_MIN_MEM	0x100000	/* XXX bogus x86 kludge bollocks */

static inline bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

static inline int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static inline int /* XXX inline?  */
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

static inline struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}
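
/*
 * Match callback for pci_find_device(): accept any ISA-class bridge.
 * Used only by pci_get_class() below, for intel_detect_pch.
 */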

static inline int /* XXX inline?  */
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

static inline void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev, sizeof(*pdev));
}

static inline struct pci_dev *
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

#define __pci_rom_iomem

static inline void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

static inline void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;
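
	/*
	 * The machine-dependent fallback mapped the legacy VGA ROM
	 * window; search that for an x86 ROM image just as above.
	 */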

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

static inline void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

static inline int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

static inline void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

static inline bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

static inline bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

static inline bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

static inline int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

static inline void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error) {
		/* Horrible hack: try asking the fake AGP device.  */
		if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
			&pdev->pd_resources[i].bsh))
			return NULL;
	}
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);

	return pdev->pd_resources[i].kva;
}

static inline void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

static inline void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

static inline void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

static inline bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

static inline bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

#endif	/* _LINUX_PCI_H_ */