/*	$NetBSD: pci.h,v 1.11 2014/11/11 11:30:21 nonaka Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_PCI_H_
#define _LINUX_PCI_H_

#ifdef _KERNEL_OPT
#if defined(i386) || defined(amd64)
#include "acpica.h"
#else   /* !(i386 || amd64) */
#define NACPICA 0
#endif  /* i386 || amd64 */
#endif

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <machine/limits.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>

#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>

#include <linux/dma-mapping.h>
#include <linux/ioport.h>

struct pci_bus {
        u_int                   number;
};

struct pci_device_id {
        uint32_t                vendor;
        uint32_t                device;
        uint32_t                subvendor;
        uint32_t                subdevice;
        uint32_t                class;
        uint32_t                class_mask;
        unsigned long           driver_data;
};

#define PCI_ANY_ID              ((pcireg_t)-1)

#define PCI_BASE_CLASS_DISPLAY  PCI_CLASS_DISPLAY

#define PCI_CLASS_BRIDGE_ISA                                            \
        ((PCI_CLASS_BRIDGE << 8) | PCI_SUBCLASS_BRIDGE_ISA)
CTASSERT(PCI_CLASS_BRIDGE_ISA == 0x0601);
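
/*
 * Illustrative sketch, not part of this header: a Linux-style match
 * table built from struct pci_device_id above.  The table name and the
 * device ID are made up; real tables live in the drm drivers that
 * include this shim.  PCI_ANY_ID wildcards the subsystem IDs, and
 * driver_data carries per-entry flags back to the driver.
 *
 *      static const struct pci_device_id example_ids[] = {
 *              {
 *                      .vendor = PCI_VENDOR_INTEL,
 *                      .device = 0x1234,
 *                      .subvendor = PCI_ANY_ID,
 *                      .subdevice = PCI_ANY_ID,
 *                      .driver_data = 0,
 *              },
 *              { 0 },
 *      };
 */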

/* XXX This is getting silly...  */
#define PCI_VENDOR_ID_ASUSTEK   PCI_VENDOR_ASUSTEK
#define PCI_VENDOR_ID_ATI       PCI_VENDOR_ATI
#define PCI_VENDOR_ID_DELL      PCI_VENDOR_DELL
#define PCI_VENDOR_ID_IBM       PCI_VENDOR_IBM
#define PCI_VENDOR_ID_HP        PCI_VENDOR_HP
#define PCI_VENDOR_ID_INTEL     PCI_VENDOR_INTEL
#define PCI_VENDOR_ID_NVIDIA    PCI_VENDOR_NVIDIA
#define PCI_VENDOR_ID_SONY      PCI_VENDOR_SONY
#define PCI_VENDOR_ID_VIA       PCI_VENDOR_VIATECH

#define PCI_DEVICE_ID_ATI_RADEON_QY     PCI_PRODUCT_ATI_RADEON_RV100_QY

#define PCI_DEVFN(DEV, FN)                                              \
        (__SHIFTIN((DEV), __BITS(3, 7)) | __SHIFTIN((FN), __BITS(0, 2)))
#define PCI_SLOT(DEVFN)         __SHIFTOUT((DEVFN), __BITS(3, 7))
#define PCI_FUNC(DEVFN)         __SHIFTOUT((DEVFN), __BITS(0, 2))

#define PCI_NUM_RESOURCES       ((PCI_MAPREG_END - PCI_MAPREG_START) / 4)
#define DEVICE_COUNT_RESOURCE   PCI_NUM_RESOURCES

#define PCI_CAP_ID_AGP          PCI_CAP_AGP

typedef int pci_power_t;

#define PCI_D0          0
#define PCI_D1          1
#define PCI_D2          2
#define PCI_D3hot       3
#define PCI_D3cold      4

#define __pci_iomem

struct pci_dev {
        struct pci_attach_args  pd_pa;
        int                     pd_kludges;     /* Gotta lose 'em...  */
#define NBPCI_KLUDGE_GET_MUMBLE 0x01
#define NBPCI_KLUDGE_MAP_ROM    0x02
        bus_space_tag_t         pd_rom_bst;
        bus_space_handle_t      pd_rom_bsh;
        bus_size_t              pd_rom_size;
        void                    *pd_rom_vaddr;
        device_t                pd_dev;
        struct {
                pcireg_t                type;
                bus_addr_t              addr;
                bus_size_t              size;
                int                     flags;
                bus_space_tag_t         bst;
                bus_space_handle_t      bsh;
                void __pci_iomem        *kva;
        }                       pd_resources[PCI_NUM_RESOURCES];
        struct pci_conf_state   *pd_saved_state;
        struct acpi_devnode     *pd_ad;
        struct device           dev;            /* XXX Don't believe me!  */
        struct pci_bus          *bus;
        uint32_t                devfn;
        uint16_t                vendor;
        uint16_t                device;
        uint16_t                subsystem_vendor;
        uint16_t                subsystem_device;
        uint8_t                 revision;
        uint32_t                class;
        bool                    msi_enabled;
};

static inline device_t
pci_dev_dev(struct pci_dev *pdev)
{
        return pdev->pd_dev;
}
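
/*
 * Illustrative sketch of the devfn encoding used by the macros above:
 * the device number lives in bits 7:3 and the function number in bits
 * 2:0, so device 3, function 1 packs to 0x19 and the decoders recover
 * the two halves.  pdev->devfn is filled the same way from
 * pa_device/pa_function in linux_pci_dev_init below.
 *
 *      uint32_t devfn = PCI_DEVFN(3, 1);       == 0x19
 *      unsigned dev   = PCI_SLOT(devfn);       == 3
 *      unsigned fn    = PCI_FUNC(devfn);       == 1
 */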

static inline void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev,
    const struct pci_attach_args *pa, int kludges)
{
        const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
            PCI_SUBSYS_ID_REG);
        unsigned i;

        pdev->pd_pa = *pa;
        pdev->pd_kludges = kludges;
        pdev->pd_rom_vaddr = NULL;
        pdev->pd_dev = dev;
#if (NACPICA > 0)
        pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
            pa->pa_device, pa->pa_function);
#else
        pdev->pd_ad = NULL;
#endif
        pdev->bus = kmem_zalloc(sizeof(struct pci_bus), KM_SLEEP);
        pdev->bus->number = pa->pa_bus;
        pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
        pdev->vendor = PCI_VENDOR(pa->pa_id);
        pdev->device = PCI_PRODUCT(pa->pa_id);
        pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
        pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
        pdev->revision = PCI_REVISION(pa->pa_class);
        pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

        CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                const int reg = PCI_BAR(i);

                pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
                    pa->pa_tag, reg);
                if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
                        pdev->pd_resources[i].type,
                        &pdev->pd_resources[i].addr,
                        &pdev->pd_resources[i].size,
                        &pdev->pd_resources[i].flags)) {
                        pdev->pd_resources[i].addr = 0;
                        pdev->pd_resources[i].size = 0;
                        pdev->pd_resources[i].flags = 0;
                }
                pdev->pd_resources[i].kva = NULL;
        }
}

static inline int
pci_find_capability(struct pci_dev *pdev, int cap)
{
        return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
            NULL, NULL);
}

static inline int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{
        KASSERT(!ISSET(reg, 3));
        *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
        return 0;
}

static inline int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{
        KASSERT(!ISSET(reg, 1));
        *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
            (reg &~ 2)) >> (8 * (reg & 2));
        return 0;
}

static inline int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{
        *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
            (reg &~ 3)) >> (8 * (reg & 3));
        return 0;
}

static inline int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{
        KASSERT(!ISSET(reg, 3));
        pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
        return 0;
}

static inline void
pci_rmw_config(struct pci_dev *pdev, int reg, unsigned int bytes,
    uint32_t value)
{
        const uint32_t mask = ~((~0UL) << (8 * bytes));
        const int reg32 = (reg &~ 3);
        const unsigned int shift = (8 * (reg & 3));
        uint32_t value32;

        KASSERT(bytes <= 4);
        KASSERT(!ISSET(value, ~mask));
        pci_read_config_dword(pdev, reg32, &value32);
        value32 &=~ (mask << shift);
        value32 |= (value << shift);
        pci_write_config_dword(pdev, reg32, value32);
}

static inline int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{
        KASSERT(!ISSET(reg, 1));
        pci_rmw_config(pdev, reg, 2, value);
        return 0;
}

static inline int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{
        pci_rmw_config(pdev, reg, 1, value);
        return 0;
}

/*
 * XXX pci msi
 */
static inline int
pci_enable_msi(struct pci_dev *pdev)
{
        return -ENOSYS;
}

static inline void
pci_disable_msi(struct pci_dev *pdev __unused)
{
        KASSERT(pdev->msi_enabled);
}

static inline void
pci_set_master(struct pci_dev *pdev)
{
        pcireg_t csr;

        csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
            PCI_COMMAND_STATUS_REG);
        csr |= PCI_COMMAND_MASTER_ENABLE;
        pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
            PCI_COMMAND_STATUS_REG, csr);
}

static inline void
pci_clear_master(struct pci_dev *pdev)
{
        pcireg_t csr;

        csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
            PCI_COMMAND_STATUS_REG);
        csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
        pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
            PCI_COMMAND_STATUS_REG, csr);
}
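
/*
 * Illustrative sketch of the sub-dword config accessors above: every
 * access goes through an aligned 32-bit pci_conf_read/pci_conf_write,
 * and the narrower widths are emulated by shifting and masking within
 * the containing dword.  For example, for the byte-sized latency
 * timer register at offset 0x0d:
 *
 *      uint8_t lt;
 *
 *      pci_read_config_byte(pdev, 0x0d, &lt);
 *              reads the dword at 0x0c and shifts right by 8
 *      pci_write_config_byte(pdev, 0x0d, 0x40);
 *              read-modify-writes bits 15:8 of the dword at 0x0c
 *
 * Neighbouring bytes of the dword are preserved, but the
 * read-modify-write is not atomic with respect to other config-space
 * writers.
 */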

#define PCIBIOS_MIN_MEM         0       /* XXX bogus x86 kludge bollocks */

static inline bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
        panic("pcibios_align_resource has accessed unaligned neurons!");
}

static inline int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
        bus_size_t) __unused,
    struct pci_dev *pdev)
{
        const struct pci_attach_args *const pa = &pdev->pd_pa;
        bus_space_tag_t bst;
        int error;

        switch (resource->flags) {
        case IORESOURCE_MEM:
                bst = pa->pa_memt;
                break;

        case IORESOURCE_IO:
                bst = pa->pa_iot;
                break;

        default:
                panic("I don't know what kind of resource you want!");
        }

        resource->r_bst = bst;
        error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
            size, align, 0, 0, &resource->start, &resource->r_bsh);
        if (error)
                return error;

        resource->size = size;
        return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static inline int               /* XXX inline?  */
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

        if (pa->pa_bus != 0)
                return 0;
        if (pa->pa_device != 0)
                return 0;
        if (pa->pa_function != 0)
                return 0;

        return 1;
}

static inline struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
        struct pci_attach_args pa;

        KASSERT(bus == 0);
        KASSERT(slot == PCI_DEVFN(0, 0));

        if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
                return NULL;

        struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
        linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

        return pdev;
}
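
/*
 * Illustrative sketch of the only use the kludge above is meant to
 * serve: a caller along the lines of i915_get_bridge_dev looks up the
 * host bridge at bus 0, device 0, function 0 and eventually releases
 * it with pci_dev_put (defined below).  The error handling here is a
 * stand-in; anything other than bus 0, devfn 0 trips the KASSERTs.
 *
 *      struct pci_dev *bridge;
 *
 *      bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
 *      if (bridge == NULL)
 *              return -ENXIO;
 *      ...use bridge's config space...
 *      pci_dev_put(bridge);
 */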

static inline int               /* XXX inline?  */
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

        if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
                return 0;
        if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
                return 0;

        return 1;
}

static inline void
pci_dev_put(struct pci_dev *pdev)
{

        if (pdev == NULL)
                return;

        KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
        if (pdev->bus != NULL)
                kmem_free(pdev->bus, sizeof(*pdev->bus));
        kmem_free(pdev, sizeof(*pdev));
}

static inline struct pci_dev *
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
        struct pci_attach_args pa;

        KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

        if (from != NULL) {
                pci_dev_put(from);
                return NULL;
        }

        if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
                return NULL;

        struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
        linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

        return pdev;
}

#define __pci_rom_iomem

static inline void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

        KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
        KASSERT(vaddr == pdev->pd_rom_vaddr);
        bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
        pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
        pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this to sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
        const bus_addr_t rom_base = 0xc0000;
        const bus_size_t rom_size = 0x20000;
        bus_space_handle_t rom_bsh;
        int error;

        if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
                return ENXIO;
        if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
                return ENXIO;
        /* XXX Check whether this is the primary VGA card?  */
        error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
            (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
        if (error)
                return ENXIO;

        pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
        pdev->pd_rom_bsh = rom_bsh;
        pdev->pd_rom_size = rom_size;

        return 0;
#else
        return ENXIO;
#endif
}
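
/*
 * Illustrative sketch of the expected ROM discipline for pci_map_rom
 * below and pci_unmap_rom above: map, copy out what is needed, then
 * unmap with the pointer pci_map_rom returned (pci_unmap_rom asserts
 * it matches pd_rom_vaddr).  buf and buflen are stand-ins.
 *
 *      size_t romsize;
 *      void __pci_rom_iomem *rom;
 *
 *      rom = pci_map_rom(pdev, &romsize);
 *      if (rom == NULL)
 *              ...no usable ROM...
 *      memcpy(buf, rom, MIN(buflen, romsize));
 *      pci_unmap_rom(pdev, rom);
 */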

static inline void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{
        bus_space_handle_t bsh;
        bus_size_t size;

        KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

        if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
            (BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
            &pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
            != 0 &&
            pci_map_rom_md(pdev) != 0)
                return NULL;
        pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

        /* XXX This type is obviously wrong in general...  */
        if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
            pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86, &bsh, &size)) {
                pci_unmap_rom(pdev, NULL);
                return NULL;
        }

        KASSERT(size <= SIZE_T_MAX);
        *sizep = size;
        pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst, bsh);
        return pdev->pd_rom_vaddr;
}

static inline bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

        KASSERT(i < PCI_NUM_RESOURCES);
        return pdev->pd_resources[i].addr;
}

static inline bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

        KASSERT(i < PCI_NUM_RESOURCES);
        return pdev->pd_resources[i].size;
}

static inline bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

        return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

static inline int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

        KASSERT(i < PCI_NUM_RESOURCES);
        return pdev->pd_resources[i].flags;
}

static inline void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
        int error;

        KASSERT(i < PCI_NUM_RESOURCES);
        KASSERT(pdev->pd_resources[i].kva == NULL);

        if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
                return NULL;
        if (pdev->pd_resources[i].size < size)
                return NULL;
        error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
            size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
            &pdev->pd_resources[i].bsh);
        if (error) {
                /* Horrible hack: try asking the fake AGP device.  */
                if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
                        &pdev->pd_resources[i].bsh))
                        return NULL;
        }
        pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
        pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
            pdev->pd_resources[i].bsh);

        return pdev->pd_resources[i].kva;
}

static inline void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
        unsigned i;

        CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                if (pdev->pd_resources[i].kva == kva)
                        break;
        }
        KASSERT(i < PCI_NUM_RESOURCES);

        pdev->pd_resources[i].kva = NULL;
        bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
            pdev->pd_resources[i].size);
}

static inline void
pci_save_state(struct pci_dev *pdev)
{

        KASSERT(pdev->pd_saved_state == NULL);
        pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
            KM_SLEEP);
        pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
            pdev->pd_saved_state);
}

static inline void
pci_restore_state(struct pci_dev *pdev)
{

        KASSERT(pdev->pd_saved_state != NULL);
        pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
            pdev->pd_saved_state);
        kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
        pdev->pd_saved_state = NULL;
}

static inline bool
pci_is_pcie(struct pci_dev *pdev)
{

        return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

static inline bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

        /* XXX Cop-out.  */
        if (mask > DMA_BIT_MASK(32))
                return pci_dma64_available(&pdev->pd_pa);
        else
                return true;
}

#endif  /* _LINUX_PCI_H_ */
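
/*
 * Illustrative trailing note, not part of the API above: the BAR
 * helpers are meant to be used in matched pairs, with the length taken
 * from the shadowed resource table.  pci_iounmap unmaps the full
 * resource size recorded at initialization time, so mapping the whole
 * BAR, as sketched here for BAR 0, keeps the map and unmap sizes
 * consistent.
 *
 *      void __pci_iomem *regs;
 *
 *      regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
 *      if (regs == NULL)
 *              ...fail or fall back...
 *      ...
 *      pci_iounmap(pdev, regs);
 */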