1 /* $NetBSD: linux_pci.c,v 1.16 2021/12/19 10:57:42 riastradh Exp $ */ 2 3 /*- 4 * Copyright (c) 2013 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Taylor R. Campbell. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
30 */ 31 32 #ifdef _KERNEL_OPT 33 #include "opt_pci.h" 34 #endif 35 36 #include <sys/cdefs.h> 37 __KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.16 2021/12/19 10:57:42 riastradh Exp $"); 38 39 #if NACPICA > 0 40 #include <dev/acpi/acpivar.h> 41 #include <dev/acpi/acpi_pci.h> 42 #endif 43 44 #include <linux/pci.h> 45 46 #include <drm/drm_agp_netbsd.h> 47 48 device_t 49 pci_dev_dev(struct pci_dev *pdev) 50 { 51 52 return pdev->pd_dev; 53 } 54 55 void 56 pci_set_drvdata(struct pci_dev *pdev, void *drvdata) 57 { 58 pdev->pd_drvdata = drvdata; 59 } 60 61 void * 62 pci_get_drvdata(struct pci_dev *pdev) 63 { 64 return pdev->pd_drvdata; 65 } 66 67 void 68 linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent, 69 const struct pci_attach_args *pa, int kludges) 70 { 71 const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag, 72 PCI_SUBSYS_ID_REG); 73 unsigned i; 74 75 memset(pdev, 0, sizeof(*pdev)); /* paranoia */ 76 77 pdev->pd_pa = *pa; 78 pdev->pd_kludges = kludges; 79 pdev->pd_rom_vaddr = NULL; 80 pdev->pd_dev = dev; 81 #if (NACPICA > 0) 82 #ifdef __HAVE_PCI_GET_SEGMENT 83 const int seg = pci_get_segment(pa->pa_pc); 84 #else 85 const int seg = 0; 86 #endif 87 pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus, 88 pa->pa_device, pa->pa_function); 89 #else 90 pdev->pd_ad = NULL; 91 #endif 92 pdev->pd_saved_state = NULL; 93 pdev->pd_intr_handles = NULL; 94 pdev->pd_drvdata = NULL; 95 pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP); 96 pdev->bus->pb_pc = pa->pa_pc; 97 pdev->bus->pb_dev = parent; 98 pdev->bus->number = pa->pa_bus; 99 pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function); 100 pdev->vendor = PCI_VENDOR(pa->pa_id); 101 pdev->device = PCI_PRODUCT(pa->pa_id); 102 pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id); 103 pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id); 104 pdev->revision = PCI_REVISION(pa->pa_class); 105 pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? 
*/ 106 107 CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES); 108 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 109 const int reg = PCI_BAR(i); 110 111 pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc, 112 pa->pa_tag, reg); 113 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg, 114 pdev->pd_resources[i].type, 115 &pdev->pd_resources[i].addr, 116 &pdev->pd_resources[i].size, 117 &pdev->pd_resources[i].flags)) { 118 pdev->pd_resources[i].addr = 0; 119 pdev->pd_resources[i].size = 0; 120 pdev->pd_resources[i].flags = 0; 121 } 122 pdev->pd_resources[i].kva = NULL; 123 pdev->pd_resources[i].mapped = false; 124 } 125 } 126 127 int 128 pci_find_capability(struct pci_dev *pdev, int cap) 129 { 130 131 return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap, 132 NULL, NULL); 133 } 134 135 int 136 pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep) 137 { 138 139 KASSERT(!ISSET(reg, 3)); 140 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg); 141 return 0; 142 } 143 144 int 145 pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep) 146 { 147 148 KASSERT(!ISSET(reg, 1)); 149 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, 150 (reg &~ 2)) >> (8 * (reg & 2)); 151 return 0; 152 } 153 154 int 155 pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep) 156 { 157 158 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, 159 (reg &~ 3)) >> (8 * (reg & 3)); 160 return 0; 161 } 162 163 int 164 pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value) 165 { 166 167 KASSERT(!ISSET(reg, 3)); 168 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value); 169 return 0; 170 } 171 172 int 173 pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg, 174 uint32_t *valuep) 175 { 176 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn), 177 PCI_FUNC(devfn)); 178 179 KASSERT(!ISSET(reg, 1)); 180 *valuep = pci_conf_read(bus->pb_pc, tag, 
reg & ~3) >> (8 * (reg & 3)); 181 return 0; 182 } 183 184 int 185 pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg, 186 uint16_t *valuep) 187 { 188 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn), 189 PCI_FUNC(devfn)); 190 191 KASSERT(!ISSET(reg, 1)); 192 *valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2)); 193 return 0; 194 } 195 196 int 197 pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg, 198 uint8_t *valuep) 199 { 200 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn), 201 PCI_FUNC(devfn)); 202 203 *valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3)); 204 return 0; 205 } 206 207 int 208 pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg, 209 uint32_t value) 210 { 211 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn), 212 PCI_FUNC(devfn)); 213 214 KASSERT(!ISSET(reg, 3)); 215 pci_conf_write(bus->pb_pc, tag, reg, value); 216 return 0; 217 } 218 219 static void 220 pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes, 221 uint32_t value) 222 { 223 const uint32_t mask = ~((~0UL) << (8 * bytes)); 224 const int reg32 = (reg &~ 3); 225 const unsigned int shift = (8 * (reg & 3)); 226 uint32_t value32; 227 228 KASSERT(bytes <= 4); 229 KASSERT(!ISSET(value, ~mask)); 230 value32 = pci_conf_read(pc, tag, reg32); 231 value32 &=~ (mask << shift); 232 value32 |= (value << shift); 233 pci_conf_write(pc, tag, reg32, value32); 234 } 235 236 int 237 pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value) 238 { 239 240 KASSERT(!ISSET(reg, 1)); 241 pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value); 242 return 0; 243 } 244 245 int 246 pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value) 247 { 248 249 pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value); 250 return 0; 251 } 252 253 int 254 pci_bus_write_config_word(struct pci_bus *bus, 
unsigned devfn, int reg, 255 uint16_t value) 256 { 257 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn), 258 PCI_FUNC(devfn)); 259 260 KASSERT(!ISSET(reg, 1)); 261 pci_rmw_config(bus->pb_pc, tag, reg, 2, value); 262 return 0; 263 } 264 265 int 266 pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg, 267 uint8_t value) 268 { 269 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn), 270 PCI_FUNC(devfn)); 271 272 pci_rmw_config(bus->pb_pc, tag, reg, 1, value); 273 return 0; 274 } 275 276 int 277 pci_enable_msi(struct pci_dev *pdev) 278 { 279 const struct pci_attach_args *const pa = &pdev->pd_pa; 280 281 if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1)) 282 return -EINVAL; 283 284 pdev->msi_enabled = 1; 285 return 0; 286 } 287 288 void 289 pci_disable_msi(struct pci_dev *pdev __unused) 290 { 291 const struct pci_attach_args *const pa = &pdev->pd_pa; 292 293 if (pdev->pd_intr_handles != NULL) { 294 pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1); 295 pdev->pd_intr_handles = NULL; 296 } 297 pdev->msi_enabled = 0; 298 } 299 300 void 301 pci_set_master(struct pci_dev *pdev) 302 { 303 pcireg_t csr; 304 305 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, 306 PCI_COMMAND_STATUS_REG); 307 csr |= PCI_COMMAND_MASTER_ENABLE; 308 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, 309 PCI_COMMAND_STATUS_REG, csr); 310 } 311 312 void 313 pci_clear_master(struct pci_dev *pdev) 314 { 315 pcireg_t csr; 316 317 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, 318 PCI_COMMAND_STATUS_REG); 319 csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE; 320 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, 321 PCI_COMMAND_STATUS_REG, csr); 322 } 323 324 bus_addr_t 325 pcibios_align_resource(void *p, const struct resource *resource, 326 bus_addr_t addr, bus_size_t size) 327 { 328 panic("pcibios_align_resource has accessed unaligned neurons!"); 329 } 330 331 int 332 pci_bus_alloc_resource(struct pci_bus *bus, 
struct resource *resource, 333 bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused, 334 bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t, 335 bus_size_t) __unused, 336 struct pci_dev *pdev) 337 { 338 const struct pci_attach_args *const pa = &pdev->pd_pa; 339 bus_space_tag_t bst; 340 int error; 341 342 switch (resource->flags) { 343 case IORESOURCE_MEM: 344 bst = pa->pa_memt; 345 break; 346 347 case IORESOURCE_IO: 348 bst = pa->pa_iot; 349 break; 350 351 default: 352 panic("I don't know what kind of resource you want!"); 353 } 354 355 resource->r_bst = bst; 356 error = bus_space_alloc(bst, start, __type_max(bus_addr_t), 357 size, align, 0, 0, &resource->start, &resource->r_bsh); 358 if (error) 359 return error; 360 361 resource->end = start + (size - 1); 362 return 0; 363 } 364 365 /* 366 * XXX Mega-kludgerific! pci_get_bus_and_slot and pci_get_class are 367 * defined only for their single purposes in i915drm, in 368 * i915_get_bridge_dev and intel_detect_pch. We can't define them more 369 * generally without adapting pci_find_device (and pci_enumerate_bus 370 * internally) to pass a cookie through. 
 */

/* Match callback: true iff the device is bus 0, device 0, function 0. */
static int
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	/* XXX domain */
	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

/*
 * Kludge for i915_get_bridge_dev: only domain 0 / bus 0 / devfn 0 is
 * supported (asserted).  Returns a freshly allocated pci_dev that must
 * be released with pci_dev_put, or NULL if not found.
 */
struct pci_dev *
pci_get_domain_bus_and_slot(int domain, int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(domain == 0);
	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

/* Match callback: true iff the device is a PCI-ISA bridge. */
static int
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

/*
 * Release a pci_dev obtained from pci_get_domain_bus_and_slot or
 * pci_get_class.  Only valid for those kludge-allocated devices
 * (asserted via NBPCI_KLUDGE_GET_MUMBLE); frees both the pci_bus and
 * the pci_dev.  NULL is a no-op, matching Linux.
 */
void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

/*
 * Kludge for intel_detect_pch: only the ISA-bridge class is supported
 * (asserted).  A non-NULL `from' means "give me the next match"; since
 * we only ever return the first one, that ends the iteration: release
 * `from' and return NULL.
 */
struct pci_dev *		/* XXX i915 kludge */
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

/*
 * Undo pci_map_rom/pci_map_rom_md.  `vaddr' must match what pci_map_rom
 * returned (NULL during pci_map_rom's own error paths, before
 * pd_rom_vaddr is set).
 */
void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder. */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/*
 * Machine-dependent fallback for pci_map_rom: on x86-family machines,
 * map the legacy VGA BIOS shadow at 0xc0000 for VGA display devices.
 * Returns 0 on success, ENXIO otherwise.
 */
/* XXX Whattakludge!  Should move this in sys/arch/. */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card? */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

/*
 * Map the device's expansion ROM and locate an x86 code image in it.
 * Tries the ROM BAR first; if that fails (or contains no usable
 * image), falls back to the MD legacy mapping above.  On success
 * stores the image size in *sizep and returns its kernel virtual
 * address; returns NULL on failure.
 */
void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		/* No x86 image in the BAR ROM; unmap and try the MD way. */
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

/* No platform (firmware-provided) ROM on NetBSD: always empty. */
void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

/*
 * Turn the ROM address decoder on/off via the ROM BAR enable bit.
 * splhigh() makes the read-modify-write of the register atomic with
 * respect to interrupts on this CPU.
 */
int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there. */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

/*
 * BAR accessors, served from the snapshot taken in linux_pci_dev_init.
 */
bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

/*
 * Map the first `size' bytes of memory BAR `i' linearly and return its
 * kernel virtual address, or NULL on failure (I/O BAR, BAR too small,
 * or bus_space_map error).  Unmap with pci_iounmap.
 */
void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	/* XXX Synchronize with drm_agp_borrow_hook in drm_agpsupport.c. */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}

/*
 * Unmap a mapping created by pci_iomap, identified by the kva it
 * returned (asserted to match one of the recorded resources).
 * NOTE(review): unmaps with the resource's full .size even though
 * pci_iomap may have mapped only a `size' prefix -- confirm callers
 * always map the whole BAR.
 */
void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

/*
 * Capture/restore the device's config space around a suspend or reset.
 * Only one saved state may be outstanding at a time (asserted);
 * pci_restore_state frees it.
 */
void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

/* True iff the device has a PCI Express capability. */
bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

/*
 * Can the platform DMA to/from all addresses covered by `mask'?
 * 32-bit masks are always supported; wider masks require 64-bit DMA.
 */
bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out. */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

bool
pci_is_thunderbolt_attached(struct pci_dev *pdev)
{

	/* XXX Cop-out. */
	return false;
}

bool
pci_is_root_bus(struct pci_bus *bus)
{

	/* XXX Cop-out. */
	return false;
}

/*
 * Linux domain number: stand in the autoconf unit number of the bus's
 * parent device.
 */
int
pci_domain_nr(struct pci_bus *bus)
{

	return device_unit(bus->pb_dev);
}

/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them, since NetBSD's PCI API does _not_ respect
 * our local enablecnt here, but there are different parts of NetBSD
 * that automatically enable/disable like PMF, so you have to decide
 * for each one whether to call it or not.
 */

int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	/* Already enabled by us: just bump the count. */
	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them. */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}

void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	/* Only the last disable actually turns decoding off. */
	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}

/*
 * Tear down everything linux_pci_dev_init set up: the pci_bus, any
 * mapped ROM, and any BAR mappings made by pci_iomap.  Saved state and
 * MSI handles must already have been released (asserted).
 */
void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = 0;
	}
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use. */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}

/* Linux dev_is_pci: every pci_dev we hand out is a PCI device. */
bool
dev_is_pci(struct pci_dev *pdev)
{
	return pdev != NULL;
}