/*	$NetBSD: linux_pci.c,v 1.4 2018/08/27 14:19:59 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.4 2018/08/27 14:19:59 riastradh Exp $");

#include <linux/pci.h>

device_t
pci_dev_dev(struct pci_dev *pdev)
{

	return pdev->pd_dev;
}

/* XXX Nouveau kludge!  */
struct drm_device *
pci_get_drvdata(struct pci_dev *pdev)
{

	return pdev->pd_drm_dev;
}

void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev)); /* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

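	/*
	 * Record the standard BARs: probe each one's type with
	 * pci_mapreg_type and its address/size/flags with pci_mapreg_info,
	 * leaving zeroed entries for BARs that are not implemented.
	 */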
	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}

int
pci_find_capability(struct pci_dev *pdev, int cap)
{

	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{

	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

int
pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg & ~3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
	return 0;
}

int
pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(bus->pb_pc, tag, reg, value);
	return 0;
}

static void
pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	value32 = pci_conf_read(pc, tag, reg32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_conf_write(pc, tag, reg32, value32);
}

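/*
 * The sub-dword config writes below go through pci_rmw_config so that
 * the other bytes of the containing 32-bit register are preserved.
 */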
int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
	return 0;
}

int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{

	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
	return 0;
}

int
pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
	return 0;
}

int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
	return 0;
}

int
pci_enable_msi(struct pci_dev *pdev)
{
#ifdef notyet
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
#else
	return -ENOSYS;
#endif
}

void
pci_disable_msi(struct pci_dev *pdev __unused)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->pd_intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
		pdev->pd_intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static int
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static int
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

struct pci_dev *		/* XXX i915 kludge */
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
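	/*
	 * Map the legacy PC video BIOS area at 0xc0000-0xdffff, where the
	 * VGA option ROM traditionally lives on x86-style machines.
	 */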
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

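/*
 * pci_enable_rom and pci_disable_rom toggle the address decoder enable
 * bit in the device's expansion ROM base address register.
 */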
int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error) {
		/* Horrible hack: try asking the fake AGP device.  */
		if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
			&pdev->pd_resources[i].bsh))
			return NULL;
	}
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}

void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

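/*
 * For DMA masks wider than 32 bits, report whether the platform's PCI
 * bus can do 64-bit DMA; 32-bit DMA is assumed to always be available.
 */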
bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

bool
pci_is_root_bus(struct pci_bus *bus)
{

	/* XXX Cop-out.  */
	return false;
}

int
pci_domain_nr(struct pci_bus *bus)
{

	return device_unit(bus->pb_dev);
}

/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them, since NetBSD's PCI API does _not_ respect
 * our local enablecnt here, but there are different parts of NetBSD
 * that automatically enable/disable like PMF, so you have to decide
 * for each one whether to call it or not.
 */

int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them.  */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}

void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}

void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = 0;
	}
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use.  */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}