/*	$OpenBSD: phb.c,v 1.16 2020/09/21 11:14:28 kettenis Exp $	*/
/*
 * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/extent.h>
#include <sys/malloc.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/opal.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

extern paddr_t physmax;		/* machdep.c */

#define IODA_TVE_SELECT		(1ULL << 59)
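
/*
 * Each IODA PE has two DMA windows (TVEs), and DMA address bit 59
 * selects between them.  phb_attach() below programs the second TVE
 * of our single PE as an untranslated bypass window covering all of
 * physical memory, and the dmamap load hooks at the end of this file
 * set this bit on every segment so device DMA uses that window.
 */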

struct phb_range {
	uint32_t		flags;
	uint64_t		pci_base;
	uint64_t		phys_base;
	uint64_t		size;
};

struct phb_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_dma_tag_t		sc_dmat;

	int			sc_node;
	int			sc_acells;
	int			sc_scells;
	int			sc_pacells;
	int			sc_pscells;
	struct phb_range	*sc_ranges;
	int			sc_nranges;

	uint64_t		sc_phb_id;
	uint64_t		sc_pe_number;
	uint32_t		sc_msi_ranges[2];
	uint32_t		sc_xive;

	struct bus_space	sc_bus_iot;
	struct bus_space	sc_bus_memt;
	struct machine_bus_dma_tag sc_bus_dmat;

	struct ppc64_pci_chipset sc_pc;
	struct extent		*sc_busex;
	struct extent		*sc_memex;
	struct extent		*sc_ioex;
	int			sc_bus;
};

int	phb_match(struct device *, void *, void *);
void	phb_attach(struct device *, struct device *, void *);

struct cfattach phb_ca = {
	sizeof (struct phb_softc), phb_match, phb_attach
};

struct cfdriver phb_cd = {
	NULL, "phb", DV_DULL
};

void	phb_attach_hook(struct device *, struct device *,
	    struct pcibus_attach_args *);
int	phb_bus_maxdevs(void *, int);
pcitag_t phb_make_tag(void *, int, int, int);
void	phb_decompose_tag(void *, pcitag_t, int *, int *, int *);
int	phb_conf_size(void *, pcitag_t);
pcireg_t phb_conf_read(void *, pcitag_t, int);
void	phb_conf_write(void *, pcitag_t, int, pcireg_t);

int	phb_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
const char *phb_intr_string(void *, pci_intr_handle_t);
void	*phb_intr_establish(void *, pci_intr_handle_t, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	phb_intr_disestablish(void *, void *);

int	phb_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
int	phb_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
paddr_t	phb_bs_mmap(bus_space_tag_t, bus_addr_t, off_t, int, int);
int	phb_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, paddr_t *, int *, int);
int	phb_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);

int
phb_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "ibm,ioda2-phb") ||
	    OF_is_compatible(faa->fa_node, "ibm,ioda3-phb"));
}

void
phb_attach(struct device *parent, struct device *self, void *aux)
{
	struct phb_softc *sc = (struct phb_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct pcibus_attach_args pba;
	uint32_t bus_range[2];
	uint32_t *ranges;
	uint32_t m64window[6];
	uint32_t m64ranges[2];
	int i, j, nranges, rangeslen;
	uint32_t window;
	uint32_t chip_id;
	int64_t error;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;
	sc->sc_phb_id = OF_getpropint64(sc->sc_node, "ibm,opal-phbid", 0);
	sc->sc_pe_number = 0;

	if (OF_getproplen(sc->sc_node, "ibm,chip-id") == sizeof(chip_id)) {
		chip_id = OF_getpropint(sc->sc_node, "ibm,chip-id", 0);
		printf(": chip 0x%x", chip_id);
	}

	/*
	 * Reset the IODA tables.  Should clear any gunk left behind
	 * by Linux.
	 */
	error = opal_pci_reset(sc->sc_phb_id, OPAL_RESET_PCI_IODA_TABLE,
	    OPAL_ASSERT_RESET);
	if (error != OPAL_SUCCESS) {
		printf(": can't reset IODA table\n");
		return;
	}

	/*
	 * Keep things simple and use a single PE for everything below
	 * this host bridge.
	 */
	error = opal_pci_set_pe(sc->sc_phb_id, sc->sc_pe_number, 0,
	    OPAL_IGNORE_RID_BUS_NUMBER, OPAL_IGNORE_RID_DEVICE_NUMBER,
	    OPAL_IGNORE_RID_FUNCTION_NUMBER, OPAL_MAP_PE);
	if (error != OPAL_SUCCESS) {
		printf(": can't map PHB PE\n");
		return;
	}

	/* Enable bypass mode. */
	error = opal_pci_map_pe_dma_window_real(sc->sc_phb_id,
	    sc->sc_pe_number, (sc->sc_pe_number << 1) | 1,
	    IODA_TVE_SELECT, physmax);
	if (error != OPAL_SUCCESS) {
		printf(": can't enable DMA bypass\n");
		return;
	}

	/*
	 * Parse address ranges such that we can do the appropriate
	 * address translations.
	 */

	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
	    faa->fa_acells);
	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
	    faa->fa_scells);
	sc->sc_pacells = faa->fa_acells;
	sc->sc_pscells = faa->fa_scells;

	rangeslen = OF_getproplen(sc->sc_node, "ranges");
	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
	    (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
	    sc->sc_pacells + sc->sc_scells)) {
		printf(": invalid ranges property\n");
		return;
	}

	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges,
	    rangeslen);

	/*
	 * Reserve an extra slot here and make sure it is filled
	 * with zeroes.
	 */
	nranges = (rangeslen / sizeof(uint32_t)) /
	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
	sc->sc_ranges = mallocarray(nranges + 1,
	    sizeof(struct phb_range), M_DEVBUF, M_ZERO | M_WAITOK);
	sc->sc_nranges = nranges + 1;

	for (i = 0, j = 0; i < nranges; i++) {
		sc->sc_ranges[i].flags = ranges[j++];
		sc->sc_ranges[i].pci_base = ranges[j++];
		if (sc->sc_acells - 1 == 2) {
			sc->sc_ranges[i].pci_base <<= 32;
			sc->sc_ranges[i].pci_base |= ranges[j++];
		}
		sc->sc_ranges[i].phys_base = ranges[j++];
		if (sc->sc_pacells == 2) {
			sc->sc_ranges[i].phys_base <<= 32;
			sc->sc_ranges[i].phys_base |= ranges[j++];
		}
		sc->sc_ranges[i].size = ranges[j++];
		if (sc->sc_scells == 2) {
			sc->sc_ranges[i].size <<= 32;
			sc->sc_ranges[i].size |= ranges[j++];
		}
	}

	free(ranges, M_TEMP, rangeslen);
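
	/*
	 * Worked example, assuming the usual #address-cells=3 and
	 * #size-cells=2 on this node and #address-cells=2 on the
	 * parent: the "ranges" entry
	 *
	 *   <0x02000000 0x0 0x80000000  0x6 0x00000000  0x0 0x80000000>
	 *
	 * decodes to flags 0x02000000 (32-bit mmio), pci_base
	 * 0x80000000, phys_base 0x600000000 and size 0x80000000 (2GB).
	 */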

	/*
	 * IBM has chosen a non-standard way to encode 64-bit mmio
	 * ranges.  Stick the information into the slot we reserved
	 * above.
	 */
	if (OF_getpropintarray(sc->sc_node, "ibm,opal-m64-window",
	    m64window, sizeof(m64window)) == sizeof(m64window)) {
		sc->sc_ranges[sc->sc_nranges - 1].flags = 0x03000000;
		sc->sc_ranges[sc->sc_nranges - 1].pci_base =
		    (uint64_t)m64window[0] << 32 | m64window[1];
		sc->sc_ranges[sc->sc_nranges - 1].phys_base =
		    (uint64_t)m64window[2] << 32 | m64window[3];
		sc->sc_ranges[sc->sc_nranges - 1].size =
		    (uint64_t)m64window[4] << 32 | m64window[5];
	}

	/*
	 * Enable all the 64-bit mmio windows we found.
	 */
	m64ranges[0] = 0; m64ranges[1] = 16;
	OF_getpropintarray(sc->sc_node, "ibm,opal-available-m64-ranges",
	    m64ranges, sizeof(m64ranges));
	window = m64ranges[0];
	for (i = 0; i < sc->sc_nranges; i++) {
		/* Skip non-64-bit ranges. */
		if ((sc->sc_ranges[i].flags & 0x03000000) != 0x03000000)
			continue;

		/* Bail if we're out of 64-bit mmio windows. */
		if (window > m64ranges[1]) {
			printf(": no 64-bit mmio window available\n");
			return;
		}

		error = opal_pci_set_phb_mem_window(sc->sc_phb_id,
		    OPAL_M64_WINDOW_TYPE, window, sc->sc_ranges[i].phys_base,
		    sc->sc_ranges[i].pci_base, sc->sc_ranges[i].size);
		if (error != OPAL_SUCCESS) {
			printf(": can't set 64-bit mmio window\n");
			return;
		}
		error = opal_pci_phb_mmio_enable(sc->sc_phb_id,
		    OPAL_M64_WINDOW_TYPE, window, OPAL_ENABLE_M64_SPLIT);
		if (error != OPAL_SUCCESS) {
			printf(": can't enable 64-bit mmio window\n");
			return;
		}

		window++;
	}

	OF_getpropintarray(sc->sc_node, "ibm,opal-msi-ranges",
	    sc->sc_msi_ranges, sizeof(sc->sc_msi_ranges));

	/* Create extents for our address spaces. */
	sc->sc_busex = extent_create("pcibus", 0, 255,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_FILLED);
	sc->sc_memex = extent_create("pcimem", 0, (u_long)-1,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_FILLED);
	sc->sc_ioex = extent_create("pciio", 0, 0xffffffff,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_FILLED);

	/* Set up bus range. */
	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
	    sizeof(bus_range)) != sizeof(bus_range) ||
	    bus_range[0] >= 256 || bus_range[1] >= 256) {
		bus_range[0] = 0;
		bus_range[1] = 255;
	}
	sc->sc_bus = bus_range[0];
	extent_free(sc->sc_busex, bus_range[0],
	    bus_range[1] - bus_range[0] + 1, EX_WAITOK);

	/* Set up mmio ranges. */
	for (i = 0; i < sc->sc_nranges; i++) {
		if ((sc->sc_ranges[i].flags & 0x02000000) != 0x02000000)
			continue;

		extent_free(sc->sc_memex, sc->sc_ranges[i].pci_base,
		    sc->sc_ranges[i].size, EX_WAITOK);
	}

	printf("\n");
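
	/*
	 * PCI is little-endian while the CPU runs big-endian here, so
	 * hand out copies of the generic bus space tag with the
	 * multi-byte accessors replaced by byte-swapping variants.
	 */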
	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
	sc->sc_bus_iot.bus_private = sc;
	sc->sc_bus_iot._space_map = phb_bs_iomap;
	sc->sc_bus_iot._space_read_2 = little_space_read_2;
	sc->sc_bus_iot._space_read_4 = little_space_read_4;
	sc->sc_bus_iot._space_read_8 = little_space_read_8;
	sc->sc_bus_iot._space_write_2 = little_space_write_2;
	sc->sc_bus_iot._space_write_4 = little_space_write_4;
	sc->sc_bus_iot._space_write_8 = little_space_write_8;
	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
	sc->sc_bus_memt.bus_private = sc;
	sc->sc_bus_memt._space_map = phb_bs_memmap;
	sc->sc_bus_memt._space_mmap = phb_bs_mmap;
	sc->sc_bus_memt._space_read_2 = little_space_read_2;
	sc->sc_bus_memt._space_read_4 = little_space_read_4;
	sc->sc_bus_memt._space_read_8 = little_space_read_8;
	sc->sc_bus_memt._space_write_2 = little_space_write_2;
	sc->sc_bus_memt._space_write_4 = little_space_write_4;
	sc->sc_bus_memt._space_write_8 = little_space_write_8;

	memcpy(&sc->sc_bus_dmat, sc->sc_dmat, sizeof(sc->sc_bus_dmat));
	sc->sc_bus_dmat._cookie = sc;
	sc->sc_bus_dmat._dmamap_load_buffer = phb_dmamap_load_buffer;
	sc->sc_bus_dmat._dmamap_load_raw = phb_dmamap_load_raw;

	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = phb_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = phb_bus_maxdevs;
	sc->sc_pc.pc_make_tag = phb_make_tag;
	sc->sc_pc.pc_decompose_tag = phb_decompose_tag;
	sc->sc_pc.pc_conf_size = phb_conf_size;
	sc->sc_pc.pc_conf_read = phb_conf_read;
	sc->sc_pc.pc_conf_write = phb_conf_write;

	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = phb_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = phb_intr_string;
	sc->sc_pc.pc_intr_establish = phb_intr_establish;
	sc->sc_pc.pc_intr_disestablish = phb_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = &sc->sc_bus_iot;
	pba.pba_memt = &sc->sc_bus_memt;
	pba.pba_dmat = &sc->sc_bus_dmat;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_busex = sc->sc_busex;
	pba.pba_memex = sc->sc_memex;
	pba.pba_ioex = sc->sc_ioex;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;
	pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;

	config_found(self, &pba, NULL);
}

void
phb_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}

int
phb_bus_maxdevs(void *v, int bus)
{
	struct phb_softc *sc = v;

	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
		return 1;
	return 32;
}

int
phb_find_node(int node, int bus, int device, int function)
{
	uint32_t reg[5];
	uint32_t phys_hi;
	int child;

	phys_hi = ((bus << 16) | (device << 11) | (function << 8));

	for (child = OF_child(node); child; child = OF_peer(child)) {
		if (OF_getpropintarray(child, "reg",
		    reg, sizeof(reg)) != sizeof(reg))
			continue;

		if (reg[0] == phys_hi)
			return child;

		node = phb_find_node(child, bus, device, function);
		if (node)
			return node;
	}

	return 0;
}
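
/*
 * Tags carry the matching device tree node in their upper 32 bits
 * and the standard bus/device/function encoding in the low bits,
 * e.g. bus 1, device 0, function 0 yields 0x100 in the low word.
 */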
pcitag_t
phb_make_tag(void *v, int bus, int device, int function)
{
	struct phb_softc *sc = v;
	int node;

	node = phb_find_node(sc->sc_node, bus, device, function);
	return (((pcitag_t)node << 32) |
	    (bus << 8) | (device << 3) | (function << 0));
}

void
phb_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
{
	if (bp != NULL)
		*bp = (tag >> 8) & 0xff;
	if (dp != NULL)
		*dp = (tag >> 3) & 0x1f;
	if (fp != NULL)
		*fp = (tag >> 0) & 0x7;
}

int
phb_conf_size(void *v, pcitag_t tag)
{
	return PCIE_CONFIG_SPACE_SIZE;
}

pcireg_t
phb_conf_read(void *v, pcitag_t tag, int reg)
{
	struct phb_softc *sc = v;
	int64_t error;
	uint32_t data;
	uint16_t pci_error_state;
	uint8_t freeze_state;

	tag = PCITAG_OFFSET(tag);
	error = opal_pci_config_read_word(sc->sc_phb_id,
	    tag, reg, opal_phys(&data));
	if (error == OPAL_SUCCESS && data != 0xffffffff)
		return data;

	/*
	 * Probing hardware that isn't there may put the host bridge
	 * in an error state.  Clear the error.
	 */
	error = opal_pci_eeh_freeze_status(sc->sc_phb_id, sc->sc_pe_number,
	    opal_phys(&freeze_state), opal_phys(&pci_error_state), NULL);
	if (freeze_state)
		opal_pci_eeh_freeze_clear(sc->sc_phb_id, sc->sc_pe_number,
		    OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	return 0xffffffff;
}

void
phb_conf_write(void *v, pcitag_t tag, int reg, pcireg_t data)
{
	struct phb_softc *sc = v;

	tag = PCITAG_OFFSET(tag);
	opal_pci_config_write_word(sc->sc_phb_id, tag, reg, data);
}

int
phb_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
{
	int pin = pa->pa_rawintrpin;

	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
		return -1;

	if (pa->pa_tag == 0)
		return -1;

	ihp->ih_pc = pa->pa_pc;
	ihp->ih_tag = pa->pa_intrtag;
	ihp->ih_intrpin = pa->pa_intrpin;
	ihp->ih_type = PCI_INTX;

	return 0;
}

const char *
phb_intr_string(void *v, pci_intr_handle_t ih)
{
	switch (ih.ih_type) {
	case PCI_MSI32:
	case PCI_MSI64:
		return "msi";
	case PCI_MSIX:
		return "msix";
	}

	return "intx";
}
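
/*
 * MSI and MSI-X vectors come out of the pool that OPAL advertises in
 * "ibm,opal-msi-ranges"; legacy INTx interrupts are routed through
 * the interrupt map on the host bridge's child node instead.
 */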
void *
phb_intr_establish(void *v, pci_intr_handle_t ih, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct phb_softc *sc = v;
	void *cookie = NULL;

	KASSERT(ih.ih_type != PCI_NONE);

	if (ih.ih_type != PCI_INTX) {
		uint32_t addr32, data;
		uint64_t addr;
		uint32_t xive;
		int64_t error;

		if (sc->sc_xive >= sc->sc_msi_ranges[1])
			return NULL;

		/* Allocate an MSI. */
		xive = sc->sc_xive++;

		error = opal_pci_set_xive_pe(sc->sc_phb_id,
		    sc->sc_pe_number, xive);
		if (error != OPAL_SUCCESS)
			return NULL;

		if (ih.ih_type == PCI_MSI32) {
			error = opal_get_msi_32(sc->sc_phb_id, 0, xive,
			    1, opal_phys(&addr32), opal_phys(&data));
			addr = addr32;
		} else {
			error = opal_get_msi_64(sc->sc_phb_id, 0, xive,
			    1, opal_phys(&addr), opal_phys(&data));
		}
		if (error != OPAL_SUCCESS)
			return NULL;

		cookie = intr_establish(sc->sc_msi_ranges[0] + xive,
		    IST_EDGE, level, ci, func, arg, name);
		if (cookie == NULL)
			return NULL;

		if (ih.ih_type == PCI_MSIX) {
			pci_msix_enable(ih.ih_pc, ih.ih_tag,
			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
		} else
			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
	} else {
		int bus, dev, fn;
		uint32_t reg[4];
		int node;

		phb_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);

		reg[0] = bus << 16 | dev << 11 | fn << 8;
		reg[1] = reg[2] = 0;
		reg[3] = ih.ih_intrpin;

		/* Host bridge child node holds the interrupt map. */
		node = OF_child(sc->sc_node);
		if (node == 0)
			return NULL;

		cookie = fdt_intr_establish_imap(node, reg, sizeof(reg),
		    level, func, arg, name);
	}

	return cookie;
}

void
phb_intr_disestablish(void *v, void *cookie)
{
}

int
phb_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct phb_softc *sc = t->bus_private;
	int i;

	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}

int
phb_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct phb_softc *sc = t->bus_private;
	int i;

	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x02000000) == 0x02000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}

paddr_t
phb_bs_mmap(bus_space_tag_t t, bus_addr_t addr, off_t off,
    int prot, int flags)
{
	struct phb_softc *sc = t->bus_private;
	int i;

	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x02000000) == 0x02000000 &&
		    addr >= pci_start && addr + PAGE_SIZE <= pci_end) {
			return bus_space_mmap(sc->sc_iot,
			    addr - pci_start + phys_start, off, prot, flags);
		}
	}

	return -1;
}
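
/*
 * DMA map loads are delegated to the parent tag; afterwards
 * IODA_TVE_SELECT is ORed into every segment address so the device's
 * DMA goes through the bypass window set up in phb_attach().
 */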
int
phb_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	struct phb_softc *sc = t->_cookie;
	int seg, firstseg = *segp;
	int error;

	error = sc->sc_dmat->_dmamap_load_buffer(sc->sc_dmat, map, buf, buflen,
	    p, flags, lastaddrp, segp, first);
	if (error)
		return error;

	/* For each segment. */
	for (seg = firstseg; seg <= *segp; seg++)
		map->dm_segs[seg].ds_addr |= IODA_TVE_SELECT;

	return 0;
}

int
phb_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct phb_softc *sc = t->_cookie;
	int seg, error;

	error = sc->sc_dmat->_dmamap_load_raw(sc->sc_dmat, map,
	    segs, nsegs, size, flags);
	if (error)
		return error;

	/* For each segment. */
	for (seg = 0; seg < nsegs; seg++)
		map->dm_segs[seg].ds_addr |= IODA_TVE_SELECT;

	return 0;
}