/*	$OpenBSD: dwpcie.c,v 1.39 2022/11/27 22:04:59 kettenis Exp $	*/
/*
 * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/extent.h>
#include <sys/malloc.h>

#include <machine/intr.h>
#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/ppbreg.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_pinctrl.h>
#include <dev/ofw/ofw_power.h>
#include <dev/ofw/fdt.h>

/* Registers */
#define PCIE_PORT_LINK_CTRL			0x710
#define PCIE_PORT_LINK_CTRL_LANES_MASK		(0x3f << 16)
#define PCIE_PORT_LINK_CTRL_LANES_1		(0x1 << 16)
#define PCIE_PORT_LINK_CTRL_LANES_2		(0x3 << 16)
#define PCIE_PORT_LINK_CTRL_LANES_4		(0x7 << 16)
#define PCIE_PORT_LINK_CTRL_LANES_8		(0xf << 16)
#define PCIE_PHY_DEBUG_R1			0x72c
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)
#define PCIE_LINK_WIDTH_SPEED_CTRL		0x80c
#define PCIE_LINK_WIDTH_SPEED_CTRL_LANES_MASK	(0x1f << 8)
#define PCIE_LINK_WIDTH_SPEED_CTRL_LANES_1	(0x1 << 8)
#define PCIE_LINK_WIDTH_SPEED_CTRL_LANES_2	(0x2 << 8)
#define PCIE_LINK_WIDTH_SPEED_CTRL_LANES_4	(0x4 << 8)
#define PCIE_LINK_WIDTH_SPEED_CTRL_LANES_8	(0x8 << 8)
#define PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE	(1 << 17)

#define PCIE_MSI_ADDR_LO	0x820
#define PCIE_MSI_ADDR_HI	0x824
#define PCIE_MSI_INTR0_ENABLE	0x828
#define PCIE_MSI_INTR0_MASK	0x82c
#define PCIE_MSI_INTR0_STATUS	0x830

#define MISC_CONTROL_1				0x8bc
#define MISC_CONTROL_1_DBI_RO_WR_EN		(1 << 0)
#define IATU_VIEWPORT				0x900
#define IATU_VIEWPORT_INDEX0			0
#define IATU_VIEWPORT_INDEX1			1
#define IATU_VIEWPORT_INDEX2			2
#define IATU_VIEWPORT_INDEX3			3
#define IATU_OFFSET_VIEWPORT			0x904
#define IATU_OFFSET_UNROLL(x)			(0x200 * (x))
#define IATU_REGION_CTRL_1			0x000
#define IATU_REGION_CTRL_1_TYPE_MEM		0
#define IATU_REGION_CTRL_1_TYPE_IO		2
#define IATU_REGION_CTRL_1_TYPE_CFG0		4
#define IATU_REGION_CTRL_1_TYPE_CFG1		5
#define IATU_REGION_CTRL_2			0x004
#define IATU_REGION_CTRL_2_REGION_EN		(1U << 31)
#define IATU_LWR_BASE_ADDR			0x08
#define IATU_UPPER_BASE_ADDR			0x0c
#define IATU_LIMIT_ADDR				0x10
#define IATU_LWR_TARGET_ADDR			0x14
#define IATU_UPPER_TARGET_ADDR			0x18

#define PCIE_GLOBAL_CTRL			0x8000
#define PCIE_GLOBAL_CTRL_APP_LTSSM_EN		(1 << 2)
#define PCIE_GLOBAL_CTRL_DEVICE_TYPE_MASK	(0xf << 4)
#define PCIE_GLOBAL_CTRL_DEVICE_TYPE_RC		(0x4 << 4)
#define PCIE_GLOBAL_STATUS			0x8008
#define PCIE_GLOBAL_STATUS_RDLH_LINK_UP		(1 << 1)
#define PCIE_GLOBAL_STATUS_PHY_LINK_UP		(1 << 9)
#define PCIE_PM_STATUS				0x8014
#define PCIE_GLOBAL_INT_CAUSE			0x801c
#define PCIE_GLOBAL_INT_MASK			0x8020
#define PCIE_GLOBAL_INT_MASK_INT_A		(1 << 9)
#define PCIE_GLOBAL_INT_MASK_INT_B		(1 << 10)
#define PCIE_GLOBAL_INT_MASK_INT_C		(1 << 11)
#define PCIE_GLOBAL_INT_MASK_INT_D		(1 << 12)
#define PCIE_ARCACHE_TRC			0x8050
#define PCIE_ARCACHE_TRC_DEFAULT		0x3511
#define PCIE_AWCACHE_TRC			0x8054
#define PCIE_AWCACHE_TRC_DEFAULT		0x5311
#define PCIE_ARUSER				0x805c
#define PCIE_AWUSER				0x8060
#define PCIE_AXUSER_DOMAIN_MASK			(0x3 << 4)
#define PCIE_AXUSER_DOMAIN_INNER_SHARABLE	(0x1 << 4)
#define PCIE_AXUSER_DOMAIN_OUTER_SHARABLE	(0x2 << 4)
#define PCIE_STREAMID				0x8064
#define PCIE_STREAMID_FUNC_BITS(x)		((x) << 0)
#define PCIE_STREAMID_DEV_BITS(x)		((x) << 4)
#define PCIE_STREAMID_BUS_BITS(x)		((x) << 8)
#define PCIE_STREAMID_ROOTPORT(x)		((x) << 12)
#define PCIE_STREAMID_8040 \
	(PCIE_STREAMID_ROOTPORT(0x80) | PCIE_STREAMID_BUS_BITS(2) | \
	 PCIE_STREAMID_DEV_BITS(2) | PCIE_STREAMID_FUNC_BITS(3))

/* Amlogic G12A registers */
#define PCIE_CFG0				0x0000
#define PCIE_CFG0_APP_LTSSM_EN			(1 << 7)
#define PCIE_STATUS12				0x0030
#define PCIE_STATUS12_RDLH_LINK_UP		(1 << 16)
#define PCIE_STATUS12_LTSSM_MASK		(0x1f << 10)
#define PCIE_STATUS12_LTSSM_UP			(0x11 << 10)
#define PCIE_STATUS12_SMLH_LINK_UP		(1 << 6)

/* NXP i.MX8MQ registers */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf
#define PCIE_RC_LCR_L1EL_MASK			(0x7 << 15)
#define PCIE_RC_LCR_L1EL_64US			(0x6 << 15)

#define IOMUXC_GPR12				0x30
#define IMX8MQ_GPR_PCIE2_DEVICE_TYPE_MASK	(0xf << 8)
#define IMX8MQ_GPR_PCIE2_DEVICE_TYPE_RC		(0x4 << 8)
#define IMX8MQ_GPR_PCIE1_DEVICE_TYPE_MASK	(0xf << 12)
#define IMX8MQ_GPR_PCIE1_DEVICE_TYPE_RC		(0x4 << 12)
#define IOMUXC_GPR14				0x38
#define IOMUXC_GPR16				0x40
#define IMX8MQ_GPR_PCIE_REF_USE_PAD		(1 << 9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	(1 << 10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	(1 << 11)
#define IMX8MM_GPR_PCIE_SSC_EN			(1 << 16)
#define IMX8MM_GPR_PCIE_POWER_OFF		(1 << 17)
#define IMX8MM_GPR_PCIE_CMN_RST			(1 << 18)
#define IMX8MM_GPR_PCIE_AUX_EN			(1 << 19)
#define IMX8MM_GPR_PCIE_REF_CLK_MASK		(0x3 << 24)
#define IMX8MM_GPR_PCIE_REF_CLK_PLL		(0x3 << 24)
#define IMX8MM_GPR_PCIE_REF_CLK_EXT		(0x2 << 24)

#define IMX8MM_PCIE_PHY_CMN_REG62		0x188
#define IMX8MM_PCIE_PHY_CMN_REG62_PLL_CLK_OUT	0x08
#define IMX8MM_PCIE_PHY_CMN_REG64		0x190
#define IMX8MM_PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM	0x8c
#define IMX8MM_PCIE_PHY_CMN_REG75		0x1d4
#define IMX8MM_PCIE_PHY_CMN_REG75_PLL_DONE	0x3
#define IMX8MM_PCIE_PHY_TRSV_REG5		0x414
#define IMX8MM_PCIE_PHY_TRSV_REG5_GEN1_DEEMP	0x2d
#define IMX8MM_PCIE_PHY_TRSV_REG6		0x418
#define IMX8MM_PCIE_PHY_TRSV_REG6_GEN2_DEEMP	0xf

#define ANATOP_PLLOUT_CTL			0x74
#define ANATOP_PLLOUT_CTL_CKE			(1 << 4)
#define ANATOP_PLLOUT_CTL_SEL_SYSPLL1		0xb
#define ANATOP_PLLOUT_CTL_SEL_MASK		0xf
#define ANATOP_PLLOUT_DIV			0x7c
#define ANATOP_PLLOUT_DIV_SYSPLL1		0x7

#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))

struct dwpcie_range {
	uint32_t		flags;
	uint64_t		pci_base;
	uint64_t		phys_base;
	uint64_t		size;
};

#define DWPCIE_NUM_MSI		32

struct dwpcie_msi {
	int			(*dm_func)(void *);
	void			*dm_arg;
	int			dm_ipl;
	int			dm_flags;
	int			dm_vec;
	struct evcount		dm_count;
	char			*dm_name;
};

struct dwpcie_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;

	bus_addr_t		sc_ctrl_base;
	bus_size_t		sc_ctrl_size;

	bus_addr_t		sc_conf_base;
	bus_size_t		sc_conf_size;
	bus_space_handle_t	sc_conf_ioh;

	bus_addr_t		sc_glue_base;
	bus_size_t		sc_glue_size;
	bus_space_handle_t	sc_glue_ioh;

	bus_addr_t		sc_atu_base;
	bus_size_t		sc_atu_size;
	bus_space_handle_t	sc_atu_ioh;

	bus_addr_t		sc_io_base;
	bus_addr_t		sc_io_bus_addr;
	bus_size_t		sc_io_size;
	bus_addr_t		sc_mem_base;
	bus_addr_t		sc_mem_bus_addr;
	bus_size_t		sc_mem_size;
	bus_addr_t		sc_pmem_base;
	bus_addr_t		sc_pmem_bus_addr;
	bus_size_t		sc_pmem_size;

	int			sc_node;
	int			sc_acells;
	int			sc_scells;
	int			sc_pacells;
	int			sc_pscells;
	struct dwpcie_range	*sc_ranges;
	int			sc_nranges;

	struct bus_space	sc_bus_iot;
	struct bus_space	sc_bus_memt;

	struct machine_pci_chipset sc_pc;
	int			sc_bus;

	int			sc_num_viewport;
	int			sc_atu_unroll;
	int			sc_atu_viewport;

	void			*sc_ih;

	uint64_t		sc_msi_addr;
	struct dwpcie_msi	sc_msi[DWPCIE_NUM_MSI];
};

struct dwpcie_intr_handle {
	struct machine_intr_handle pih_ih;
	struct dwpcie_softc	*pih_sc;
	struct dwpcie_msi	*pih_dm;
	bus_dma_tag_t		pih_dmat;
	bus_dmamap_t		pih_map;
};

int	dwpcie_match(struct device *, void *, void *);
void	dwpcie_attach(struct device *, struct device *, void *);

const struct cfattach dwpcie_ca = {
	sizeof (struct dwpcie_softc), dwpcie_match, dwpcie_attach
};

struct cfdriver dwpcie_cd = {
	NULL, "dwpcie", DV_DULL
};

int
dwpcie_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "amlogic,g12a-pcie") ||
	    OF_is_compatible(faa->fa_node, "fsl,imx8mm-pcie") ||
	    OF_is_compatible(faa->fa_node, "fsl,imx8mq-pcie") ||
	    OF_is_compatible(faa->fa_node, "marvell,armada8k-pcie") ||
	    OF_is_compatible(faa->fa_node, "qcom,pcie-sc8280xp") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3568-pcie") ||
	    OF_is_compatible(faa->fa_node, "sifive,fu740-pcie"));
}

void	dwpcie_attach_deferred(struct device *);

void	dwpcie_atu_disable(struct dwpcie_softc *, int);
void	dwpcie_atu_config(struct dwpcie_softc *, int, int,
	    uint64_t, uint64_t, uint64_t);
void	dwpcie_link_config(struct dwpcie_softc *);
int	dwpcie_link_up(struct dwpcie_softc *);

int	dwpcie_armada8k_init(struct dwpcie_softc *);
int	dwpcie_armada8k_link_up(struct dwpcie_softc *);
int	dwpcie_armada8k_intr(void *);

int	dwpcie_g12a_init(struct dwpcie_softc *);
int	dwpcie_g12a_link_up(struct dwpcie_softc *);

int	dwpcie_imx8mq_init(struct dwpcie_softc *);
int	dwpcie_imx8mq_intr(void *);

int	dwpcie_fu740_init(struct dwpcie_softc *);
int	dwpcie_rk3568_init(struct dwpcie_softc *);
int	dwpcie_sc8280xp_init(struct dwpcie_softc *);

void	dwpcie_attach_hook(struct device *, struct device *,
	    struct pcibus_attach_args *);
int	dwpcie_bus_maxdevs(void *, int);
pcitag_t dwpcie_make_tag(void *, int, int, int);
void	dwpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
int	dwpcie_conf_size(void *, pcitag_t);
pcireg_t dwpcie_conf_read(void *, pcitag_t, int);
void	dwpcie_conf_write(void *, pcitag_t, int, pcireg_t);
int	dwpcie_probe_device_hook(void *, struct pci_attach_args *);

int	dwpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
const char *dwpcie_intr_string(void *, pci_intr_handle_t);
void	*dwpcie_intr_establish(void *, pci_intr_handle_t, int,
	    struct cpu_info *, int (*)(void *), void *, char *);
void	dwpcie_intr_disestablish(void *, void *);

int	dwpcie_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
int	dwpcie_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);

struct interrupt_controller dwpcie_ic = {
	.ic_barrier = intr_barrier
};

void
dwpcie_attach(struct device *parent, struct device *self, void *aux)
{
	struct dwpcie_softc *sc = (struct dwpcie_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t *ranges;
	int i, j, nranges, rangeslen;
	int atu, config, ctrl, glue;

	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_ctrl_base = faa->fa_reg[0].addr;
	sc->sc_ctrl_size = faa->fa_reg[0].size;

	ctrl = OF_getindex(faa->fa_node, "dbi", "reg-names");
	if (ctrl >= 0 && ctrl < faa->fa_nreg) {
		sc->sc_ctrl_base = faa->fa_reg[ctrl].addr;
		sc->sc_ctrl_size = faa->fa_reg[ctrl].size;
	}

	config = OF_getindex(faa->fa_node, "config", "reg-names");
	if (config < 0 || config >= faa->fa_nreg) {
		printf(": no config registers\n");
		return;
	}

	sc->sc_conf_base = faa->fa_reg[config].addr;
	sc->sc_conf_size = faa->fa_reg[config].size;

	sc->sc_atu_base = sc->sc_ctrl_base + 0x300000;
	sc->sc_atu_size = sc->sc_ctrl_size - 0x300000;

	atu = OF_getindex(faa->fa_node, "atu", "reg-names");
	if (atu >= 0 && atu < faa->fa_nreg) {
		sc->sc_atu_base = faa->fa_reg[atu].addr;
		sc->sc_atu_size = faa->fa_reg[atu].size;
	}

	if (OF_is_compatible(faa->fa_node, "amlogic,g12a-pcie")) {
		glue = OF_getindex(faa->fa_node, "cfg", "reg-names");
		if (glue < 0 || glue >= faa->fa_nreg) {
			printf(": no glue registers\n");
			return;
		}

		sc->sc_glue_base = faa->fa_reg[glue].addr;
		sc->sc_glue_size = faa->fa_reg[glue].size;
	}

	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;

	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
	    faa->fa_acells);
	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
	    faa->fa_scells);
	sc->sc_pacells = faa->fa_acells;
	sc->sc_pscells = faa->fa_scells;

	rangeslen = OF_getproplen(sc->sc_node, "ranges");
	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
	    (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
	    sc->sc_pacells + sc->sc_scells)) {
		printf(": invalid ranges property\n");
		return;
	}

	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges,
	    rangeslen);

	nranges = (rangeslen / sizeof(uint32_t)) /
	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
	sc->sc_ranges = mallocarray(nranges,
	    sizeof(struct dwpcie_range), M_TEMP, M_WAITOK);
	sc->sc_nranges = nranges;

	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
		sc->sc_ranges[i].flags = ranges[j++];
		sc->sc_ranges[i].pci_base = ranges[j++];
		if (sc->sc_acells - 1 == 2) {
			sc->sc_ranges[i].pci_base <<= 32;
			sc->sc_ranges[i].pci_base |= ranges[j++];
		}
		sc->sc_ranges[i].phys_base = ranges[j++];
		if (sc->sc_pacells == 2) {
			sc->sc_ranges[i].phys_base <<= 32;
			sc->sc_ranges[i].phys_base |= ranges[j++];
		}
		sc->sc_ranges[i].size = ranges[j++];
		if (sc->sc_scells == 2) {
			sc->sc_ranges[i].size <<= 32;
			sc->sc_ranges[i].size |= ranges[j++];
		}
	}

	free(ranges, M_TEMP, rangeslen);

	if (bus_space_map(sc->sc_iot, sc->sc_ctrl_base,
	    sc->sc_ctrl_size, 0, &sc->sc_ioh)) {
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct dwpcie_range));
		printf(": can't map ctrl registers\n");
		return;
	}

	if (bus_space_map(sc->sc_iot, sc->sc_conf_base,
	    sc->sc_conf_size, 0, &sc->sc_conf_ioh)) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ctrl_size);
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct dwpcie_range));
		printf(": can't map config registers\n");
		return;
	}

	sc->sc_num_viewport = OF_getpropint(sc->sc_node, "num-viewport", 2);

	printf("\n");

	pinctrl_byname(sc->sc_node, "default");
	clock_set_assigned(sc->sc_node);

	config_defer(self, dwpcie_attach_deferred);
}

void
dwpcie_attach_deferred(struct device *self)
{
	struct dwpcie_softc *sc = (struct dwpcie_softc *)self;
	struct pcibus_attach_args pba;
	bus_addr_t iobase, iolimit;
	bus_addr_t membase, memlimit;
	bus_addr_t pmembase, pmemlimit;
	uint32_t bus_range[2];
	pcireg_t bir, blr, csr;
	int i, error = 0;

	if (OF_is_compatible(sc->sc_node, "marvell,armada8k-pcie"))
		error = dwpcie_armada8k_init(sc);
	if (OF_is_compatible(sc->sc_node, "amlogic,g12a-pcie"))
		error = dwpcie_g12a_init(sc);
	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie") ||
	    OF_is_compatible(sc->sc_node, "fsl,imx8mq-pcie"))
		error = dwpcie_imx8mq_init(sc);
	if (OF_is_compatible(sc->sc_node, "qcom,pcie-sc8280xp"))
		error = dwpcie_sc8280xp_init(sc);
	if (OF_is_compatible(sc->sc_node, "rockchip,rk3568-pcie"))
		error = dwpcie_rk3568_init(sc);
	if (OF_is_compatible(sc->sc_node, "sifive,fu740-pcie"))
		error = dwpcie_fu740_init(sc);
	if (error != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_conf_ioh, sc->sc_conf_size);
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ctrl_size);
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct dwpcie_range));
		printf("%s: can't initialize hardware\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * An all-ones read of the viewport register indicates that
	 * this controller uses the unrolled iATU register layout.
	 */
	sc->sc_atu_viewport = -1;
	if (HREAD4(sc, IATU_VIEWPORT) == 0xffffffff) {
		sc->sc_atu_unroll = 1;
		if (bus_space_map(sc->sc_iot, sc->sc_atu_base,
		    sc->sc_atu_size, 0, &sc->sc_atu_ioh)) {
			bus_space_unmap(sc->sc_iot, sc->sc_conf_ioh,
			    sc->sc_conf_size);
			bus_space_unmap(sc->sc_iot, sc->sc_ioh,
			    sc->sc_ctrl_size);
			free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
			    sizeof(struct dwpcie_range));
			printf("%s: can't map atu registers\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	/* Set up address translation for I/O space. */
	for (i = 0; i < sc->sc_nranges; i++) {
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_io_base = sc->sc_ranges[i].phys_base;
			sc->sc_io_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_io_size = sc->sc_ranges[i].size;
		}
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_mem_base = sc->sc_ranges[i].phys_base;
			sc->sc_mem_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_mem_size = sc->sc_ranges[i].size;
		}
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x03000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_pmem_base = sc->sc_ranges[i].phys_base;
			sc->sc_pmem_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_pmem_size = sc->sc_ranges[i].size;
		}
	}
	if (sc->sc_mem_size == 0) {
		printf("%s: no memory mapped I/O window\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Disable prefetchable memory mapped I/O window if we don't
	 * have enough viewports to enable it.
	 */
	if (sc->sc_num_viewport < 4)
		sc->sc_pmem_size = 0;

	for (i = 0; i < sc->sc_num_viewport; i++)
		dwpcie_atu_disable(sc, i);

	dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX0,
	    IATU_REGION_CTRL_1_TYPE_MEM, sc->sc_mem_base,
	    sc->sc_mem_bus_addr, sc->sc_mem_size);
	if (sc->sc_num_viewport > 2 && sc->sc_io_size > 0)
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX2,
		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
		    sc->sc_io_bus_addr, sc->sc_io_size);
	if (sc->sc_num_viewport > 3 && sc->sc_pmem_size > 0)
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX3,
		    IATU_REGION_CTRL_1_TYPE_MEM, sc->sc_pmem_base,
		    sc->sc_pmem_bus_addr, sc->sc_pmem_size);

	/* Enable modification of read-only bits. */
	HSET4(sc, MISC_CONTROL_1, MISC_CONTROL_1_DBI_RO_WR_EN);

	/* A Root Port is a PCI-PCI Bridge. */
	HWRITE4(sc, PCI_CLASS_REG,
	    PCI_CLASS_BRIDGE << PCI_CLASS_SHIFT |
	    PCI_SUBCLASS_BRIDGE_PCI << PCI_SUBCLASS_SHIFT);

	/* Clear BAR as U-Boot seems to leave garbage in it. */
	HWRITE4(sc, PCI_MAPREG_START, PCI_MAPREG_MEM_TYPE_64BIT);
	HWRITE4(sc, PCI_MAPREG_START + 4, 0);

	/* Enable 32-bit I/O addressing. */
	HSET4(sc, PPB_REG_IOSTATUS,
	    PPB_IO_32BIT | (PPB_IO_32BIT << PPB_IOLIMIT_SHIFT));

	/* Make sure read-only bits are write-protected. */
	HCLR4(sc, MISC_CONTROL_1, MISC_CONTROL_1_DBI_RO_WR_EN);

	/* Set up bus range. */
	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
	    sizeof(bus_range)) != sizeof(bus_range) ||
	    bus_range[0] >= 32 || bus_range[1] >= 32) {
		bus_range[0] = 0;
		bus_range[1] = 31;
	}
	sc->sc_bus = bus_range[0];

	/* Initialize bus range. */
	bir = bus_range[0];
	bir |= ((bus_range[0] + 1) << 8);
	bir |= (bus_range[1] << 16);
	HWRITE4(sc, PPB_REG_BUSINFO, bir);

	/* Initialize memory mapped I/O window. */
	membase = sc->sc_mem_bus_addr;
	memlimit = membase + sc->sc_mem_size - 1;
	blr = memlimit & PPB_MEM_MASK;
	blr |= (membase >> PPB_MEM_SHIFT);
	HWRITE4(sc, PPB_REG_MEM, blr);

	/* Initialize I/O window. */
	if (sc->sc_io_size > 0) {
		iobase = sc->sc_io_bus_addr;
		iolimit = iobase + sc->sc_io_size - 1;
		blr = iolimit & PPB_IO_MASK;
		blr |= (iobase >> PPB_IO_SHIFT);
		HWRITE4(sc, PPB_REG_IOSTATUS, blr);
		blr = (iobase & 0xffff0000) >> 16;
		blr |= iolimit & 0xffff0000;
		HWRITE4(sc, PPB_REG_IO_HI, blr);
	} else {
		HWRITE4(sc, PPB_REG_IOSTATUS, 0x000000ff);
		HWRITE4(sc, PPB_REG_IO_HI, 0x0000ffff);
	}

	/* Initialize prefetchable memory mapped I/O window. */
	if (sc->sc_pmem_size > 0) {
		pmembase = sc->sc_pmem_bus_addr;
		pmemlimit = pmembase + sc->sc_pmem_size - 1;
		blr = pmemlimit & PPB_MEM_MASK;
		blr |= (pmembase >> PPB_MEM_SHIFT);
		HWRITE4(sc, PPB_REG_PREFMEM, blr);
		HWRITE4(sc, PPB_REG_PREFBASE_HI32, pmembase >> 32);
		HWRITE4(sc, PPB_REG_PREFLIM_HI32, pmemlimit >> 32);
	} else {
		HWRITE4(sc, PPB_REG_PREFMEM, 0x0000ffff);
		HWRITE4(sc, PPB_REG_PREFBASE_HI32, 0);
		HWRITE4(sc, PPB_REG_PREFLIM_HI32, 0);
	}

	csr = PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
	if (sc->sc_io_size > 0)
		csr |= PCI_COMMAND_IO_ENABLE;
	HWRITE4(sc, PCI_COMMAND_STATUS_REG, csr);

	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
	sc->sc_bus_iot.bus_private = sc;
	sc->sc_bus_iot._space_map = dwpcie_bs_iomap;
	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
	sc->sc_bus_memt.bus_private = sc;
	sc->sc_bus_memt._space_map = dwpcie_bs_memmap;

	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = dwpcie_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = dwpcie_bus_maxdevs;
	sc->sc_pc.pc_make_tag = dwpcie_make_tag;
	sc->sc_pc.pc_decompose_tag = dwpcie_decompose_tag;
	sc->sc_pc.pc_conf_size = dwpcie_conf_size;
	sc->sc_pc.pc_conf_read = dwpcie_conf_read;
	sc->sc_pc.pc_conf_write = dwpcie_conf_write;
	sc->sc_pc.pc_probe_device_hook = dwpcie_probe_device_hook;

	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = dwpcie_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = dwpcie_intr_string;
	sc->sc_pc.pc_intr_establish = dwpcie_intr_establish;
	sc->sc_pc.pc_intr_disestablish = dwpcie_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = &sc->sc_bus_iot;
	pba.pba_memt = &sc->sc_bus_memt;
	pba.pba_dmat = sc->sc_dmat;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;
	if (OF_is_compatible(sc->sc_node, "marvell,armada8k-pcie") ||
	    OF_is_compatible(sc->sc_node, "rockchip,rk3568-pcie") ||
	    sc->sc_msi_addr)
		pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;

	config_found(self, &pba, NULL);
}

void
dwpcie_link_config(struct dwpcie_softc *sc)
{
	uint32_t mode, width, reg;
	int lanes;

	lanes = OF_getpropint(sc->sc_node, "num-lanes", 0);

	switch (lanes) {
	case 1:
		mode = PCIE_PORT_LINK_CTRL_LANES_1;
		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_1;
		break;
	case 2:
		mode = PCIE_PORT_LINK_CTRL_LANES_2;
		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_2;
		break;
	case 4:
		mode = PCIE_PORT_LINK_CTRL_LANES_4;
		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_4;
		break;
	case 8:
		mode = PCIE_PORT_LINK_CTRL_LANES_8;
		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_8;
		break;
	default:
		printf("%s: %d lanes not supported\n", __func__, lanes);
		return;
	}

	reg = HREAD4(sc, PCIE_PORT_LINK_CTRL);
	reg &= ~PCIE_PORT_LINK_CTRL_LANES_MASK;
	reg |= mode;
	HWRITE4(sc, PCIE_PORT_LINK_CTRL, reg);

	reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
	reg &= ~PCIE_LINK_WIDTH_SPEED_CTRL_LANES_MASK;
	reg |= width;
	HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);

	reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
	reg |= PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE;
	HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);
}

int
dwpcie_msi_intr(void *arg)
{
	struct dwpcie_softc *sc = arg;
	struct dwpcie_msi *dm;
	uint32_t status;
	int vec, s;

	status = HREAD4(sc, PCIE_MSI_INTR0_STATUS);
	if (status == 0)
		return 0;

	HWRITE4(sc, PCIE_MSI_INTR0_STATUS, status);
	while (status) {
		vec = ffs(status) - 1;
		status &= ~(1U << vec);

		dm = &sc->sc_msi[vec];
		if (dm->dm_func == NULL)
			continue;

		if ((dm->dm_flags & IPL_MPSAFE) == 0)
			KERNEL_LOCK();
		s = splraise(dm->dm_ipl);
		if (dm->dm_func(dm->dm_arg))
			dm->dm_count.ec_count++;
		splx(s);
		if ((dm->dm_flags & IPL_MPSAFE) == 0)
			KERNEL_UNLOCK();
	}

	return 1;
}

int
dwpcie_msi_init(struct dwpcie_softc *sc)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	uint64_t addr;
	int error, rseg;

	/*
	 * Allocate some DMA memory such that we have a "safe" target
	 * address for MSIs.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(uint32_t),
	    sizeof(uint32_t), 0, &seg, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;

	/*
	 * Translate the CPU address into a bus address that we can
	 * program into the hardware.
	 */
	error = bus_dmamap_create(sc->sc_dmat, sizeof(uint32_t), 1,
	    sizeof(uint32_t), 0, BUS_DMA_WAITOK, &map);
	if (error) {
		bus_dmamem_free(sc->sc_dmat, &seg, 1);
		return error;
	}
	error = bus_dmamap_load_raw(sc->sc_dmat, map, &seg, 1,
	    sizeof(uint32_t), BUS_DMA_WAITOK);
	if (error) {
		bus_dmamap_destroy(sc->sc_dmat, map);
		bus_dmamem_free(sc->sc_dmat, &seg, 1);
		return error;
	}

	addr = map->dm_segs[0].ds_addr;
	HWRITE4(sc, PCIE_MSI_ADDR_LO, addr);
	HWRITE4(sc, PCIE_MSI_ADDR_HI, addr >> 32);

	bus_dmamap_unload(sc->sc_dmat, map);
	bus_dmamap_destroy(sc->sc_dmat, map);

	/* Enable, mask and clear all MSIs. */
	HWRITE4(sc, PCIE_MSI_INTR0_ENABLE, 0xffffffff);
	HWRITE4(sc, PCIE_MSI_INTR0_MASK, 0xffffffff);
	HWRITE4(sc, PCIE_MSI_INTR0_STATUS, 0xffffffff);

	KASSERT(sc->sc_ih == NULL);
	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_BIO | IPL_MPSAFE,
	    dwpcie_msi_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		bus_dmamem_free(sc->sc_dmat, &seg, 1);
		return EINVAL;
	}

	/*
	 * Hold on to the DMA memory such that nobody can use it to
	 * actually do DMA transfers.
	 */

	sc->sc_msi_addr = addr;
	return 0;
}

int
dwpcie_armada8k_init(struct dwpcie_softc *sc)
{
	uint32_t reg;
	int timo;

	clock_enable_all(sc->sc_node);

	dwpcie_link_config(sc);

	if (!dwpcie_armada8k_link_up(sc)) {
		reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
		reg &= ~PCIE_GLOBAL_CTRL_APP_LTSSM_EN;
		HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
	}

	/*
	 * Setup Requester-ID to Stream-ID mapping
	 * XXX: TF-A is supposed to set this up, but doesn't!
	 */
	HWRITE4(sc, PCIE_STREAMID, PCIE_STREAMID_8040);

	/* Enable Root Complex mode. */
	reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
	reg &= ~PCIE_GLOBAL_CTRL_DEVICE_TYPE_MASK;
	reg |= PCIE_GLOBAL_CTRL_DEVICE_TYPE_RC;
	HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);

	HWRITE4(sc, PCIE_ARCACHE_TRC, PCIE_ARCACHE_TRC_DEFAULT);
	HWRITE4(sc, PCIE_AWCACHE_TRC, PCIE_AWCACHE_TRC_DEFAULT);
	reg = HREAD4(sc, PCIE_ARUSER);
	reg &= ~PCIE_AXUSER_DOMAIN_MASK;
	reg |= PCIE_AXUSER_DOMAIN_OUTER_SHARABLE;
	HWRITE4(sc, PCIE_ARUSER, reg);
	reg = HREAD4(sc, PCIE_AWUSER);
	reg &= ~PCIE_AXUSER_DOMAIN_MASK;
	reg |= PCIE_AXUSER_DOMAIN_OUTER_SHARABLE;
	HWRITE4(sc, PCIE_AWUSER, reg);

	if (!dwpcie_armada8k_link_up(sc)) {
		reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
		reg |= PCIE_GLOBAL_CTRL_APP_LTSSM_EN;
		HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
	}

	for (timo = 40; timo > 0; timo--) {
		if (dwpcie_armada8k_link_up(sc))
			break;
		delay(1000);
	}
	if (timo == 0)
		return ETIMEDOUT;

	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_AUDIO | IPL_MPSAFE,
	    dwpcie_armada8k_intr, sc, sc->sc_dev.dv_xname);

	/* Unmask INTx interrupts. */
	HWRITE4(sc, PCIE_GLOBAL_INT_MASK,
	    PCIE_GLOBAL_INT_MASK_INT_A | PCIE_GLOBAL_INT_MASK_INT_B |
	    PCIE_GLOBAL_INT_MASK_INT_C | PCIE_GLOBAL_INT_MASK_INT_D);

	return 0;
}

int
dwpcie_armada8k_link_up(struct dwpcie_softc *sc)
{
	uint32_t reg, mask;

	mask = PCIE_GLOBAL_STATUS_RDLH_LINK_UP;
	mask |= PCIE_GLOBAL_STATUS_PHY_LINK_UP;
	reg = HREAD4(sc, PCIE_GLOBAL_STATUS);
	return ((reg & mask) == mask);
}

int
dwpcie_armada8k_intr(void *arg)
{
	struct dwpcie_softc *sc = arg;
	uint32_t cause;

	/* Acknowledge interrupts. */
	cause = HREAD4(sc, PCIE_GLOBAL_INT_CAUSE);
	HWRITE4(sc, PCIE_GLOBAL_INT_CAUSE, cause);

	/* INTx interrupt, so not really ours. */
	return 0;
}

int
dwpcie_g12a_init(struct dwpcie_softc *sc)
{
	uint32_t *reset_gpio;
	ssize_t reset_gpiolen;
	uint32_t reg;
	int error, timo;

	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
	if (reset_gpiolen <= 0)
		return ENXIO;

	if (bus_space_map(sc->sc_iot, sc->sc_glue_base,
	    sc->sc_glue_size, 0, &sc->sc_glue_ioh))
		return ENOMEM;

	power_domain_enable(sc->sc_node);

	phy_enable(sc->sc_node, "pcie");

	reset_assert_all(sc->sc_node);
	delay(500);
	reset_deassert_all(sc->sc_node);
	delay(500);

	clock_set_frequency(sc->sc_node, "port", 100000000UL);
	clock_enable_all(sc->sc_node);

	reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
	    reset_gpiolen);
	gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
	gpio_controller_set_pin(reset_gpio, 1);

	dwpcie_link_config(sc);

	reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CFG0);
	reg |= PCIE_CFG0_APP_LTSSM_EN;
	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CFG0, reg);

	gpio_controller_set_pin(reset_gpio, 1);
	delay(500);
	gpio_controller_set_pin(reset_gpio, 0);

	free(reset_gpio, M_TEMP, reset_gpiolen);

	for (timo = 40; timo > 0; timo--) {
		if (dwpcie_g12a_link_up(sc))
			break;
		delay(1000);
	}
	if (timo == 0)
		return ETIMEDOUT;

	error = dwpcie_msi_init(sc);
	if (error)
		return error;

	return 0;
}

int
dwpcie_g12a_link_up(struct dwpcie_softc *sc)
{
	uint32_t reg;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_STATUS12);
	if ((reg & PCIE_STATUS12_SMLH_LINK_UP) &&
	    (reg & PCIE_STATUS12_RDLH_LINK_UP) &&
	    (reg & PCIE_STATUS12_LTSSM_MASK) == PCIE_STATUS12_LTSSM_UP)
		return 1;
	return 0;
}

int
dwpcie_imx8mq_init(struct dwpcie_softc *sc)
{
	uint32_t *clkreq_gpio, *disable_gpio, *reset_gpio;
	ssize_t clkreq_gpiolen, disable_gpiolen, reset_gpiolen;
	struct regmap *anatop, *gpr, *phy;
	uint32_t off, reg;
	int error, timo;

	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
		anatop = regmap_bycompatible("fsl,imx8mm-anatop");
		gpr = regmap_bycompatible("fsl,imx8mm-iomuxc-gpr");
		phy = regmap_bycompatible("fsl,imx7d-pcie-phy");
		KASSERT(phy != NULL);
	} else {
		anatop = regmap_bycompatible("fsl,imx8mq-anatop");
		gpr = regmap_bycompatible("fsl,imx8mq-iomuxc-gpr");
	}
	KASSERT(anatop != NULL);
	KASSERT(gpr != NULL);

	clkreq_gpiolen = OF_getproplen(sc->sc_node, "clkreq-gpio");
	disable_gpiolen = OF_getproplen(sc->sc_node, "disable-gpio");
	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpio");

	if (clkreq_gpiolen > 0) {
		clkreq_gpio = malloc(clkreq_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "clkreq-gpio", clkreq_gpio,
		    clkreq_gpiolen);
		gpio_controller_config_pin(clkreq_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(clkreq_gpio, 1);
	}

	if (disable_gpiolen > 0) {
		disable_gpio = malloc(disable_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "disable-gpio", disable_gpio,
		    disable_gpiolen);
		gpio_controller_config_pin(disable_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(disable_gpio, 0);
	}

	if (reset_gpiolen > 0) {
		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "reset-gpio", reset_gpio,
		    reset_gpiolen);
		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(reset_gpio, 1);
	}

	power_domain_enable(sc->sc_node);
	reset_assert(sc->sc_node, "pciephy");
	reset_assert(sc->sc_node, "apps");

	reg = regmap_read_4(gpr, IOMUXC_GPR12);
	if (OF_getpropint(sc->sc_node, "ctrl-id", 0) == 0) {
		off = IOMUXC_GPR14;
		reg &= ~IMX8MQ_GPR_PCIE1_DEVICE_TYPE_MASK;
		reg |= IMX8MQ_GPR_PCIE1_DEVICE_TYPE_RC;
	} else {
		off = IOMUXC_GPR16;
		reg &= ~IMX8MQ_GPR_PCIE2_DEVICE_TYPE_MASK;
		reg |= IMX8MQ_GPR_PCIE2_DEVICE_TYPE_RC;
	}
	regmap_write_4(gpr, IOMUXC_GPR12, reg);

	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
		if (OF_getproplen(sc->sc_node, "ext_osc") == 0 ||
		    OF_getpropint(sc->sc_node, "ext_osc", 0)) {
			reg = regmap_read_4(gpr, off);
			reg &= ~(IMX8MQ_GPR_PCIE_REF_USE_PAD |
			    IMX8MM_GPR_PCIE_SSC_EN |
			    IMX8MM_GPR_PCIE_POWER_OFF |
			    IMX8MM_GPR_PCIE_REF_CLK_MASK);
			reg |= (IMX8MM_GPR_PCIE_AUX_EN |
			    IMX8MM_GPR_PCIE_REF_CLK_EXT);
			regmap_write_4(gpr, off, reg);
			delay(100);
			reg = regmap_read_4(gpr, off);
			reg |= IMX8MM_GPR_PCIE_CMN_RST;
			regmap_write_4(gpr, off, reg);
			delay(200);
		} else {
			reg = regmap_read_4(gpr, off);
			reg &= ~(IMX8MQ_GPR_PCIE_REF_USE_PAD |
			    IMX8MM_GPR_PCIE_SSC_EN |
			    IMX8MM_GPR_PCIE_POWER_OFF |
			    IMX8MM_GPR_PCIE_REF_CLK_MASK);
			reg |= (IMX8MM_GPR_PCIE_AUX_EN |
			    IMX8MM_GPR_PCIE_REF_CLK_PLL);
			regmap_write_4(gpr, off, reg);
			delay(100);
			regmap_write_4(phy, IMX8MM_PCIE_PHY_CMN_REG62,
			    IMX8MM_PCIE_PHY_CMN_REG62_PLL_CLK_OUT);
			regmap_write_4(phy, IMX8MM_PCIE_PHY_CMN_REG64,
			    IMX8MM_PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM);
			reg = regmap_read_4(gpr, off);
			reg |= IMX8MM_GPR_PCIE_CMN_RST;
			regmap_write_4(gpr, off, reg);
			delay(200);
			regmap_write_4(phy, IMX8MM_PCIE_PHY_TRSV_REG5,
			    IMX8MM_PCIE_PHY_TRSV_REG5_GEN1_DEEMP);
			regmap_write_4(phy, IMX8MM_PCIE_PHY_TRSV_REG6,
			    IMX8MM_PCIE_PHY_TRSV_REG6_GEN2_DEEMP);
		}
	} else {
		if (OF_getproplen(sc->sc_node, "ext_osc") == 0 ||
		    OF_getpropint(sc->sc_node, "ext_osc", 0)) {
			reg = regmap_read_4(gpr, off);
			reg |= IMX8MQ_GPR_PCIE_REF_USE_PAD;
			regmap_write_4(gpr, off, reg);
		} else {
			reg = regmap_read_4(gpr, off);
			reg &= ~IMX8MQ_GPR_PCIE_REF_USE_PAD;
			regmap_write_4(gpr, off, reg);

			regmap_write_4(anatop, ANATOP_PLLOUT_CTL,
			    ANATOP_PLLOUT_CTL_CKE |
			    ANATOP_PLLOUT_CTL_SEL_SYSPLL1);
			regmap_write_4(anatop, ANATOP_PLLOUT_DIV,
			    ANATOP_PLLOUT_DIV_SYSPLL1);
		}
	}

	clock_enable(sc->sc_node, "pcie_phy");
	clock_enable(sc->sc_node, "pcie_bus");
	clock_enable(sc->sc_node, "pcie");
	clock_enable(sc->sc_node, "pcie_aux");

	/* Allow clocks to stabilize. */
	delay(200);

	if (reset_gpiolen > 0) {
		gpio_controller_set_pin(reset_gpio, 1);
		delay(100000);
		gpio_controller_set_pin(reset_gpio, 0);
	}

	reset_deassert(sc->sc_node, "pciephy");

	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
		for (timo = 2000; timo > 0; timo--) {
			if (regmap_read_4(phy, IMX8MM_PCIE_PHY_CMN_REG75) ==
			    IMX8MM_PCIE_PHY_CMN_REG75_PLL_DONE)
				break;
			delay(10);
		}
		if (timo == 0) {
			error = ETIMEDOUT;
			goto err;
		}
	}

	reg = HREAD4(sc, 0x100000 + PCIE_RC_LCR);
	reg &= ~PCIE_RC_LCR_L1EL_MASK;
	reg |= PCIE_RC_LCR_L1EL_64US;
	HWRITE4(sc, 0x100000 + PCIE_RC_LCR, reg);

	dwpcie_link_config(sc);

	reg = HREAD4(sc, PCIE_RC_LCR);
	reg &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	reg |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	HWRITE4(sc, PCIE_RC_LCR, reg);

	reset_deassert(sc->sc_node, "apps");

	for (timo = 20000; timo > 0; timo--) {
		if (dwpcie_link_up(sc))
			break;
		delay(10);
	}
	if (timo == 0) {
		error = ETIMEDOUT;
		goto err;
	}

	if (OF_getpropint(sc->sc_node, "fsl,max-link-speed", 1) >= 2) {
		reg = HREAD4(sc, PCIE_RC_LCR);
		reg &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		reg |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		HWRITE4(sc, PCIE_RC_LCR, reg);

		reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
		reg |= PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE;
		HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);

		for (timo = 20000; timo > 0; timo--) {
			if (dwpcie_link_up(sc))
				break;
			delay(10);
		}
		if (timo == 0) {
			error = ETIMEDOUT;
			goto err;
		}
	}

	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_AUDIO | IPL_MPSAFE,
	    dwpcie_imx8mq_intr, sc, sc->sc_dev.dv_xname);

	/* Unmask INTx interrupts. */
	HWRITE4(sc, PCIE_GLOBAL_INT_MASK,
	    PCIE_GLOBAL_INT_MASK_INT_A | PCIE_GLOBAL_INT_MASK_INT_B |
	    PCIE_GLOBAL_INT_MASK_INT_C | PCIE_GLOBAL_INT_MASK_INT_D);

	error = 0;
err:
	if (clkreq_gpiolen > 0)
		free(clkreq_gpio, M_TEMP, clkreq_gpiolen);
	if (disable_gpiolen > 0)
		free(disable_gpio, M_TEMP, disable_gpiolen);
	if (reset_gpiolen > 0)
		free(reset_gpio, M_TEMP, reset_gpiolen);
	return error;
}

int
dwpcie_imx8mq_intr(void *arg)
{
	struct dwpcie_softc *sc = arg;
	uint32_t cause;

	/* Acknowledge interrupts. */
	cause = HREAD4(sc, PCIE_GLOBAL_INT_CAUSE);
	HWRITE4(sc, PCIE_GLOBAL_INT_CAUSE, cause);

	/* INTx interrupt, so not really ours. */
	return 0;
}

int
dwpcie_fu740_init(struct dwpcie_softc *sc)
{
	sc->sc_num_viewport = 8;

	return 0;
}

int
dwpcie_rk3568_init(struct dwpcie_softc *sc)
{
	sc->sc_num_viewport = 8;

	return 0;
}

int
dwpcie_sc8280xp_init(struct dwpcie_softc *sc)
{
	sc->sc_num_viewport = 8;

	return 0;
}

void
dwpcie_atu_write(struct dwpcie_softc *sc, int index, off_t reg,
    uint32_t val)
{
	if (sc->sc_atu_unroll) {
		bus_space_write_4(sc->sc_iot, sc->sc_atu_ioh,
		    IATU_OFFSET_UNROLL(index) + reg, val);
		return;
	}

	if (sc->sc_atu_viewport != index) {
		HWRITE4(sc, IATU_VIEWPORT, index);
		sc->sc_atu_viewport = index;
	}

	HWRITE4(sc, IATU_OFFSET_VIEWPORT + reg, val);
}

uint32_t
dwpcie_atu_read(struct dwpcie_softc *sc, int index, off_t reg)
{
	if (sc->sc_atu_unroll) {
		return bus_space_read_4(sc->sc_iot, sc->sc_atu_ioh,
		    IATU_OFFSET_UNROLL(index) + reg);
	}

	if (sc->sc_atu_viewport != index) {
		HWRITE4(sc, IATU_VIEWPORT, index);
		sc->sc_atu_viewport = index;
	}

	return HREAD4(sc, IATU_OFFSET_VIEWPORT + reg);
}

void
dwpcie_atu_disable(struct dwpcie_softc *sc, int index)
{
	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_2, 0);
}

void
dwpcie_atu_config(struct dwpcie_softc *sc, int index, int type,
    uint64_t cpu_addr, uint64_t pci_addr, uint64_t size)
{
	uint32_t reg;
	int timo;

	dwpcie_atu_write(sc, index, IATU_LWR_BASE_ADDR, cpu_addr);
	dwpcie_atu_write(sc, index, IATU_UPPER_BASE_ADDR, cpu_addr >> 32);
	dwpcie_atu_write(sc, index, IATU_LIMIT_ADDR, cpu_addr + size - 1);
	dwpcie_atu_write(sc, index, IATU_LWR_TARGET_ADDR, pci_addr);
	dwpcie_atu_write(sc, index, IATU_UPPER_TARGET_ADDR, pci_addr >> 32);
	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_1, type);
	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_2,
	    IATU_REGION_CTRL_2_REGION_EN);

	for (timo = 5; timo > 0; timo--) {
		reg = dwpcie_atu_read(sc, index, IATU_REGION_CTRL_2);
		if (reg & IATU_REGION_CTRL_2_REGION_EN)
			break;
		delay(9000);
	}
	if (timo == 0)
		printf("%s:%d: timeout\n", __func__, __LINE__);
}

int
dwpcie_link_up(struct dwpcie_softc *sc)
{
	uint32_t reg;

	reg = HREAD4(sc, PCIE_PHY_DEBUG_R1);
	if ((reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) != 0 &&
	    (reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING) == 0)
		return 1;
	return 0;
}

void
dwpcie_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}

int
dwpcie_bus_maxdevs(void *v, int bus)
{
	struct dwpcie_softc *sc = v;

	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
		return 1;
	return 32;
}

pcitag_t
dwpcie_make_tag(void *v, int bus, int device, int function)
{
	return ((bus << 24) | (device << 19) | (function << 16));
}

void
dwpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
{
	if (bp != NULL)
		*bp = (tag >> 24) & 0xff;
	if (dp != NULL)
		*dp = (tag >> 19) & 0x1f;
	if (fp != NULL)
		*fp = (tag >> 16) & 0x7;
}

int
dwpcie_conf_size(void *v, pcitag_t tag)
{
	return PCIE_CONFIG_SPACE_SIZE;
}

pcireg_t
dwpcie_conf_read(void *v, pcitag_t tag, int reg)
{
	struct dwpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t ret;

	dwpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		return HREAD4(sc, tag | reg);
	}

	if (bus == sc->sc_bus + 1) {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_CFG0,
		    sc->sc_conf_base, tag, sc->sc_conf_size);
	} else {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_CFG1,
		    sc->sc_conf_base, tag, sc->sc_conf_size);
	}

	ret = bus_space_read_4(sc->sc_iot, sc->sc_conf_ioh, reg);

	/*
	 * With only two viewports the I/O window shares viewport 1
	 * with config accesses, so restore the I/O mapping.
	 */
	if (sc->sc_num_viewport <= 2 && sc->sc_io_size > 0) {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
		    sc->sc_io_bus_addr, sc->sc_io_size);
	}

	return ret;
}

void
dwpcie_conf_write(void *v, pcitag_t tag, int reg, pcireg_t data)
{
	struct dwpcie_softc *sc = v;
	int bus, dev, fn;

	dwpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		HWRITE4(sc, tag | reg, data);
		return;
	}

	if (bus == sc->sc_bus + 1) {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_CFG0,
		    sc->sc_conf_base, tag, sc->sc_conf_size);
	} else {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_CFG1,
		    sc->sc_conf_base, tag, sc->sc_conf_size);
	}

	bus_space_write_4(sc->sc_iot, sc->sc_conf_ioh, reg, data);

	if (sc->sc_num_viewport <= 2 && sc->sc_io_size > 0) {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
		    sc->sc_io_bus_addr, sc->sc_io_size);
	}
}

int
dwpcie_probe_device_hook(void *v, struct pci_attach_args *pa)
{
	struct dwpcie_softc *sc = v;
	uint16_t rid;
	int i;

	rid = pci_requester_id(pa->pa_pc, pa->pa_tag);
	pa->pa_dmat = iommu_device_map_pci(sc->sc_node, rid, pa->pa_dmat);

	for (i = 0; i < sc->sc_nranges; i++) {
		iommu_reserve_region_pci(sc->sc_node, rid,
		    sc->sc_ranges[i].pci_base, sc->sc_ranges[i].size);
	}

	return 0;
}

int
dwpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
{
	int pin = pa->pa_rawintrpin;

	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
		return -1;

	if (pa->pa_tag == 0)
		return -1;

	ihp->ih_pc = pa->pa_pc;
	ihp->ih_tag = pa->pa_intrtag;
	ihp->ih_intrpin = pa->pa_intrpin;
	ihp->ih_type = PCI_INTX;

	return 0;
}

const char *
dwpcie_intr_string(void *v, pci_intr_handle_t ih)
{
	switch (ih.ih_type) {
	case PCI_MSI:
		return "msi";
	case PCI_MSIX:
		return "msix";
	}

	return "intx";
}

struct dwpcie_msi *
dwpcie_msi_establish(struct dwpcie_softc *sc, int level,
    int (*func)(void *), void *arg, char *name)
{
	struct dwpcie_msi *dm;
	int vec;

	for (vec = 0; vec < DWPCIE_NUM_MSI; vec++) {
		dm = &sc->sc_msi[vec];
		if (dm->dm_func == NULL)
			break;
	}
	if (vec == DWPCIE_NUM_MSI)
		return NULL;

	dm->dm_func = func;
	dm->dm_arg = arg;
	dm->dm_ipl = level & IPL_IRQMASK;
	dm->dm_flags = level & IPL_FLAGMASK;
	dm->dm_vec = vec;
	dm->dm_name = name;
	if (name != NULL)
		evcount_attach(&dm->dm_count, name, &dm->dm_vec);

	/* Unmask the MSI. */
	HCLR4(sc, PCIE_MSI_INTR0_MASK, (1U << vec));

	return dm;
}

void
dwpcie_msi_disestablish(struct dwpcie_softc *sc, struct dwpcie_msi *dm)
{
	/* Mask the MSI. */
	HSET4(sc, PCIE_MSI_INTR0_MASK, (1U << dm->dm_vec));

	if (dm->dm_name)
		evcount_detach(&dm->dm_count);
	dm->dm_func = NULL;
}

void *
dwpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct dwpcie_softc *sc = v;
	struct dwpcie_intr_handle *pih;
	void *cookie = NULL;

	KASSERT(ih.ih_type != PCI_NONE);

	if (ih.ih_type != PCI_INTX) {
		struct dwpcie_msi *dm = NULL;
		bus_dma_tag_t dmat = ih.ih_dmat;
		bus_dma_segment_t seg;
		bus_dmamap_t map;
		uint64_t addr, data;

		if (sc->sc_msi_addr) {
			dm = dwpcie_msi_establish(sc, level, func, arg, name);
			if (dm == NULL)
				return NULL;
			addr = sc->sc_msi_addr;
			data = dm->dm_vec;
		} else {
			/*
			 * Assume hardware passes Requester ID as
			 * sideband data.
			 */
			data = pci_requester_id(ih.ih_pc, ih.ih_tag);
			cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
			    &data, level, ci, func, arg, (void *)name);
			if (cookie == NULL)
				return NULL;
		}

		pih = malloc(sizeof(*pih), M_DEVBUF, M_WAITOK | M_ZERO);
		pih->pih_ih.ih_ic = &dwpcie_ic;
		pih->pih_ih.ih_ih = cookie;
		pih->pih_sc = sc;
		pih->pih_dm = dm;

		if (sc->sc_msi_addr == 0) {
			if (bus_dmamap_create(dmat, sizeof(uint32_t), 1,
			    sizeof(uint32_t), 0, BUS_DMA_WAITOK, &map)) {
				free(pih, M_DEVBUF, sizeof(*pih));
				fdt_intr_disestablish(cookie);
				return NULL;
			}

			memset(&seg, 0, sizeof(seg));
			seg.ds_addr = addr;
			seg.ds_len = sizeof(uint32_t);

			if (bus_dmamap_load_raw(dmat, map, &seg, 1,
			    sizeof(uint32_t), BUS_DMA_WAITOK)) {
				bus_dmamap_destroy(dmat, map);
				free(pih, M_DEVBUF, sizeof(*pih));
				fdt_intr_disestablish(cookie);
				return NULL;
			}

			addr = map->dm_segs[0].ds_addr;
			pih->pih_dmat = dmat;
			pih->pih_map = map;
		}

		if (ih.ih_type == PCI_MSIX) {
			pci_msix_enable(ih.ih_pc, ih.ih_tag,
			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
		} else
			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
	} else {
		int bus, dev, fn;
		uint32_t reg[4];

		dwpcie_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);

		reg[0] = bus << 16 | dev << 11 | fn << 8;
		reg[1] = reg[2] = 0;
		reg[3] = ih.ih_intrpin;

		cookie = fdt_intr_establish_imap_cpu(sc->sc_node, reg,
		    sizeof(reg), level, ci, func, arg, name);
		if (cookie == NULL)
			return NULL;

		pih = malloc(sizeof(*pih), M_DEVBUF, M_WAITOK | M_ZERO);
		pih->pih_ih.ih_ic = &dwpcie_ic;
		pih->pih_ih.ih_ih = cookie;
	}

	return pih;
}

void
dwpcie_intr_disestablish(void *v, void *cookie)
{
	struct dwpcie_intr_handle *pih = cookie;

	if (pih->pih_dm)
		dwpcie_msi_disestablish(pih->pih_sc, pih->pih_dm);
	else
		fdt_intr_disestablish(pih->pih_ih.ih_ih);

	if (pih->pih_dmat) {
		bus_dmamap_unload(pih->pih_dmat, pih->pih_map);
		bus_dmamap_destroy(pih->pih_dmat, pih->pih_map);
	}

	free(pih, M_DEVBUF, sizeof(*pih));
}

int
dwpcie_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct dwpcie_softc *sc = t->bus_private;
	int i;

	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}

int
dwpcie_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct dwpcie_softc *sc = t->bus_private;
	int i;

	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x02000000) == 0x02000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}