/*	$OpenBSD: rkpcie.c,v 1.15 2021/10/24 17:52:26 mpi Exp $	*/
/*
 * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/extent.h>
#include <sys/malloc.h>

#include <machine/intr.h>
#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_regulator.h>
#include <dev/ofw/fdt.h>

#define PCIE_CLIENT_BASIC_STRAP_CONF	0x0000
#define PCIE_CLIENT_PCIE_GEN_SEL_1	(((1 << 7) << 16) | (0 << 7))
#define PCIE_CLIENT_PCIE_GEN_SEL_2	(((1 << 7) << 16) | (1 << 7))
#define PCIE_CLIENT_MODE_SELECT_RC	(((1 << 6) << 16) | (1 << 6))
#define PCIE_CLIENT_LINK_TRAIN_EN	(((1 << 1) << 16) | (1 << 1))
#define PCIE_CLIENT_CONF_EN		(((1 << 0) << 16) | (1 << 0))
#define PCIE_CLIENT_DEBUG_OUT_0		0x003c
#define PCIE_CLIENT_DEBUG_LTSSM_MASK	0x0000001f
#define PCIE_CLIENT_DEBUG_LTSSM_L0	0x00000010
#define PCIE_CLIENT_BASIC_STATUS1	0x0048
#define PCIE_CLIENT_LINK_ST		(0x3 << 20)
#define PCIE_CLIENT_LINK_ST_UP		(0x3 << 20)
#define PCIE_CLIENT_INT_MASK		0x004c
#define PCIE_CLIENT_INTD_MASK		(((1 << 8) << 16) | (1 << 8))
#define PCIE_CLIENT_INTD_UNMASK		(((1 << 8) << 16) | (0 << 8))
#define PCIE_CLIENT_INTC_MASK		(((1 << 7) << 16) | (1 << 7))
#define PCIE_CLIENT_INTC_UNMASK		(((1 << 7) << 16) | (0 << 7))
#define PCIE_CLIENT_INTB_MASK		(((1 << 6) << 16) | (1 << 6))
#define PCIE_CLIENT_INTB_UNMASK		(((1 << 6) << 16) | (0 << 6))
#define PCIE_CLIENT_INTA_MASK		(((1 << 5) << 16) | (1 << 5))
#define PCIE_CLIENT_INTA_UNMASK		(((1 << 5) << 16) | (0 << 5))

#define PCIE_RC_NORMAL_BASE		0x800000

#define PCIE_LM_BASE			0x900000
#define PCIE_LM_VENDOR_ID		(PCIE_LM_BASE + 0x44)
#define PCIE_LM_RCBAR			(PCIE_LM_BASE + 0x300)
#define PCIE_LM_RCBARPIE		(1 << 19)
#define PCIE_LM_RCBARPIS		(1 << 20)

#define PCIE_RC_BASE			0xa00000
#define PCIE_RC_PCIE_LCAP		(PCIE_RC_BASE + 0x0cc)
#define PCIE_RC_PCIE_LCAP_APMS_L0S	(1 << 10)
#define PCIE_RC_LCSR			(PCIE_RC_BASE + 0x0d0)
#define PCIE_RC_LCSR2			(PCIE_RC_BASE + 0x0f0)

#define PCIE_ATR_BASE			0xc00000
#define PCIE_ATR_OB_ADDR0(i)		(PCIE_ATR_BASE + 0x000 + (i) * 0x20)
#define PCIE_ATR_OB_ADDR1(i)		(PCIE_ATR_BASE + 0x004 + (i) * 0x20)
#define PCIE_ATR_OB_DESC0(i)		(PCIE_ATR_BASE + 0x008 + (i) * 0x20)
#define PCIE_ATR_OB_DESC1(i)		(PCIE_ATR_BASE + 0x00c + (i) * 0x20)
#define PCIE_ATR_IB_ADDR0(i)		(PCIE_ATR_BASE + 0x800 + (i) * 0x8)
#define PCIE_ATR_IB_ADDR1(i)		(PCIE_ATR_BASE + 0x804 + (i) * 0x8)
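/*
 * Transaction "header type" values for the outbound ATR descriptors;
 * rkpcie_atr_init() below writes these into the OB_DESC0 registers to
 * select the kind of PCIe request (memory, I/O or configuration)
 * generated for accesses that hit the corresponding AXI region.
 */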
#define PCIE_ATR_HDR_MEM		0x2
#define PCIE_ATR_HDR_IO			0x6
#define PCIE_ATR_HDR_CFG_TYPE0		0xa
#define PCIE_ATR_HDR_CFG_TYPE1		0xb
#define PCIE_ATR_HDR_RID		(1 << 23)

#define PCIE_ATR_OB_REGION0_SIZE	(32 * 1024 * 1024)
#define PCIE_ATR_OB_REGION_SIZE		(1 * 1024 * 1024)

#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

struct rkpcie_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_space_handle_t	sc_axi_ioh;
	bus_addr_t		sc_axi_addr;
	bus_addr_t		sc_apb_addr;
	int			sc_node;
	int			sc_phy_node;

	struct machine_pci_chipset sc_pc;
	struct extent		*sc_busex;
	struct extent		*sc_memex;
	struct extent		*sc_ioex;
	int			sc_bus;
};

int	rkpcie_match(struct device *, void *, void *);
void	rkpcie_attach(struct device *, struct device *, void *);

const struct cfattach rkpcie_ca = {
	sizeof (struct rkpcie_softc), rkpcie_match, rkpcie_attach
};

struct cfdriver rkpcie_cd = {
	NULL, "rkpcie", DV_DULL
};

int
rkpcie_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "rockchip,rk3399-pcie");
}

void	rkpcie_atr_init(struct rkpcie_softc *);
void	rkpcie_phy_init(struct rkpcie_softc *);
void	rkpcie_phy_poweron(struct rkpcie_softc *);

void	rkpcie_attach_hook(struct device *, struct device *,
	    struct pcibus_attach_args *);
int	rkpcie_bus_maxdevs(void *, int);
pcitag_t rkpcie_make_tag(void *, int, int, int);
void	rkpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
int	rkpcie_conf_size(void *, pcitag_t);
pcireg_t rkpcie_conf_read(void *, pcitag_t, int);
void	rkpcie_conf_write(void *, pcitag_t, int, pcireg_t);
int	rkpcie_probe_device_hook(void *, struct pci_attach_args *);

int	rkpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
const char *rkpcie_intr_string(void *, pci_intr_handle_t);
void	*rkpcie_intr_establish(void *, pci_intr_handle_t, int,
	    struct cpu_info *, int (*)(void *), void *, char *);
void	rkpcie_intr_disestablish(void *, void *);

/*
 * During link training, the LTSSM Configuration state exits to the L0
 * state on success.  Wait for the L0 state before proceeding after link
 * training has been initiated, either by PCIE_CLIENT_LINK_TRAIN_EN or
 * when triggered via the LCSR Retrain Link bit.  See PCIE 2.0 Base
 * Specification, 4.2.6.3.6 Configuration.Idle.
 *
 * Checking link up alone is not sufficient to detect the L0 state.  LTSSM
 * state L0 can be detected when link up is set and link training is
 * cleared.  See PCIE 2.0 Base Specification, 4.2.6 Link Training and
 * Status State Rules, Table 4-8 Link Status Mapped to the LTSSM.
 *
 * However, the RC doesn't set the link training bit when training is
 * initially started via PCIE_CLIENT_LINK_TRAIN_EN.  Fortunately, the RC
 * provides a debug register that exposes the LTSSM state, which can be
 * checked instead.
 *
 * It is important to have reached the L0 state before beginning Gen 2
 * training, as it is documented that setting the Retrain Link bit while
 * in the Recovery or Configuration states is a race condition that may
 * result in missing the retraining.
 * See PCIE 2.0 Base Specification, 7.8.7 Link Control Register
 * implementation notes on the Retrain Link bit.
 */

static int
rkpcie_link_training_wait(struct rkpcie_softc *sc)
{
	uint32_t status;
	int timo;

	for (timo = 500; timo > 0; timo--) {
		status = HREAD4(sc, PCIE_CLIENT_DEBUG_OUT_0);
		if ((status & PCIE_CLIENT_DEBUG_LTSSM_MASK) ==
		    PCIE_CLIENT_DEBUG_LTSSM_L0)
			break;
		delay(1000);
	}
	return timo == 0;
}

void
rkpcie_attach(struct device *parent, struct device *self, void *aux)
{
	struct rkpcie_softc *sc = (struct rkpcie_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct pcibus_attach_args pba;
	uint32_t *ep_gpio = NULL;
	uint32_t bus_range[2];
	uint32_t status;
	uint32_t max_link_speed;
	int len;

	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_axi_ioh)) {
		printf(": can't map AXI registers\n");
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[1].size);
		return;
	}

	sc->sc_axi_addr = faa->fa_reg[0].addr;
	sc->sc_apb_addr = faa->fa_reg[1].addr;
	sc->sc_node = faa->fa_node;
	printf("\n");

	len = OF_getproplen(sc->sc_node, "ep-gpios");
	if (len > 0) {
		ep_gpio = malloc(len, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "ep-gpios", ep_gpio, len);
	}

	max_link_speed = OF_getpropint(sc->sc_node, "max-link-speed", 1);

	clock_enable_all(sc->sc_node);

	regulator_enable(OF_getpropint(sc->sc_node, "vpcie12v-supply", 0));
	regulator_enable(OF_getpropint(sc->sc_node, "vpcie3v3-supply", 0));
	regulator_enable(OF_getpropint(sc->sc_node, "vpcie1v8-supply", 0));
	regulator_enable(OF_getpropint(sc->sc_node, "vpcie0v9-supply", 0));

	if (ep_gpio) {
		gpio_controller_config_pin(ep_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(ep_gpio, 0);
	}

	reset_assert(sc->sc_node, "aclk");
	reset_assert(sc->sc_node, "pclk");
	reset_assert(sc->sc_node, "pm");

	rkpcie_phy_init(sc);

	reset_assert(sc->sc_node, "core");
	reset_assert(sc->sc_node, "mgmt");
	reset_assert(sc->sc_node, "mgmt-sticky");
	reset_assert(sc->sc_node, "pipe");

	delay(10);

	reset_deassert(sc->sc_node, "pm");
	reset_deassert(sc->sc_node, "aclk");
	reset_deassert(sc->sc_node, "pclk");

	if (max_link_speed > 1)
		status = PCIE_CLIENT_PCIE_GEN_SEL_2;
	else
		status = PCIE_CLIENT_PCIE_GEN_SEL_1;

	/* Switch into Root Complex mode. */
	HWRITE4(sc, PCIE_CLIENT_BASIC_STRAP_CONF, PCIE_CLIENT_MODE_SELECT_RC
	    | PCIE_CLIENT_CONF_EN | status);

	rkpcie_phy_poweron(sc);

	reset_deassert(sc->sc_node, "core");
	reset_deassert(sc->sc_node, "mgmt");
	reset_deassert(sc->sc_node, "mgmt-sticky");
	reset_deassert(sc->sc_node, "pipe");

	/*
	 * Work around an RC bug where the Target Link Speed is not set by
	 * GEN_SEL_2.
	 */
	if (max_link_speed > 1) {
		status = HREAD4(sc, PCIE_RC_LCSR2);
		status &= ~PCI_PCIE_LCSR2_TLS;
		status |= PCI_PCIE_LCSR2_TLS_5;
		HWRITE4(sc, PCIE_RC_LCSR2, status);
	}

	/* Start link training. */
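	/*
	 * Note: the upper halfword of the PCIE_CLIENT_* values looks like
	 * a per-bit write-enable mask (the usual Rockchip scheme), so this
	 * write should only touch the link training enable bit and leave
	 * the other strap bits alone.
	 */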
	HWRITE4(sc, PCIE_CLIENT_BASIC_STRAP_CONF, PCIE_CLIENT_LINK_TRAIN_EN);

	/* XXX Advertise power limits? */

	if (ep_gpio) {
		gpio_controller_set_pin(ep_gpio, 1);
		free(ep_gpio, M_TEMP, len);
	}

	if (rkpcie_link_training_wait(sc)) {
		printf("%s: link training timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	if (max_link_speed > 1) {
		status = HREAD4(sc, PCIE_RC_LCSR);
		if ((status & PCI_PCIE_LCSR_CLS) == PCI_PCIE_LCSR_CLS_2_5) {
			HWRITE4(sc, PCIE_RC_LCSR, HREAD4(sc, PCIE_RC_LCSR) |
			    PCI_PCIE_LCSR_RL);

			if (rkpcie_link_training_wait(sc)) {
				/* didn't make it back to L0 state */
				printf("%s: gen2 link training timeout\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	/*
	 * XXX On at least the RockPro64, many cards will panic when first
	 * accessing PCIe config space during bus scanning.  A delay after
	 * link training allows some of these cards to function.
	 */
	delay(2000000);

	/* Initialize Root Complex registers. */
	HWRITE4(sc, PCIE_LM_VENDOR_ID, PCI_VENDOR_ROCKCHIP);
	HWRITE4(sc, PCIE_RC_BASE + PCI_CLASS_REG,
	    PCI_CLASS_BRIDGE << PCI_CLASS_SHIFT |
	    PCI_SUBCLASS_BRIDGE_PCI << PCI_SUBCLASS_SHIFT);
	HWRITE4(sc, PCIE_LM_RCBAR, PCIE_LM_RCBARPIE | PCIE_LM_RCBARPIS);

	if (OF_getproplen(sc->sc_node, "aspm-no-l0s") == 0) {
		status = HREAD4(sc, PCIE_RC_PCIE_LCAP);
		status &= ~PCIE_RC_PCIE_LCAP_APMS_L0S;
		HWRITE4(sc, PCIE_RC_PCIE_LCAP, status);
	}

	/* Create extents for our address spaces. */
	sc->sc_busex = extent_create("pcibus", 0, 255,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_FILLED);
	sc->sc_memex = extent_create("pcimem", 0, (u_long)-1,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_FILLED);
	sc->sc_ioex = extent_create("pciio", 0, 0xffffffff,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_FILLED);

	/* Set up bus range. */
	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
	    sizeof(bus_range)) != sizeof(bus_range) ||
	    bus_range[0] >= 32 || bus_range[1] >= 32) {
		bus_range[0] = 0;
		bus_range[1] = 31;
	}
	sc->sc_bus = bus_range[0];
	extent_free(sc->sc_busex, bus_range[0],
	    bus_range[1] - bus_range[0] + 1, EX_WAITOK);

	/* Configure Address Translation. */
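	/*
	 * rkpcie_atr_init() claims outbound region 0 (32 MB) for
	 * configuration requests and programs the remaining 1 MB regions
	 * from the "ranges" property as memory and I/O windows.
	 */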
	rkpcie_atr_init(sc);

	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = rkpcie_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = rkpcie_bus_maxdevs;
	sc->sc_pc.pc_make_tag = rkpcie_make_tag;
	sc->sc_pc.pc_decompose_tag = rkpcie_decompose_tag;
	sc->sc_pc.pc_conf_size = rkpcie_conf_size;
	sc->sc_pc.pc_conf_read = rkpcie_conf_read;
	sc->sc_pc.pc_conf_write = rkpcie_conf_write;
	sc->sc_pc.pc_probe_device_hook = rkpcie_probe_device_hook;

	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = rkpcie_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = rkpcie_intr_string;
	sc->sc_pc.pc_intr_establish = rkpcie_intr_establish;
	sc->sc_pc.pc_intr_disestablish = rkpcie_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = faa->fa_iot;
	pba.pba_memt = faa->fa_iot;
	pba.pba_dmat = faa->fa_dmat;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_busex = sc->sc_busex;
	pba.pba_memex = sc->sc_memex;
	pba.pba_ioex = sc->sc_ioex;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;
	pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;

	config_found(self, &pba, NULL);
}

void
rkpcie_atr_init(struct rkpcie_softc *sc)
{
	uint32_t *ranges = NULL;
	struct extent *ex;
	bus_addr_t addr;
	bus_size_t size, offset;
	uint32_t type;
	int len, region;
	int i;

	/* Use region 0 to map PCI configuration space. */
	HWRITE4(sc, PCIE_ATR_OB_ADDR0(0), 25 - 1);
	HWRITE4(sc, PCIE_ATR_OB_ADDR1(0), 0);
	HWRITE4(sc, PCIE_ATR_OB_DESC0(0),
	    PCIE_ATR_HDR_CFG_TYPE0 | PCIE_ATR_HDR_RID);
	HWRITE4(sc, PCIE_ATR_OB_DESC1(0), 0);

	len = OF_getproplen(sc->sc_node, "ranges");
	if (len <= 0 || (len % (7 * sizeof(uint32_t))) != 0)
		goto fail;
	ranges = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges, len);

	for (i = 0; i < len / sizeof(uint32_t); i += 7) {
		/* Handle IO and MMIO. */
		switch (ranges[i] & 0x03000000) {
		case 0x01000000:
			type = PCIE_ATR_HDR_IO;
			ex = sc->sc_ioex;
			break;
		case 0x02000000:
		case 0x03000000:
			type = PCIE_ATR_HDR_MEM;
			ex = sc->sc_memex;
			break;
		default:
			continue;
		}

		/* Only support identity mappings. */
		if (ranges[i + 1] != ranges[i + 3] ||
		    ranges[i + 2] != ranges[i + 4])
			goto fail;

		/* Only support mappings aligned on a region boundary. */
		addr = ((uint64_t)ranges[i + 1] << 32) + ranges[i + 2];
		if (addr & (PCIE_ATR_OB_REGION_SIZE - 1))
			goto fail;

		/* Mappings should lie between AXI and APB regions. */
		size = ranges[i + 6];
		if (addr < sc->sc_axi_addr + PCIE_ATR_OB_REGION0_SIZE)
			goto fail;
		if (addr + size > sc->sc_apb_addr)
			goto fail;

		offset = addr - sc->sc_axi_addr - PCIE_ATR_OB_REGION0_SIZE;
		region = 1 + (offset / PCIE_ATR_OB_REGION_SIZE);
		while (size > 0) {
			HWRITE4(sc, PCIE_ATR_OB_ADDR0(region), 32 - 1);
			HWRITE4(sc, PCIE_ATR_OB_ADDR1(region), 0);
			HWRITE4(sc, PCIE_ATR_OB_DESC0(region),
			    type | PCIE_ATR_HDR_RID);
			HWRITE4(sc, PCIE_ATR_OB_DESC1(region), 0);

			extent_free(ex, addr, PCIE_ATR_OB_REGION_SIZE,
			    EX_WAITOK);
			addr += PCIE_ATR_OB_REGION_SIZE;
			size -= PCIE_ATR_OB_REGION_SIZE;
			region++;
		}
	}

	/* Pass inbound translations through unmodified. */
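	/*
	 * The low bits of IB_ADDR0 appear to encode the number of
	 * pass-through address bits minus one, so 32 - 1 should forward
	 * the full 32-bit inbound (DMA) address space to the AXI bus
	 * unchanged.
	 */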
	HWRITE4(sc, PCIE_ATR_IB_ADDR0(2), 32 - 1);
	HWRITE4(sc, PCIE_ATR_IB_ADDR1(2), 0);

	free(ranges, M_TEMP, len);
	return;

fail:
	printf("%s: can't map ranges\n", sc->sc_dev.dv_xname);
	free(ranges, M_TEMP, len);
}

void
rkpcie_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}

int
rkpcie_bus_maxdevs(void *v, int bus)
{
	struct rkpcie_softc *sc = v;

	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
		return 1;
	return 32;
}

pcitag_t
rkpcie_make_tag(void *v, int bus, int device, int function)
{
	/* Return ECAM address. */
	return ((bus << 20) | (device << 15) | (function << 12));
}

void
rkpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
{
	if (bp != NULL)
		*bp = (tag >> 20) & 0xff;
	if (dp != NULL)
		*dp = (tag >> 15) & 0x1f;
	if (fp != NULL)
		*fp = (tag >> 12) & 0x7;
}

int
rkpcie_conf_size(void *v, pcitag_t tag)
{
	return PCIE_CONFIG_SPACE_SIZE;
}

pcireg_t
rkpcie_conf_read(void *v, pcitag_t tag, int reg)
{
	struct rkpcie_softc *sc = v;
	int bus, dev, fn;

	rkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		return HREAD4(sc, PCIE_RC_NORMAL_BASE + tag | reg);
	}
	if (bus == sc->sc_bus + 1) {
		KASSERT(dev == 0);
		return bus_space_read_4(sc->sc_iot, sc->sc_axi_ioh, tag | reg);
	}

	return 0xffffffff;
}

void
rkpcie_conf_write(void *v, pcitag_t tag, int reg, pcireg_t data)
{
	struct rkpcie_softc *sc = v;
	int bus, dev, fn;

	rkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		HWRITE4(sc, PCIE_RC_NORMAL_BASE + tag | reg, data);
		return;
	}
	if (bus == sc->sc_bus + 1) {
		KASSERT(dev == 0);
		bus_space_write_4(sc->sc_iot, sc->sc_axi_ioh, tag | reg, data);
		return;
	}
}

int
rkpcie_probe_device_hook(void *v, struct pci_attach_args *pa)
{
	return 0;
}

int
rkpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
{
	int pin = pa->pa_rawintrpin;

	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
		return -1;

	if (pa->pa_tag == 0)
		return -1;

	ihp->ih_pc = pa->pa_pc;
	ihp->ih_tag = pa->pa_intrtag;
	ihp->ih_intrpin = pa->pa_intrpin;
	ihp->ih_type = PCI_INTX;

	return 0;
}

const char *
rkpcie_intr_string(void *v, pci_intr_handle_t ih)
{
	switch (ih.ih_type) {
	case PCI_MSI:
		return "msi";
	case PCI_MSIX:
		return "msix";
	}

	return "intx";
}

void *
rkpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct rkpcie_softc *sc = v;
	void *cookie;

	KASSERT(ih.ih_type != PCI_NONE);

	if (ih.ih_type != PCI_INTX) {
		uint64_t addr, data;

		/* Assume hardware passes Requester ID as sideband data. */
		data = pci_requester_id(ih.ih_pc, ih.ih_tag);
		cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
		    &data, level, ci, func, arg, name);
		if (cookie == NULL)
			return NULL;

		/* TODO: translate address to the PCI device's view */

		if (ih.ih_type == PCI_MSIX) {
			pci_msix_enable(ih.ih_pc, ih.ih_tag,
			    sc->sc_iot, ih.ih_intrpin, addr, data);
		} else
			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
	} else {
		/* Unmask legacy interrupts. */
		HWRITE4(sc, PCIE_CLIENT_INT_MASK,
		    PCIE_CLIENT_INTA_UNMASK | PCIE_CLIENT_INTB_UNMASK |
		    PCIE_CLIENT_INTC_UNMASK | PCIE_CLIENT_INTD_UNMASK);

		cookie = fdt_intr_establish_idx_cpu(sc->sc_node, 1, level,
		    ci, func, arg, name);
	}

	return cookie;
}

void
rkpcie_intr_disestablish(void *v, void *cookie)
{
}

/*
 * PHY Support.
 */

#define RK3399_GRF_SOC_CON5_PCIE	0xe214
#define RK3399_TX_ELEC_IDLE_OFF_MASK	((1 << 3) << 16)
#define RK3399_TX_ELEC_IDLE_OFF		(1 << 3)
#define RK3399_GRF_SOC_CON8		0xe220
#define RK3399_PCIE_TEST_DATA_MASK	((0xf << 7) << 16)
#define RK3399_PCIE_TEST_DATA_SHIFT	7
#define RK3399_PCIE_TEST_ADDR_MASK	((0x3f << 1) << 16)
#define RK3399_PCIE_TEST_ADDR_SHIFT	1
#define RK3399_PCIE_TEST_WRITE_ENABLE	(((1 << 0) << 16) | (1 << 0))
#define RK3399_PCIE_TEST_WRITE_DISABLE	(((1 << 0) << 16) | (0 << 0))
#define RK3399_GRF_SOC_STATUS1		0xe2a4
#define RK3399_PCIE_PHY_PLL_LOCKED	(1 << 9)
#define RK3399_PCIE_PHY_PLL_OUTPUT	(1 << 10)

#define RK3399_PCIE_PHY_CFG_PLL_LOCK	0x10
#define RK3399_PCIE_PHY_CFG_CLK_TEST	0x10
#define RK3399_PCIE_PHY_CFG_SEPE_RATE	(1 << 3)
#define RK3399_PCIE_PHY_CFG_CLK_SCC	0x12
#define RK3399_PCIE_PHY_CFG_PLL_100M	(1 << 3)

void
rkpcie_phy_init(struct rkpcie_softc *sc)
{
	uint32_t phys[8];
	int len;

	len = OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys));
	if (len < sizeof(phys[0]))
		return;

	sc->sc_phy_node = OF_getnodebyphandle(phys[0]);
	if (sc->sc_phy_node == 0)
		return;

	clock_set_assigned(sc->sc_phy_node);
	clock_enable(sc->sc_phy_node, "refclk");
	reset_assert(sc->sc_phy_node, "phy");
}

void
rkpcie_phy_write_conf(struct regmap *rm, uint8_t addr, uint8_t data)
{
	regmap_write_4(rm, RK3399_GRF_SOC_CON8,
	    RK3399_PCIE_TEST_ADDR_MASK |
	    (addr << RK3399_PCIE_TEST_ADDR_SHIFT) |
	    RK3399_PCIE_TEST_DATA_MASK |
	    (data << RK3399_PCIE_TEST_DATA_SHIFT) |
	    RK3399_PCIE_TEST_WRITE_DISABLE);
	delay(1);
	regmap_write_4(rm, RK3399_GRF_SOC_CON8,
	    RK3399_PCIE_TEST_WRITE_ENABLE);
	delay(1);
	regmap_write_4(rm, RK3399_GRF_SOC_CON8,
	    RK3399_PCIE_TEST_WRITE_DISABLE);
}

void
rkpcie_phy_poweron(struct rkpcie_softc *sc)
{
	struct regmap *rm;
	uint32_t status;
	int lane = 0;
	int timo;

	reset_deassert(sc->sc_phy_node, "phy");

	rm = regmap_bynode(OF_parent(sc->sc_phy_node));
	if (rm == NULL)
		return;

	regmap_write_4(rm, RK3399_GRF_SOC_CON8,
	    RK3399_PCIE_TEST_ADDR_MASK |
	    RK3399_PCIE_PHY_CFG_PLL_LOCK << RK3399_PCIE_TEST_ADDR_SHIFT);
	regmap_write_4(rm, RK3399_GRF_SOC_CON5_PCIE,
	    RK3399_TX_ELEC_IDLE_OFF_MASK << lane | 0);

	for (timo = 50; timo > 0; timo--) {
		status = regmap_read_4(rm, RK3399_GRF_SOC_STATUS1);
		if (status & RK3399_PCIE_PHY_PLL_LOCKED)
			break;
		delay(20000);
	}
	if (timo == 0) {
		printf("%s: PHY PLL lock timeout\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	rkpcie_phy_write_conf(rm, RK3399_PCIE_PHY_CFG_CLK_TEST,
	    RK3399_PCIE_PHY_CFG_SEPE_RATE);
	rkpcie_phy_write_conf(rm, RK3399_PCIE_PHY_CFG_CLK_SCC,
	    RK3399_PCIE_PHY_CFG_PLL_100M);

	for (timo = 50; timo > 0; timo--) {
		status = regmap_read_4(rm, RK3399_GRF_SOC_STATUS1);
		if ((status & RK3399_PCIE_PHY_PLL_OUTPUT) == 0)
			break;
		delay(20000);
	}
	if (timo == 0) {
		printf("%s: PHY PLL output enable timeout\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	regmap_write_4(rm, RK3399_GRF_SOC_CON8,
	    RK3399_PCIE_TEST_ADDR_MASK |
	    RK3399_PCIE_PHY_CFG_PLL_LOCK << RK3399_PCIE_TEST_ADDR_SHIFT);

	for (timo = 50; timo > 0; timo--) {
		status = regmap_read_4(rm, RK3399_GRF_SOC_STATUS1);
		if (status & RK3399_PCIE_PHY_PLL_LOCKED)
			break;
		delay(20000);
	}
	if (timo == 0) {
		printf("%s: PHY PLL relock timeout\n", sc->sc_dev.dv_xname);
		return;
	}
}