/* $NetBSD: acpi_pci_layerscape_gen4.c,v 1.2 2020/02/02 16:44:25 jmcneill Exp $ */

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NXP Layerscape PCIe Gen4 controller (not ECAM compliant)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_pci_layerscape_gen4.c,v 1.2 2020/02/02 16:44:25 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/extent.h>
#include <sys/kmem.h>
#include <sys/mutex.h>

#include <machine/cpu.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pciconf.h>

#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#include <dev/acpi/acpi_mcfg.h>

#include <arm/acpi/acpi_pci_machdep.h>

#define	PAB_CTRL			0x808
#define	 PAB_CTRL_PAGE_SEL		__BITS(18,13)
#define	PAB_AXI_AMAP_PEX_WIN_L(x)	(0xba8 + 0x10 * (x))
#define	PAB_AXI_AMAP_PEX_WIN_H(x)	(0xbac + 0x10 * (x))
#define	INDIRECT_ADDR_BOUNDARY		0xc00

#define	LUT_BASE			0x80000
#define	LUT_GCR				0x28
#define	 LUT_GCR_RRE			__BIT(0)

#define	REG_TO_PAGE_INDEX(reg)	(((reg) >> 10) & 0x3ff)
#define	REG_TO_PAGE_ADDR(reg)	(((reg) & 0x3ff) | INDIRECT_ADDR_BOUNDARY)

#define	PAB_TARGET_BUS(b)	((b) << 24)
#define	PAB_TARGET_DEV(d)	((d) << 19)
#define	PAB_TARGET_FUNC(f)	((f) << 16)

struct acpi_pci_layerscape_gen4 {
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	bus_space_handle_t win_bsh;
	uint8_t rev;
	kmutex_t lock;
};

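/*
 * Bridge (CCSR) registers are accessed through a paged 1KB window:
 * offsets below INDIRECT_ADDR_BOUNDARY are reachable directly, while
 * higher offsets require programming the page index (bits 10 and up of
 * the register offset) into the PAB_CTRL page select field and then
 * accessing the low 10 bits of the offset within the indirect window
 * at 0xc00.
 */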
static void
acpi_pci_layerscape_gen4_ccsr_setpage(struct acpi_pci_layerscape_gen4 *pcie, u_int page_index)
{
	uint32_t val;

	val = bus_space_read_4(pcie->bst, pcie->bsh, PAB_CTRL);
	val &= ~PAB_CTRL_PAGE_SEL;
	val |= __SHIFTIN(page_index, PAB_CTRL_PAGE_SEL);
	bus_space_write_4(pcie->bst, pcie->bsh, PAB_CTRL, val);
}

static uint32_t
acpi_pci_layerscape_gen4_ccsr_read4(struct acpi_pci_layerscape_gen4 *pcie,
    bus_size_t reg)
{
	const bool indirect = reg >= INDIRECT_ADDR_BOUNDARY;
	const u_int page_index = indirect ? REG_TO_PAGE_INDEX(reg) : 0;
	const bus_size_t page_addr = indirect ? REG_TO_PAGE_ADDR(reg) : reg;

	acpi_pci_layerscape_gen4_ccsr_setpage(pcie, page_index);
	return bus_space_read_4(pcie->bst, pcie->bsh, page_addr);
}

static void
acpi_pci_layerscape_gen4_ccsr_write4(struct acpi_pci_layerscape_gen4 *pcie,
    bus_size_t reg, pcireg_t data)
{
	const bool indirect = reg >= INDIRECT_ADDR_BOUNDARY;
	const u_int page_index = indirect ? REG_TO_PAGE_INDEX(reg) : 0;
	const bus_size_t page_addr = indirect ? REG_TO_PAGE_ADDR(reg) : reg;

	acpi_pci_layerscape_gen4_ccsr_setpage(pcie, page_index);
	bus_space_write_4(pcie->bst, pcie->bsh, page_addr, data);
}

/*
 * Steer configuration accesses that go through the outbound window
 * (win_bsh) at the target bus/device/function by programming AXI AMAP
 * PEX window 0.
 */
static void
acpi_pci_layerscape_gen4_select_target(struct acpi_pci_layerscape_gen4 *pcie,
    pci_chipset_tag_t pc, pcitag_t tag)
{
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	const uint32_t target = PAB_TARGET_BUS(b) |
	    PAB_TARGET_DEV(d) | PAB_TARGET_FUNC(f);

	acpi_pci_layerscape_gen4_ccsr_write4(pcie, PAB_AXI_AMAP_PEX_WIN_L(0), target);
	acpi_pci_layerscape_gen4_ccsr_write4(pcie, PAB_AXI_AMAP_PEX_WIN_H(0), 0);
}

static bool
acpi_pci_layerscape_gen4_is_tag_okay(pci_chipset_tag_t pc, pcitag_t tag, int reg)
{
	struct acpi_pci_context *ap = pc->pc_conf_v;
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	/* Only device 0 is valid on the root bus and the first subordinate bus. */
	if (b <= ap->ap_bus + 1 && d > 0)
		return false;

	if (b != ap->ap_bus)
		return acpimcfg_conf_valid(pc, tag, reg);

	return true;
}

static int
acpi_pci_layerscape_gen4_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t *data)
{
	struct acpi_pci_context *ap = pc->pc_conf_v;
	struct acpi_pci_layerscape_gen4 *pcie = ap->ap_conf_priv;
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	if (!acpi_pci_layerscape_gen4_is_tag_okay(pc, tag, reg)) {
		*data = -1;
		return EINVAL;
	}

	mutex_enter(&pcie->lock);

	/*
	 * On rev 0x10 controllers, clear LUT_GCR_RRE while reading the ID
	 * register and restore it afterwards; this appears to be a
	 * workaround for early silicon.
	 */
	if (pcie->rev == 0x10 && reg == PCI_ID_REG)
		bus_space_write_4(pcie->bst, pcie->bsh, LUT_BASE + LUT_GCR, 0);

	if (b == ap->ap_bus) {
		*data = acpi_pci_layerscape_gen4_ccsr_read4(pcie, reg);
	} else {
		acpi_pci_layerscape_gen4_select_target(pcie, pc, tag);
		*data = bus_space_read_4(pcie->bst, pcie->win_bsh, reg);
	}

	if (pcie->rev == 0x10 && reg == PCI_ID_REG)
		bus_space_write_4(pcie->bst, pcie->bsh, LUT_BASE + LUT_GCR, LUT_GCR_RRE);

	mutex_exit(&pcie->lock);

	return 0;
}

static int
acpi_pci_layerscape_gen4_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
{
	struct acpi_pci_context *ap = pc->pc_conf_v;
	struct acpi_pci_layerscape_gen4 *pcie = ap->ap_conf_priv;
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	if (!acpi_pci_layerscape_gen4_is_tag_okay(pc, tag, reg))
		return EINVAL;

	mutex_enter(&pcie->lock);

	if (b == ap->ap_bus) {
		acpi_pci_layerscape_gen4_ccsr_write4(pcie, reg, data);
	} else {
		acpi_pci_layerscape_gen4_select_target(pcie, pc, tag);
		bus_space_write_4(pcie->bst, pcie->win_bsh, reg, data);
	}

	mutex_exit(&pcie->lock);

	return 0;
}

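/*
 * The controller is not ECAM compliant, but firmware still publishes an
 * MCFG entry for its segment; that entry's address is used as the base
 * of the outbound window through which configuration accesses to
 * downstream buses are made.
 */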
static UINT64
acpi_pci_layerscape_win_base(ACPI_INTEGER seg)
{
	ACPI_TABLE_MCFG *mcfg;
	ACPI_MCFG_ALLOCATION *ama;
	ACPI_STATUS rv;
	uint32_t off;
	int i;

	rv = AcpiGetTable(ACPI_SIG_MCFG, 0, (ACPI_TABLE_HEADER **)&mcfg);
	if (ACPI_FAILURE(rv))
		return 0;

	off = sizeof(ACPI_TABLE_MCFG);
	ama = ACPI_ADD_PTR(ACPI_MCFG_ALLOCATION, mcfg, off);
	for (i = 0; off + sizeof(ACPI_MCFG_ALLOCATION) <= mcfg->Header.Length; i++) {
		if (ama->PciSegment == seg)
			return ama->Address;
		off += sizeof(ACPI_MCFG_ALLOCATION);
		ama = ACPI_ADD_PTR(ACPI_MCFG_ALLOCATION, mcfg, off);
	}

	return 0;	/* not found */
}

static ACPI_STATUS
acpi_pci_layerscape_gen4_map(ACPI_HANDLE handle, UINT32 level, void *ctx, void **retval)
{
	struct acpi_pci_context *ap = ctx;
	struct acpi_resources res;
	struct acpi_mem *mem;
	struct acpi_pci_layerscape_gen4 *pcie;
	bus_space_handle_t bsh;
	ACPI_HANDLE parent;
	ACPI_INTEGER seg;
	ACPI_STATUS rv;
	UINT64 win_base;
	int error;

	rv = AcpiGetParent(handle, &parent);
	if (ACPI_FAILURE(rv))
		return rv;
	rv = acpi_eval_integer(parent, "_SEG", &seg);
	if (ACPI_FAILURE(rv))
		seg = 0;
	if (ap->ap_seg != seg)
		return AE_OK;

	rv = acpi_resource_parse(ap->ap_dev, handle, "_CRS", &res, &acpi_resource_parse_ops_quiet);
	if (ACPI_FAILURE(rv))
		return rv;

	mem = acpi_res_mem(&res, 0);
	if (mem == NULL) {
		acpi_resource_cleanup(&res);
		return AE_NOT_FOUND;
	}

	win_base = acpi_pci_layerscape_win_base(seg);
	if (win_base == 0) {
		aprint_error_dev(ap->ap_dev, "couldn't find MCFG entry for segment %lu\n", seg);
		acpi_resource_cleanup(&res);
		return AE_NOT_FOUND;
	}

	error = bus_space_map(ap->ap_bst, mem->ar_base, mem->ar_length,
	    _ARM_BUS_SPACE_MAP_STRONGLY_ORDERED, &bsh);
	if (error != 0)
		return AE_NO_MEMORY;

	pcie = kmem_alloc(sizeof(*pcie), KM_SLEEP);
	pcie->bst = ap->ap_bst;
	pcie->bsh = bsh;
	mutex_init(&pcie->lock, MUTEX_DEFAULT, IPL_HIGH);

	error = bus_space_map(ap->ap_bst, win_base, PCI_EXTCONF_SIZE,
	    _ARM_BUS_SPACE_MAP_STRONGLY_ORDERED, &pcie->win_bsh);
	if (error != 0)
		return AE_NO_MEMORY;

	const pcireg_t cr = bus_space_read_4(pcie->bst, pcie->bsh, PCI_CLASS_REG);
	pcie->rev = PCI_REVISION(cr);

	ap->ap_conf_read = acpi_pci_layerscape_gen4_conf_read;
	ap->ap_conf_write = acpi_pci_layerscape_gen4_conf_write;
	ap->ap_conf_priv = pcie;

	aprint_verbose_dev(ap->ap_dev,
	    "PCIe segment %lu: Layerscape Gen4 rev. %#x found at %#lx-%#lx\n",
	    seg, pcie->rev, mem->ar_base, mem->ar_base + mem->ar_length - 1);

	return AE_CTRL_TERMINATE;
}

void
acpi_pci_layerscape_gen4_init(struct acpi_pci_context *ap)
{
	ACPI_STATUS rv;

	rv = AcpiGetDevices(__UNCONST("NXP0016"), acpi_pci_layerscape_gen4_map, ap, NULL);
	if (ACPI_FAILURE(rv))
		return;
}