/* $NetBSD: gic_v2m.c,v 1.8 2019/12/02 03:06:51 msaitoh Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gic_v2m.c,v 1.8 2019/12/02 03:06:51 msaitoh Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/bitops.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <arm/pic/picvar.h>
#include <arm/cortex/gic_v2m.h>

/*
 * Doorbell address for an SPI.  A standard GICv2m frame exposes a single
 * GIC_MSI_SETSPI register and encodes the target SPI in the MSI data;
 * the Graviton variant provides one doorbell register per SPI (8 bytes
 * apart, starting at SPI 32) and uses a data value of 0.
 */
static uint64_t
gic_v2m_msi_addr(struct gic_v2m_frame *frame, int spi)
{
	if ((frame->frame_flags & GIC_V2M_FLAG_GRAVITON) != 0)
		return frame->frame_reg + ((spi - 32) << 3);

	return frame->frame_reg + GIC_MSI_SETSPI;
}

static uint32_t
gic_v2m_msi_data(struct gic_v2m_frame *frame, int spi)
{
	if ((frame->frame_flags & GIC_V2M_FLAG_GRAVITON) != 0)
		return 0;

	return spi;
}

/*
 * Reserve a contiguous run of 'count' free SPIs in this frame for the
 * device described by 'pa'.  Returns the first SPI of the run, or -1
 * if no run is available.
 */
static int
gic_v2m_msi_alloc_spi(struct gic_v2m_frame *frame, int count,
    const struct pci_attach_args *pa)
{
	int spi, n;

	for (spi = frame->frame_base;
	     spi < frame->frame_base + frame->frame_count; ) {
		if (frame->frame_pa[spi] == NULL) {
			for (n = 1; n < count; n++)
				if (frame->frame_pa[spi + n] != NULL)
					goto next_spi;

			for (n = 0; n < count; n++)
				frame->frame_pa[spi + n] = pa;

			return spi;
		}
next_spi:
		spi += count;
	}

	return -1;
}

static void
gic_v2m_msi_free_spi(struct gic_v2m_frame *frame, int spi)
{
	frame->frame_pa[spi] = NULL;
}

static int
gic_v2m_msi_available_spi(struct gic_v2m_frame *frame)
{
	int spi, n;

	for (spi = frame->frame_base, n = 0;
	     spi < frame->frame_base + frame->frame_count;
	     spi++) {
		if (frame->frame_pa[spi] == NULL)
			n++;
	}

	return n;
}

/*
 * Program the device's MSI capability with the frame's doorbell address
 * and data for 'spi', set the multiple-message field from 'count', and
 * enable MSI delivery.
 */
static void
gic_v2m_msi_enable(struct gic_v2m_frame *frame, int spi, int count)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gic_v2m_msi_enable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MME_MASK;
	ctl |= __SHIFTIN(ilog2(count), PCI_MSI_CTL_MME_MASK);
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);

	const uint64_t addr = gic_v2m_msi_addr(frame, spi);
	const uint32_t data = gic_v2m_msi_data(frame, spi);

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI,
		    (addr >> 32) & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, data);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA, data);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
gic_v2m_msi_disable(struct gic_v2m_frame *frame, int spi)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gic_v2m_msi_disable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

/*
 * Program one MSI-X table entry with the doorbell for 'spi', unmask it,
 * and enable MSI-X on the device.  The caller maps the table and passes
 * it in via 'bst'/'bsh'.
 */
static void
gic_v2m_msix_enable(struct gic_v2m_frame *frame, int spi, int msix_vec,
    bus_space_tag_t bst, bus_space_handle_t bsh)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gic_v2m_msix_enable: device is not MSI-X-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);

	const uint64_t addr = gic_v2m_msi_addr(frame, spi);
	const uint32_t data = gic_v2m_msi_data(frame, spi);
	const uint64_t entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, (uint32_t)addr);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, (uint32_t)(addr >> 32));
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_DATA, data);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, 0);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

static void
gic_v2m_msix_disable(struct gic_v2m_frame *frame, int spi)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gic_v2m_msix_disable: device is not MSI-X-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

/*
 * arm_pci_msi hook: allocate up to *count MSI vectors.  Unless 'exact'
 * is set, the request is halved until it fits the number of free SPIs.
 */
static pci_intr_handle_t *
gic_v2m_msi_alloc(struct arm_pci_msi *msi, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gic_v2m_frame * const frame = msi->msi_priv;
	pci_intr_handle_t *vectors;
	int n, off;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &off, NULL))
		return NULL;

	const int avail = gic_v2m_msi_available_spi(frame);
	if (exact && *count > avail)
		return NULL;

	while (*count > avail) {
		if (avail < *count)
			(*count) >>= 1;
	}
	if (*count == 0)
		return NULL;

	const int spi_base = gic_v2m_msi_alloc_spi(frame, *count, pa);
	if (spi_base == -1)
		return NULL;

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int spi = spi_base + n;
		vectors[n] = ARM_PCI_INTR_MSI |
		    __SHIFTIN(spi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(n, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);
	}

	gic_v2m_msi_enable(frame, spi_base, *count);

	return vectors;
}

/*
 * arm_pci_msi hook: allocate MSI-X vectors and program the device's
 * MSI-X table, which is temporarily mapped from the BAR indicated by
 * the capability's table-offset register.
 */
static pci_intr_handle_t *
gic_v2m_msix_alloc(struct arm_pci_msi *msi, u_int *table_indexes, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gic_v2m_frame * const frame = msi->msi_priv;
	pci_intr_handle_t *vectors;
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	bus_size_t bsz;
	uint32_t table_offset, table_size;
	int n, off, bar, error;
	pcireg_t tbl;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &off, NULL))
		return NULL;

	const int avail = gic_v2m_msi_available_spi(frame);
	if (exact && *count > avail)
		return NULL;

	while (*count > avail) {
		if (avail < *count)
			(*count) >>= 1;
	}
	if (*count == 0)
		return NULL;

	tbl = pci_conf_read(pa->pa_pc, pa->pa_tag, off + PCI_MSIX_TBLOFFSET);
	bar = PCI_BAR0 + (4 * (tbl & PCI_MSIX_TBLBIR_MASK));
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	table_size = pci_msix_count(pa->pa_pc, pa->pa_tag) * PCI_MSIX_TABLE_ENTRY_SIZE;
	if (table_size == 0)
		return NULL;

	error = pci_mapreg_submap(pa, bar, pci_mapreg_type(pa->pa_pc, pa->pa_tag, bar),
	    BUS_SPACE_MAP_LINEAR, roundup(table_size, PAGE_SIZE), table_offset,
	    &bst, &bsh, NULL, &bsz);
	if (error)
		return NULL;

	const int spi_base = gic_v2m_msi_alloc_spi(frame, *count, pa);
	if (spi_base == -1) {
		bus_space_unmap(bst, bsh, bsz);
		return NULL;
	}

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int spi = spi_base + n;
		const int msix_vec = table_indexes ? table_indexes[n] : n;
		vectors[msix_vec] = ARM_PCI_INTR_MSIX |
		    __SHIFTIN(spi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(msix_vec, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		gic_v2m_msix_enable(frame, spi, msix_vec, bst, bsh);
	}

	bus_space_unmap(bst, bsh, bsz);

	return vectors;
}

static void *
gic_v2m_msi_intr_establish(struct arm_pci_msi *msi,
    pci_intr_handle_t ih, int ipl, int (*func)(void *), void *arg, const char *xname)
{
	struct gic_v2m_frame * const frame = msi->msi_priv;

	const int spi = __SHIFTOUT(ih, ARM_PCI_INTR_IRQ);
	const int mpsafe = (ih & ARM_PCI_INTR_MPSAFE) ? IST_MPSAFE : 0;

	return pic_establish_intr(frame->frame_pic, spi, ipl,
	    IST_EDGE | mpsafe, func, arg, xname);
}

static void
gic_v2m_msi_intr_release(struct arm_pci_msi *msi, pci_intr_handle_t *pih,
    int count)
{
	struct gic_v2m_frame * const frame = msi->msi_priv;
	int n;

	for (n = 0; n < count; n++) {
		const int spi = __SHIFTOUT(pih[n], ARM_PCI_INTR_IRQ);
		if (pih[n] & ARM_PCI_INTR_MSIX)
			gic_v2m_msix_disable(frame, spi);
		if (pih[n] & ARM_PCI_INTR_MSI)
			gic_v2m_msi_disable(frame, spi);
		gic_v2m_msi_free_spi(frame, spi);
		struct intrsource * const is =
		    frame->frame_pic->pic_sources[spi];
		if (is != NULL)
			pic_disestablish_source(is);
	}
}

/*
 * Register this GICv2m frame with the arm_pci_msi layer so PCI MSI and
 * MSI-X allocations can be routed to it.
 */
int
gic_v2m_init(struct gic_v2m_frame *frame, device_t dev, uint32_t frame_id)
{
	struct arm_pci_msi *msi = &frame->frame_msi;

	msi->msi_dev = dev;
	msi->msi_priv = frame;
	msi->msi_alloc = gic_v2m_msi_alloc;
	msi->msix_alloc = gic_v2m_msix_alloc;
	msi->msi_intr_establish = gic_v2m_msi_intr_establish;
	msi->msi_intr_release = gic_v2m_msi_intr_release;

	return arm_pci_msi_add(msi);
}
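/*
 * Usage sketch (illustrative only): drivers never call the gic_v2m_*
 * routines directly.  They allocate vectors through the machine-independent
 * PCI MSI interface, and the arm_pci_msi layer dispatches to the hooks
 * registered by gic_v2m_init() above.  The driver-side names below
 * (mydriver_intr, sc, self) are placeholders, not part of this file:
 *
 *	pci_intr_handle_t *ihs;
 *	int count = 1;
 *
 *	if (pci_msi_alloc(pa, &ihs, &count) == 0) {
 *		void *ih = pci_intr_establish_xname(pa->pa_pc, ihs[0],
 *		    IPL_BIO, mydriver_intr, sc, device_xname(self));
 *		...
 *	}
 */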