1 /* $NetBSD: ep93xx_intr.c,v 1.28 2023/05/02 09:49:33 jmcneill Exp $ */ 2 3 /* 4 * Copyright (c) 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jesse Off 9 * 10 * This code is derived from software contributed to The NetBSD Foundation 11 * by Ichiro FUKUHARA and Naoto Shimazaki. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 __KERNEL_RCSID(0, "$NetBSD: ep93xx_intr.c,v 1.28 2023/05/02 09:49:33 jmcneill Exp $"); 37 38 /* 39 * Interrupt support for the Cirrus Logic EP93XX 40 */ 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/kmem.h> 45 #include <sys/termios.h> 46 #include <sys/lwp.h> 47 48 #include <sys/bus.h> 49 #include <sys/intr.h> 50 51 #include <arm/locore.h> 52 53 #include <arm/ep93xx/ep93xxreg.h> 54 #include <arm/ep93xx/ep93xxvar.h> 55 56 /* Interrupt handler queues. */ 57 struct intrq intrq[NIRQ]; 58 59 /* Interrupts to mask at each level. */ 60 static uint32_t vic1_imask[NIPL]; 61 static uint32_t vic2_imask[NIPL]; 62 63 /* Current interrupt priority level. */ 64 volatile int hardware_spl_level; 65 66 /* Software copy of the IRQs we have enabled. */ 67 volatile uint32_t vic1_intr_enabled; 68 volatile uint32_t vic2_intr_enabled; 69 70 void ep93xx_intr_dispatch(struct trapframe *); 71 72 #define VIC1REG(reg) *((volatile uint32_t*) (EP93XX_AHB_VBASE + \ 73 EP93XX_AHB_VIC1 + (reg))) 74 #define VIC2REG(reg) *((volatile uint32_t*) (EP93XX_AHB_VBASE + \ 75 EP93XX_AHB_VIC2 + (reg))) 76 77 static void 78 ep93xx_set_intrmask(uint32_t vic1_irqs, uint32_t vic2_irqs) 79 { 80 VIC1REG(EP93XX_VIC_IntEnClear) = vic1_irqs; 81 VIC1REG(EP93XX_VIC_IntEnable) = vic1_intr_enabled & ~vic1_irqs; 82 VIC2REG(EP93XX_VIC_IntEnClear) = vic2_irqs; 83 VIC2REG(EP93XX_VIC_IntEnable) = vic2_intr_enabled & ~vic2_irqs; 84 } 85 86 static void 87 ep93xx_enable_irq(int irq) 88 { 89 if (irq < VIC_NIRQ) { 90 vic1_intr_enabled |= (1U << irq); 91 VIC1REG(EP93XX_VIC_IntEnable) = (1U << irq); 92 } else { 93 vic2_intr_enabled |= (1U << (irq - VIC_NIRQ)); 94 VIC2REG(EP93XX_VIC_IntEnable) = (1U << (irq - VIC_NIRQ)); 95 } 96 } 97 98 static inline void 99 ep93xx_disable_irq(int irq) 100 { 101 if (irq < VIC_NIRQ) { 102 vic1_intr_enabled &= ~(1U << irq); 103 VIC1REG(EP93XX_VIC_IntEnClear) = (1U << irq); 104 } else { 105 vic2_intr_enabled &= ~(1U << (irq - VIC_NIRQ)); 106 VIC2REG(EP93XX_VIC_IntEnClear) = (1U << (irq - VIC_NIRQ)); 107 } 108 } 109 110 /* 111 * NOTE: This routine must be called with interrupts disabled in the CPSR. 112 */ 113 static void 114 ep93xx_intr_calculate_masks(void) 115 { 116 struct intrq *iq; 117 struct intrhand *ih; 118 int irq, ipl; 119 120 /* First, figure out which IPLs each IRQ has. */ 121 for (irq = 0; irq < NIRQ; irq++) { 122 int levels = 0; 123 iq = &intrq[irq]; 124 ep93xx_disable_irq(irq); 125 for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL; 126 ih = TAILQ_NEXT(ih, ih_list)) 127 levels |= (1U << ih->ih_ipl); 128 iq->iq_levels = levels; 129 } 130 131 /* Next, figure out which IRQs are used by each IPL. */ 132 for (ipl = 0; ipl < NIPL; ipl++) { 133 int vic1_irqs = 0; 134 int vic2_irqs = 0; 135 for (irq = 0; irq < VIC_NIRQ; irq++) { 136 if (intrq[irq].iq_levels & (1U << ipl)) 137 vic1_irqs |= (1U << irq); 138 } 139 vic1_imask[ipl] = vic1_irqs; 140 for (irq = 0; irq < VIC_NIRQ; irq++) { 141 if (intrq[irq + VIC_NIRQ].iq_levels & (1U << ipl)) 142 vic2_irqs |= (1U << irq); 143 } 144 vic2_imask[ipl] = vic2_irqs; 145 } 146 147 KASSERT(vic1_imask[IPL_NONE] == 0); 148 KASSERT(vic2_imask[IPL_NONE] == 0); 149 KASSERT(vic1_imask[IPL_SOFTCLOCK] == 0); 150 KASSERT(vic2_imask[IPL_SOFTCLOCK] == 0); 151 KASSERT(vic1_imask[IPL_SOFTBIO] == 0); 152 KASSERT(vic2_imask[IPL_SOFTBIO] == 0); 153 KASSERT(vic1_imask[IPL_SOFTNET] == 0); 154 KASSERT(vic2_imask[IPL_SOFTNET] == 0); 155 KASSERT(vic1_imask[IPL_SOFTSERIAL] == 0); 156 KASSERT(vic2_imask[IPL_SOFTSERIAL] == 0); 157 158 /* 159 * splsched() must block anything that uses the scheduler. 160 */ 161 vic1_imask[IPL_SCHED] |= vic1_imask[IPL_VM]; 162 vic2_imask[IPL_SCHED] |= vic2_imask[IPL_VM]; 163 164 /* 165 * splhigh() must block "everything". 166 */ 167 vic1_imask[IPL_HIGH] |= vic1_imask[IPL_SCHED]; 168 vic2_imask[IPL_HIGH] |= vic2_imask[IPL_SCHED]; 169 170 /* 171 * Now compute which IRQs must be blocked when servicing any 172 * given IRQ. 173 */ 174 for (irq = 0; irq < NIRQ; irq++) { 175 int vic1_irqs; 176 int vic2_irqs; 177 178 if (irq < VIC_NIRQ) { 179 vic1_irqs = (1U << irq); 180 vic2_irqs = 0; 181 } else { 182 vic1_irqs = 0; 183 vic2_irqs = (1U << (irq - VIC_NIRQ)); 184 } 185 iq = &intrq[irq]; 186 if (TAILQ_FIRST(&iq->iq_list) != NULL) 187 ep93xx_enable_irq(irq); 188 for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL; 189 ih = TAILQ_NEXT(ih, ih_list)) { 190 vic1_irqs |= vic1_imask[ih->ih_ipl]; 191 vic2_irqs |= vic2_imask[ih->ih_ipl]; 192 } 193 iq->iq_vic1_mask = vic1_irqs; 194 iq->iq_vic2_mask = vic2_irqs; 195 } 196 } 197 198 inline void 199 splx(int new) 200 { 201 u_int oldirqstate; 202 203 oldirqstate = disable_interrupts(I32_bit); 204 set_curcpl(new); 205 if (new != hardware_spl_level) { 206 hardware_spl_level = new; 207 ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]); 208 } 209 restore_interrupts(oldirqstate); 210 211 #ifdef __HAVE_FAST_SOFTINTS 212 cpu_dosoftints(); 213 #endif 214 } 215 216 int 217 _splraise(int ipl) 218 { 219 int old; 220 u_int oldirqstate; 221 222 oldirqstate = disable_interrupts(I32_bit); 223 old = curcpl(); 224 set_curcpl(ipl); 225 restore_interrupts(oldirqstate); 226 return (old); 227 } 228 229 int 230 _spllower(int ipl) 231 { 232 int old = curcpl(); 233 234 if (old <= ipl) 235 return (old); 236 splx(ipl); 237 return (old); 238 } 239 240 /* 241 * ep93xx_intr_init: 242 * 243 * Initialize the rest of the interrupt subsystem, making it 244 * ready to handle interrupts from devices. 245 */ 246 void 247 ep93xx_intr_init(void) 248 { 249 struct intrq *iq; 250 int i; 251 252 vic1_intr_enabled = 0; 253 vic2_intr_enabled = 0; 254 255 for (i = 0; i < NIRQ; i++) { 256 iq = &intrq[i]; 257 TAILQ_INIT(&iq->iq_list); 258 259 snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i); 260 } 261 curcpu()->ci_intr_depth = 0; 262 set_curcpl(0); 263 hardware_spl_level = 0; 264 265 /* All interrupts should use IRQ not FIQ */ 266 VIC1REG(EP93XX_VIC_IntSelect) = 0; 267 VIC2REG(EP93XX_VIC_IntSelect) = 0; 268 269 ep93xx_intr_calculate_masks(); 270 271 /* Enable IRQs (don't yet use FIQs). */ 272 enable_interrupts(I32_bit); 273 } 274 275 void 276 ep93xx_intr_evcnt_attach(void) 277 { 278 struct intrq *iq; 279 int i; 280 281 for (i = 0; i < NIRQ; i++) { 282 iq = &intrq[i]; 283 evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR, 284 NULL, (i < VIC_NIRQ ? "vic1" : "vic2"), 285 iq->iq_name); 286 } 287 } 288 289 void * 290 ep93xx_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg) 291 { 292 struct intrq* iq; 293 struct intrhand* ih; 294 u_int oldirqstate; 295 296 if (irq < 0 || irq > NIRQ) 297 panic("ep93xx_intr_establish: IRQ %d out of range", irq); 298 if (ipl < 0 || ipl > NIPL) 299 panic("ep93xx_intr_establish: IPL %d out of range", ipl); 300 301 ih = kmem_alloc(sizeof(*ih), KM_SLEEP); 302 ih->ih_func = ih_func; 303 ih->ih_arg = arg; 304 ih->ih_irq = irq; 305 ih->ih_ipl = ipl; 306 307 iq = &intrq[irq]; 308 309 oldirqstate = disable_interrupts(I32_bit); 310 TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list); 311 ep93xx_intr_calculate_masks(); 312 restore_interrupts(oldirqstate); 313 314 return (ih); 315 } 316 317 void 318 ep93xx_intr_disestablish(void *cookie) 319 { 320 struct intrhand* ih = cookie; 321 struct intrq* iq = &intrq[ih->ih_irq]; 322 u_int oldirqstate; 323 324 oldirqstate = disable_interrupts(I32_bit); 325 TAILQ_REMOVE(&iq->iq_list, ih, ih_list); 326 ep93xx_intr_calculate_masks(); 327 restore_interrupts(oldirqstate); 328 329 kmem_free(ih, sizeof(*ih)); 330 } 331 332 void 333 ep93xx_intr_dispatch(struct trapframe *frame) 334 { 335 struct intrq* iq; 336 struct intrhand* ih; 337 u_int oldirqstate; 338 int pcpl; 339 uint32_t vic1_hwpend; 340 uint32_t vic2_hwpend; 341 int irq; 342 343 pcpl = curcpl(); 344 345 vic1_hwpend = VIC1REG(EP93XX_VIC_IRQStatus); 346 vic2_hwpend = VIC2REG(EP93XX_VIC_IRQStatus); 347 348 hardware_spl_level = pcpl; 349 ep93xx_set_intrmask(vic1_imask[pcpl] | vic1_hwpend, 350 vic2_imask[pcpl] | vic2_hwpend); 351 352 vic1_hwpend &= ~vic1_imask[pcpl]; 353 vic2_hwpend &= ~vic2_imask[pcpl]; 354 355 if (vic1_hwpend) { 356 irq = ffs(vic1_hwpend) - 1; 357 358 iq = &intrq[irq]; 359 iq->iq_ev.ev_count++; 360 curcpu()->ci_data.cpu_nintr++; 361 TAILQ_FOREACH(ih, &iq->iq_list, ih_list) { 362 set_curcpl(ih->ih_ipl); 363 oldirqstate = enable_interrupts(I32_bit); 364 (void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame); 365 restore_interrupts(oldirqstate); 366 } 367 } else if (vic2_hwpend) { 368 irq = ffs(vic2_hwpend) - 1; 369 370 iq = &intrq[irq + VIC_NIRQ]; 371 iq->iq_ev.ev_count++; 372 curcpu()->ci_data.cpu_nintr++; 373 TAILQ_FOREACH(ih, &iq->iq_list, ih_list) { 374 set_curcpl(ih->ih_ipl); 375 oldirqstate = enable_interrupts(I32_bit); 376 (void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame); 377 restore_interrupts(oldirqstate); 378 } 379 } 380 381 set_curcpl(pcpl); 382 hardware_spl_level = pcpl; 383 ep93xx_set_intrmask(vic1_imask[pcpl], vic2_imask[pcpl]); 384 385 #ifdef __HAVE_FAST_SOFTINTS 386 cpu_dosoftints(); 387 #endif 388 } 389