/*	$NetBSD: ep93xx_intr.c,v 1.22 2014/03/26 08:51:59 christos Exp $	*/

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jesse Off
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ep93xx_intr.c,v 1.22 2014/03/26 08:51:59 christos Exp $");

/*
 * Interrupt support for the Cirrus Logic EP93XX
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <arm/locore.h>

#include <arm/ep93xx/ep93xxreg.h>
#include <arm/ep93xx/ep93xxvar.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static uint32_t vic1_imask[NIPL];
static uint32_t vic2_imask[NIPL];

/* Current interrupt priority level. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t vic1_intr_enabled;
volatile uint32_t vic2_intr_enabled;
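
/*
 * The interrupt sources are spread across two vectored interrupt
 * controllers: IRQs below VIC_NIRQ live on VIC1, the rest on VIC2.
 * As an illustrative sketch only (this helper is hypothetical and not
 * used by this file), the mapping from a global IRQ number to a
 * controller and bit position looks like:
 *
 *	static inline uint32_t
 *	irq_to_vic_bit(int irq, int *on_vic2)
 *	{
 *		*on_vic2 = (irq >= VIC_NIRQ);
 *		return 1U << (*on_vic2 ? irq - VIC_NIRQ : irq);
 *	}
 *
 * ep93xx_enable_irq() and ep93xx_disable_irq() below open-code the
 * same calculation.
 */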

void	ep93xx_intr_dispatch(struct trapframe *);

#define VIC1REG(reg)	*((volatile uint32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC1 + (reg)))
#define VIC2REG(reg)	*((volatile uint32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC2 + (reg)))

static void
ep93xx_set_intrmask(uint32_t vic1_irqs, uint32_t vic2_irqs)
{
	VIC1REG(EP93XX_VIC_IntEnClear) = vic1_irqs;
	VIC1REG(EP93XX_VIC_IntEnable) = vic1_intr_enabled & ~vic1_irqs;
	VIC2REG(EP93XX_VIC_IntEnClear) = vic2_irqs;
	VIC2REG(EP93XX_VIC_IntEnable) = vic2_intr_enabled & ~vic2_irqs;
}

static void
ep93xx_enable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled |= (1U << irq);
		VIC1REG(EP93XX_VIC_IntEnable) = (1U << irq);
	} else {
		vic2_intr_enabled |= (1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnable) = (1U << (irq - VIC_NIRQ));
	}
}

static inline void
ep93xx_disable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled &= ~(1U << irq);
		VIC1REG(EP93XX_VIC_IntEnClear) = (1U << irq);
	} else {
		vic2_intr_enabled &= ~(1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnClear) = (1U << (irq - VIC_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ep93xx_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ep93xx_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int vic1_irqs = 0;
		int vic2_irqs = 0;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				vic1_irqs |= (1U << irq);
		}
		vic1_imask[ipl] = vic1_irqs;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq + VIC_NIRQ].iq_levels & (1U << ipl))
				vic2_irqs |= (1U << irq);
		}
		vic2_imask[ipl] = vic2_irqs;
	}

	KASSERT(vic1_imask[IPL_NONE] == 0);
	KASSERT(vic2_imask[IPL_NONE] == 0);
	KASSERT(vic1_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(vic2_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(vic1_imask[IPL_SOFTBIO] == 0);
	KASSERT(vic2_imask[IPL_SOFTBIO] == 0);
	KASSERT(vic1_imask[IPL_SOFTNET] == 0);
	KASSERT(vic2_imask[IPL_SOFTNET] == 0);
	KASSERT(vic1_imask[IPL_SOFTSERIAL] == 0);
	KASSERT(vic2_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * splsched() must block anything that uses the scheduler.
	 */
	vic1_imask[IPL_SCHED] |= vic1_imask[IPL_VM];
	vic2_imask[IPL_SCHED] |= vic2_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	vic1_imask[IPL_HIGH] |= vic1_imask[IPL_SCHED];
	vic2_imask[IPL_HIGH] |= vic2_imask[IPL_SCHED];
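
	/*
	 * Illustrative check (a sketch, not compiled in): after the two
	 * OR steps above, each higher-priority mask is a superset of the
	 * one below it, which is consistent with splx() simply loading
	 * the mask for the new level.  Spelled out for VIC1:
	 *
	 *	KASSERT((vic1_imask[IPL_VM] & ~vic1_imask[IPL_SCHED]) == 0);
	 *	KASSERT((vic1_imask[IPL_SCHED] & ~vic1_imask[IPL_HIGH]) == 0);
	 *
	 * and likewise for vic2_imask[].
	 */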

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int vic1_irqs;
		int vic2_irqs;

		if (irq < VIC_NIRQ) {
			vic1_irqs = (1U << irq);
			vic2_irqs = 0;
		} else {
			vic1_irqs = 0;
			vic2_irqs = (1U << (irq - VIC_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ep93xx_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			vic1_irqs |= vic1_imask[ih->ih_ipl];
			vic2_irqs |= vic2_imask[ih->ih_ipl];
		}
		iq->iq_vic1_mask = vic1_irqs;
		iq->iq_vic2_mask = vic2_irqs;
	}
}

inline void
splx(int new)
{
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	set_curcpl(new);
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

int
_splraise(int ipl)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(ipl);
	restore_interrupts(oldirqstate);
	return (old);
}

int
_spllower(int ipl)
{
	int old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

/*
 * ep93xx_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ep93xx_intr_init(void)
{
	struct intrq *iq;
	int i;

	vic1_intr_enabled = 0;
	vic2_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, (i < VIC_NIRQ ? "vic1" : "vic2"),
		    iq->iq_name);
	}
	curcpu()->ci_intr_depth = 0;
	set_curcpl(0);
	hardware_spl_level = 0;

	/* All interrupts should use IRQ not FIQ */
	VIC1REG(EP93XX_VIC_IntSelect) = 0;
	VIC2REG(EP93XX_VIC_IntSelect) = 0;

	ep93xx_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
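
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * typical on-chip device driver hooks its interrupt from its attach
 * routine and keeps the returned cookie for later removal.  The softc,
 * handler and IRQ/IPL values below are hypothetical:
 *
 *	sc->sc_ih = ep93xx_intr_establish(sc->sc_intr, IPL_NET,
 *	    epe_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "unable to establish interrupt\n");
 *	...
 *	ep93xx_intr_disestablish(sc->sc_ih);
 */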

void *
ep93xx_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("ep93xx_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ep93xx_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

void
ep93xx_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);
}

void
ep93xx_intr_dispatch(struct trapframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
	int			pcpl;
	uint32_t		vic1_hwpend;
	uint32_t		vic2_hwpend;
	int			irq;

	pcpl = curcpl();

	vic1_hwpend = VIC1REG(EP93XX_VIC_IRQStatus);
	vic2_hwpend = VIC2REG(EP93XX_VIC_IRQStatus);

	/*
	 * Mask the pending sources along with everything already
	 * blocked at the current IPL before the handlers re-enable
	 * CPU interrupts below.
	 */
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl] | vic1_hwpend,
		 vic2_imask[pcpl] | vic2_hwpend);

	/* Only service sources that are not blocked at the current IPL. */
	vic1_hwpend &= ~vic1_imask[pcpl];
	vic2_hwpend &= ~vic2_imask[pcpl];

	/*
	 * Service the lowest-numbered pending source, giving VIC1
	 * priority over VIC2.
	 */
	if (vic1_hwpend) {
		irq = ffs(vic1_hwpend) - 1;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		curcpu()->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	} else if (vic2_hwpend) {
		irq = ffs(vic2_hwpend) - 1;

		iq = &intrq[irq + VIC_NIRQ];
		iq->iq_ev.ev_count++;
		curcpu()->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	set_curcpl(pcpl);
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl], vic2_imask[pcpl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
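
/*
 * For reference, an illustrative sketch (not part of this file) of the
 * usual save/restore pattern built on the primitives above; the state
 * being protected is hypothetical:
 *
 *	int s = _splraise(IPL_HIGH);
 *	... touch state shared with an interrupt handler ...
 *	splx(s);
 *
 * _splraise() only records the new level; the VIC masks are reloaded
 * lazily, either by splx() when the level changes or at the start of
 * ep93xx_intr_dispatch().
 */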