/*	$NetBSD: becc_icu.c,v 1.13 2012/08/02 15:56:07 skrll Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the ADI Engineering Big Endian Companion Chip.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: becc_icu.c,v 1.13 2012/08/02 15:56:07 skrll Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

#include <arm/cpufunc.h>

#include <arm/xscale/beccreg.h>
#include <arm/xscale/beccvar.h>

#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
uint32_t becc_imask[NIPL];

/* Interrupts pending. */
volatile uint32_t becc_ipending;
volatile uint32_t becc_sipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQ. */
uint32_t intr_steer;

/*
 * Interrupt bit names.
 * XXX Some of these are BRH-centric.
 */
const char * const becc_irqnames[] = {
	"soft",
	"timer A",
	"timer B",
	"irq 3",
	"irq 4",
	"irq 5",
	"irq 6",
	"diagerr",
	"DMA EOT",
	"DMA PERR",
	"DMA TABT",
	"DMA MABT",
	"irq 12",
	"irq 13",
	"irq 14",
	"irq 15",
	"PCI PERR",
	"irq 17",
	"irq 18",
	"PCI SERR",
	"PCI OAPE",
	"PCI OATA",
	"PCI OAMA",
	"irq 23",
	"irq 24",
	"irq 25",
	"irq 26",		/* PCI INTA */
	"irq 27",		/* PCI INTB */
	"irq 28",		/* PCI INTC */
	"irq 29",		/* PCI INTD */
	"pushbutton",
	"irq 31",
};

void	becc_intr_dispatch(struct trapframe *frame);

static inline uint32_t
becc_icsr_read(void)
{
	uint32_t icsr;

	icsr = BECC_CSR_READ(BECC_ICSR);

	/*
	 * The ICSR register shows bits that are active even if they are
	 * masked in ICMR, so we have to mask them off with the interrupts
	 * we consider enabled.
	 */
	return (icsr & intr_enabled);
}

static inline void
becc_set_intrsteer(void)
{

	BECC_CSR_WRITE(BECC_ICSTR, intr_steer & ICU_VALID_MASK);
	(void) BECC_CSR_READ(BECC_ICSTR);
}

static inline void
becc_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	becc_set_intrmask();
}

static inline void
becc_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	becc_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
becc_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		becc_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		becc_imask[ipl] = irqs;
	}

	becc_imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	becc_imask[IPL_VM] |= becc_imask[IPL_SOFTSERIAL];
	becc_imask[IPL_SCHED] |= becc_imask[IPL_VM];
	becc_imask[IPL_HIGH] |= becc_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			becc_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= becc_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

void
splx(int new)
{
	becc_splx(new);
}

int
_spllower(int ipl)
{
	return (becc_spllower(ipl));
}

int
_splraise(int ipl)
{
	return (becc_splraise(ipl));
}

/*
 * becc_icu_init:
 *
 *	Initialize the BECC ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
becc_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	becc_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	becc_set_intrsteer();

	i80200_extirq_dispatch = becc_intr_dispatch;

	i80200_intr_enable(INTCTL_IM);
}

/*
 * becc_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
becc_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "becc", becc_irqnames[i]);
	}

	becc_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void *
becc_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("becc_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All BECC interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	becc_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

void
becc_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	uint32_t oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	becc_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

void
becc_intr_dispatch(struct trapframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	uint32_t oldirqstate, irq, ibit, hwpend;
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const uint32_t imask = becc_imask[ppl];

	hwpend = becc_icsr_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * re-enable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	becc_set_intrmask();

	while (hwpend != 0) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			becc_ipending |= ibit;
			continue;
		}

		becc_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}

		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		becc_set_intrmask();
	}

	if (becc_ipending & ~imask) {
		intr_enabled |= (becc_ipending & ~imask);
		becc_set_intrmask();
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
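
/*
 * Usage sketch (illustrative only, not part of this file): a board-specific
 * driver attachment would typically hook its interrupt through
 * becc_intr_establish() roughly as below.  The names "MYDEV_IRQ",
 * "mydev_intr", "sc" and "self" are hypothetical and only show the calling
 * convention assumed above; the handler runs at the requested IPL and its
 * softc pointer (or the trapframe, if the argument is NULL) is passed back.
 *
 *	sc->sc_ih = becc_intr_establish(MYDEV_IRQ, IPL_VM, mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "unable to establish interrupt\n");
 *	...
 *	becc_intr_disestablish(sc->sc_ih);
 */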