/*	$NetBSD: i80321_icu.c,v 1.16 2007/12/11 17:12:27 ad Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.16 2007/12/11 17:12:27 ad Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Current interrupt priority level. */
volatile int current_spl_level;

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX: will need to revisit this if those bits are
 * ever used in future steppings).
 */
static const uint32_t si_to_irqbit[4] = {
        ICU_INT_bit26,          /* SI_SOFTCLOCK */
        ICU_INT_bit22,          /* SI_SOFTBIO */
        ICU_INT_bit5,           /* SI_SOFTNET */
        ICU_INT_bit4,           /* SI_SOFTSERIAL */
};

#define SI_TO_IRQBIT(si)        (1U << si_to_irqbit[(si)])
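
/*
 * Example (assuming the SI_* indices match the annotations in the table
 * above): SI_TO_IRQBIT(SI_SOFTNET) expands to (1U << si_to_irqbit[SI_SOFTNET]),
 * i.e. (1U << ICU_INT_bit5) -- a single-bit mask sitting in an otherwise
 * unused position of the ICU registers.
 */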

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[4] = {
        IPL_SOFTCLOCK,          /* SI_SOFTCLOCK */
        IPL_SOFTBIO,            /* SI_SOFTBIO */
        IPL_SOFTNET,            /* SI_SOFTNET */
        IPL_SOFTSERIAL,         /* SI_SOFTSERIAL */
};

/*
 * Interrupt bit names.
 */
const char *i80321_irqnames[] = {
        "DMA0 EOT",
        "DMA0 EOC",
        "DMA1 EOT",
        "DMA1 EOC",
        "irq 4",
        "irq 5",
        "AAU EOT",
        "AAU EOC",
        "core PMU",
        "TMR0 (hardclock)",
        "TMR1",
        "I2C0",
        "I2C1",
        "MU",
        "BIST",
        "periph PMU",
        "XScale PMU",
        "BIU error",
        "ATU error",
        "MCU error",
        "DMA0 error",
        "DMA1 error",
        "irq 22",
        "AAU error",
        "MU error",
        "SSP",
        "irq 26",
        "irq 27",
        "irq 28",
        "irq 29",
        "irq 30",
        "irq 31",
};

void    i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
        uint32_t iintsrc;

        __asm volatile("mrc p6, 0, %0, c8, c0, 0"
            : "=r" (iintsrc));

        /*
         * The IINTSRC register shows bits that are active even
         * if they are masked in INTCTL, so we have to mask them
         * off with the interrupts we consider enabled.
         */
        return (iintsrc & intr_enabled);
}

static inline void
i80321_set_intrsteer(void)
{

        __asm volatile("mcr p6, 0, %0, c4, c0, 0"
            :
            : "r" (intr_steer & ICU_INT_HWMASK));
}

static inline void
i80321_enable_irq(int irq)
{

        intr_enabled |= (1U << irq);
        i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

        intr_enabled &= ~(1U << irq);
        i80321_set_intrmask();
}
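
/*
 * Note: i80321_enable_irq()/i80321_disable_irq() only update the software
 * copy "intr_enabled"; i80321_set_intrmask(), which is provided by the
 * i80321/evbarm interrupt headers rather than this file, is what pushes
 * that mask out to the ICU.  A minimal sketch of what it is expected to
 * do, assuming INTCTL is coprocessor 6, register 0 as on the i80321:
 *
 *      __asm volatile("mcr p6, 0, %0, c0, c0, 0"
 *          :
 *          : "r" (intr_enabled & ICU_INT_HWMASK));
 */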

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
        struct intrq *iq;
        struct intrhand *ih;
        int irq, ipl;

        /* First, figure out which IPLs each IRQ has. */
        for (irq = 0; irq < NIRQ; irq++) {
                int levels = 0;
                iq = &intrq[irq];
                i80321_disable_irq(irq);
                for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
                     ih = TAILQ_NEXT(ih, ih_list))
                        levels |= (1U << ih->ih_ipl);
                iq->iq_levels = levels;
        }

        /* Next, figure out which IRQs are used by each IPL. */
        for (ipl = 0; ipl < NIPL; ipl++) {
                int irqs = 0;
                for (irq = 0; irq < NIRQ; irq++) {
                        if (intrq[irq].iq_levels & (1U << ipl))
                                irqs |= (1U << irq);
                }
                i80321_imask[ipl] = irqs;
        }

        i80321_imask[IPL_NONE] = 0;

        /*
         * Enforce a hierarchy that gives "slow" devices (or devices with
         * limited input buffer space/"real-time" requirements) a better
         * chance at not dropping data.
         */
        i80321_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
        i80321_imask[IPL_SOFTBIO] = SI_TO_IRQBIT(SI_SOFTBIO);
        i80321_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
        i80321_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

        i80321_imask[IPL_SOFTBIO] |= i80321_imask[IPL_SOFTCLOCK];
        i80321_imask[IPL_SOFTNET] |= i80321_imask[IPL_SOFTBIO];
        i80321_imask[IPL_SOFTSERIAL] |= i80321_imask[IPL_SOFTNET];
        i80321_imask[IPL_VM] |= i80321_imask[IPL_SOFTSERIAL];
        /* splsched() must block everything blocked by splvm(). */
        i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
        i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];

        /*
         * Now compute which IRQs must be blocked when servicing any
         * given IRQ.
         */
        for (irq = 0; irq < NIRQ; irq++) {
                int irqs = (1U << irq);
                iq = &intrq[irq];
                if (TAILQ_FIRST(&iq->iq_list) != NULL)
                        i80321_enable_irq(irq);
                for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
                     ih = TAILQ_NEXT(ih, ih_list))
                        irqs |= i80321_imask[ih->ih_ipl];
                iq->iq_mask = irqs;
        }
}

void
i80321_do_pending(void)
{
#ifdef __HAVE_FAST_SOFTINTS
        static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
        int new, oldirqstate;

        if (__cpu_simple_lock_try(&processing) == 0)
                return;

        new = current_spl_level;

        oldirqstate = disable_interrupts(I32_bit);

#define DO_SOFTINT(si)                                                  \
        if ((i80321_ipending & ~new) & SI_TO_IRQBIT(si)) {              \
                i80321_ipending &= ~SI_TO_IRQBIT(si);                   \
                current_spl_level |= i80321_imask[si_to_ipl[(si)]];     \
                restore_interrupts(oldirqstate);                        \
                softintr_dispatch(si);                                  \
                oldirqstate = disable_interrupts(I32_bit);              \
                current_spl_level = new;                                \
        }

        DO_SOFTINT(SI_SOFTSERIAL);
        DO_SOFTINT(SI_SOFTNET);
        DO_SOFTINT(SI_SOFTCLOCK);
        DO_SOFTINT(SI_SOFTBIO);

        __cpu_simple_unlock(&processing);

        restore_interrupts(oldirqstate);
#endif
}

void
splx(int new)
{

        i80321_splx(new);
}

int
_spllower(int ipl)
{

        return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{

        return (i80321_splraise(ipl));
}

void
_setsoftintr(int si)
{
        int oldirqstate;

        oldirqstate = disable_interrupts(I32_bit);
        i80321_ipending |= SI_TO_IRQBIT(si);
        restore_interrupts(oldirqstate);

        /* Process unmasked pending soft interrupts. */
        if ((i80321_ipending & INT_SWMASK) & ~current_spl_level)
                i80321_do_pending();
}

/*
 * i80321_icu_init:
 *
 *      Initialize the i80321 ICU.  Called early in bootstrap
 *      to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

        intr_enabled = 0;       /* All interrupts disabled */
        i80321_set_intrmask();

        intr_steer = 0;         /* All interrupts steered to IRQ */
        i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *      Initialize the rest of the interrupt subsystem, making it
 *      ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
        struct intrq *iq;
        int i;

        intr_enabled = 0;

        for (i = 0; i < NIRQ; i++) {
                iq = &intrq[i];
                TAILQ_INIT(&iq->iq_list);

                evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
                    NULL, "iop321", i80321_irqnames[i]);
        }

        i80321_intr_calculate_masks();

        /* Enable IRQs (don't yet use FIQs). */
        enable_interrupts(I32_bit);
}
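
/*
 * Expected bring-up order (a summary of the comments above, not a new
 * interface): i80321_icu_init() runs early in bootstrap, with interrupts
 * off, to quiesce the ICU; i80321_intr_init() runs later, once event
 * counters can be attached, and enables IRQs; after that, drivers hook
 * their handlers with i80321_intr_establish() below.
 */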

void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
        struct intrq *iq;
        struct intrhand *ih;
        u_int oldirqstate;

        if (irq < 0 || irq >= NIRQ)
                panic("i80321_intr_establish: IRQ %d out of range", irq);

        ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
        if (ih == NULL)
                return (NULL);

        ih->ih_func = func;
        ih->ih_arg = arg;
        ih->ih_ipl = ipl;
        ih->ih_irq = irq;

        iq = &intrq[irq];

        /* All IOP321 interrupts are level-triggered. */
        iq->iq_ist = IST_LEVEL;

        oldirqstate = disable_interrupts(I32_bit);

        TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

        i80321_intr_calculate_masks();

        restore_interrupts(oldirqstate);

        return (ih);
}

void
i80321_intr_disestablish(void *cookie)
{
        struct intrhand *ih = cookie;
        struct intrq *iq = &intrq[ih->ih_irq];
        int oldirqstate;

        oldirqstate = disable_interrupts(I32_bit);

        TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

        i80321_intr_calculate_masks();

        restore_interrupts(oldirqstate);
}
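
/*
 * Usage sketch (hypothetical driver code; the IRQ number, IPL, handler
 * and softc names below are illustrative only and do not come from this
 * file):
 *
 *      void *ih;
 *
 *      ih = i80321_intr_establish(ICU_INT_TMR1, IPL_VM, mydev_intr, sc);
 *      if (ih == NULL)
 *              panic("mydev: unable to establish interrupt");
 *      ...
 *      i80321_intr_disestablish(ih);
 */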

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame.  This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core.  (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.)  Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled.  If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards.  This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway.  The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL.  Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
        struct intrq *iq;
        struct intrhand *ih;
        int oldirqstate, pcpl, irq, ibit, hwpend;
        struct cpu_info *ci;
#ifdef I80321_HPI_ENABLED
        int oldpending;
#endif

        ci = curcpu();
        ci->ci_idepth++;
        pcpl = current_spl_level;
        hwpend = i80321_iintsrc_read();

        /*
         * Disable all the interrupts that are pending.  We will
         * reenable them once they are processed and not masked.
         */
        intr_enabled &= ~hwpend;
        i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
        oldirqstate = 0;        /* XXX: quell gcc warning */
#endif

        while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
                /* Deal with HPI interrupt first */
                if (__predict_false(hwpend & INT_HPIMASK))
                        irq = ICU_INT_HPI;
                else
#endif
                irq = ffs(hwpend) - 1;
                ibit = (1U << irq);

                hwpend &= ~ibit;

                if (pcpl & ibit) {
                        /*
                         * IRQ is masked; mark it as pending and check
                         * the next one.  Note: the IRQ is already disabled.
                         */
#ifdef I80321_HPI_ENABLED
                        if (__predict_false(irq == ICU_INT_HPI)) {
                                /*
                                 * This is an HPI.  We *must* disable
                                 * IRQs in the interrupt frame until
                                 * INT_HPIMASK is cleared by a later
                                 * call to splx().  Otherwise the level-
                                 * triggered interrupt will just keep
                                 * coming back.
                                 */
                                frame->cf_if.if_spsr |= I32_bit;
                        }
#endif
                        i80321_ipending |= ibit;
                        continue;
                }

#ifdef I80321_HPI_ENABLED
                oldpending = i80321_ipending | ibit;
#endif
                i80321_ipending &= ~ibit;

                iq = &intrq[irq];
                iq->iq_ev.ev_count++;
                uvmexp.intrs++;
                current_spl_level |= iq->iq_mask;
#ifdef I80321_HPI_ENABLED
                /*
                 * Re-enable interrupts iff an HPI is not pending
                 */
                if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
                        oldirqstate = enable_interrupts(I32_bit);
                for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
                     ih = TAILQ_NEXT(ih, ih_list)) {
                        (void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
                }
#ifdef I80321_HPI_ENABLED
                if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
                        restore_interrupts(oldirqstate);
#ifdef I80321_HPI_ENABLED
                else if (irq == ICU_INT_HPI) {
                        /*
                         * We've just handled the HPI.  Make sure IRQs
                         * are enabled in the interrupt frame.
                         * Here's hoping the handler really did clear
                         * down the source...
                         */
                        frame->cf_if.if_spsr &= ~I32_bit;
                }
#endif
                current_spl_level = pcpl;

                /* Re-enable this interrupt now that it's cleared. */
                intr_enabled |= ibit;
                i80321_set_intrmask();

                /*
                 * Don't forget to include interrupts which may have
                 * arrived in the meantime.
                 */
                hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~pcpl);
        }
        ci->ci_idepth--;

        /* Check for pending soft intrs. */
        if ((i80321_ipending & INT_SWMASK) & ~current_spl_level) {
#ifdef I80321_HPI_ENABLED
                /* XXX: This is only necessary if HPI is < IPL_SOFT* */
                if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
                        oldirqstate = enable_interrupts(I32_bit);
                i80321_do_pending();
#ifdef I80321_HPI_ENABLED
                /* XXX: This is only necessary if HPI is < IPL_NET* */
                if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
                        restore_interrupts(oldirqstate);
        }
}