/*	$NetBSD: ixp12x0_intr.c,v 1.17 2008/01/08 02:07:51 matt Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.17 2008/01/08 02:07:51 matt Exp $");

/*
 * Interrupt support for the Intel ixp12x0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/simplelock.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>

extern u_int32_t ixpcom_cr;	/* current cr from *_com.c */
extern u_int32_t ixpcom_imask;	/* tell mask to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t imask[NIPL];
static u_int32_t pci_imask[NIPL];

/* Current interrupt priority level. */
volatile int current_spl_level;
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t intr_enabled;
volatile u_int32_t pci_intr_enabled;

/* Interrupts pending. */
static volatile int ipending;

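/*
 * The flat IRQ number space covers both interrupt banks: numbers below
 * SYS_NIRQ index the on-chip system IRQ sources, and numbers from
 * SYS_NIRQ up to NIRQ - 1 index the PCI IRQ sources.  A minimal sketch
 * of the mapping, assuming that split (hypothetical helper, not used
 * by the driver below):
 */
#if 0
static inline u_int32_t
irq_to_bankbit(int irq, int *is_pci)
{
	/* Fold the flat number into a per-bank bit mask. */
	*is_pci = (irq >= SYS_NIRQ);
	return 1U << (*is_pci ? irq - SYS_NIRQ : irq);
}
#endif
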
#ifdef __HAVE_FAST_SOFTINTS
/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers -- XXX will need to revisit this if those bits are
 * ever used in future steppings.
 */
static const u_int32_t si_to_irqbit[SI_NQUEUES] = {
	IXP12X0_INTR_bit30,		/* SI_SOFTBIO */
	IXP12X0_INTR_bit29,		/* SI_SOFTCLOCK */
	IXP12X0_INTR_bit28,		/* SI_SOFTNET */
	IXP12X0_INTR_bit27,		/* SI_SOFTSERIAL */
};

#define	INT_SWMASK							\
	((1U << IXP12X0_INTR_bit30) | (1U << IXP12X0_INTR_bit29) |	\
	 (1U << IXP12X0_INTR_bit28) | (1U << IXP12X0_INTR_bit27))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[] = {
	[SI_SOFTBIO]	= IPL_SOFTBIO,
	[SI_SOFTCLOCK]	= IPL_SOFTCLOCK,
	[SI_SOFTNET]	= IPL_SOFTNET,
	[SI_SOFTSERIAL]	= IPL_SOFTSERIAL,
};
#endif /* __HAVE_FAST_SOFTINTS */

void	ixp12x0_intr_dispatch(struct irqframe *frame);

#define	IXPREG(reg)	*((volatile u_int32_t *)(reg))

static inline u_int32_t
ixp12x0_irq_read(void)
{
	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static inline u_int32_t
ixp12x0_pci_irq_read(void)
{
	return IXPREG(IXPPCI_IRQ_STATUS);
}

static void
ixp12x0_enable_uart_irq(void)
{
	ixpcom_imask = 0;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
		    IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_disable_uart_irq(void)
{
	ixpcom_imask = CR_RIE | CR_XIE;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
		    IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_set_intrmask(u_int32_t irqs, u_int32_t pci_irqs)
{
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}

static void
ixp12x0_enable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled |= (1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_enable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			panic("enable_irq: bad IRQ %d", irq);
		}
	} else {
		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
	}
}

static inline void
ixp12x0_disable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled &= ~(1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_disable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			/* nothing to do */
			break;
		}
	} else {
		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
	}
}

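/*
 * Note that this driver touches no per-source enable register for the
 * system bank; only the PCI bank has ENABLE_SET/ENABLE_CLEAR registers.
 * The UART source is therefore masked at the device, by clearing the
 * receive/transmit interrupt enables in the ixpcom control register,
 * as ixp12x0_disable_uart_irq() does above.  A sketch of the
 * equivalent direct register write (illustrative only; the helpers
 * above also keep ixpcom_imask in sync for *_com.c):
 */
#if 0
	bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
	    IXPCOM_CR, ixpcom_cr & ~(CR_RIE | CR_XIE));
#endif
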
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	KASSERT(imask[IPL_NONE] == 0);
	KASSERT(pci_imask[IPL_NONE] == 0);

#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFTBIO] = SI_TO_IRQBIT(SI_SOFTBIO);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);
#endif

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFTBIO.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFTBIO];
	pci_imask[IPL_SOFTCLOCK] |= pci_imask[IPL_SOFTBIO];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];
	pci_imask[IPL_SOFTNET] |= pci_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_SOFTSERIAL] |= imask[IPL_SOFTNET];
	pci_imask[IPL_SOFTSERIAL] |= pci_imask[IPL_SOFTNET];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_VM] |= imask[IPL_SOFTSERIAL];
	pci_imask[IPL_VM] |= pci_imask[IPL_SOFTSERIAL];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_VM];
	pci_imask[IPL_CLOCK] |= pci_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_CLOCK];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_CLOCK];

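	/*
	 * After the folding above, the masks are monotone: each level
	 * blocks at least everything blocked by the levels below it.
	 * A sketch of that invariant, assuming the IPL_* ordering used
	 * here (illustrative only, not compiled in):
	 */
#if 0
	KASSERT((imask[IPL_HIGH] & imask[IPL_CLOCK]) == imask[IPL_CLOCK]);
	KASSERT((pci_imask[IPL_HIGH] & pci_imask[IPL_CLOCK]) ==
	    pci_imask[IPL_CLOCK]);
#endif
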
	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs;
		int pci_irqs;

		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}

#ifdef __HAVE_FAST_SOFTINTS
static void
ixp12x0_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new;
	u_int oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((ipending & ~imask[new]) & SI_TO_IRQBIT(si)) {		\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level = si_to_ipl[(si)];			\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFTBIO);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
#endif

inline void
splx(int new)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = new;
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ixp12x0_set_intrmask(imask[new], pci_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	/* If there are software interrupts to process, do so now. */
	if ((ipending & INT_SWMASK) & ~imask[new])
		ixp12x0_do_pending();
#endif
}

int
_splraise(int ipl)
{
	int old;
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = ipl;
	restore_interrupts(oldirqstate);
	return (old);
}

int
_spllower(int ipl)
{
	int old = current_spl_level;

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

#ifdef __HAVE_FAST_SOFTINTS
void
_setsoftintr(int si)
{
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~imask[current_spl_level])
		ixp12x0_do_pending();
}
#endif

/*
 * ixp12x0_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	pci_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "ixpintr", iq->iq_name);
	}
	current_spl_level = 0;
	hardware_spl_level = 0;

	ixp12x0_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

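/*
 * ixp12x0_intr_establish:
 *
 *	Register a handler for the named IRQ at the given IPL.  A
 *	minimal usage sketch (the "sc" softc and mydev_intr() handler
 *	are hypothetical):
 *
 *		sc->sc_ih = ixp12x0_intr_establish(IXP12X0_INTR_UART,
 *		    IPL_VM, mydev_intr, sc);
 *		if (sc->sc_ih == NULL)
 *			panic("mydev: unable to establish interrupt");
 */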
void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

#ifdef DEBUG
	printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%08x, arg=%08x)\n",
	    irq, ipl, (u_int32_t)ih_func, (u_int32_t)arg);
#endif
	if (irq < 0 || irq >= NIRQ)
		panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

void
ixp12x0_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	free(ih, M_DEVBUF);
}

void
ixp12x0_intr_dispatch(struct irqframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;
	int pcpl;
	u_int32_t hwpend;
	u_int32_t pci_hwpend;
	int irq;
	u_int32_t ibit;

	pcpl = current_spl_level;

	hwpend = ixp12x0_irq_read();
	pci_hwpend = ixp12x0_pci_irq_read();

	hardware_spl_level = pcpl;
	ixp12x0_set_intrmask(imask[pcpl] | hwpend,
	    pci_imask[pcpl] | pci_hwpend);

	hwpend &= ~imask[pcpl];
	pci_hwpend &= ~pci_imask[pcpl];

	while (hwpend) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);
		hwpend &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			current_spl_level = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}
	while (pci_hwpend) {
		irq = ffs(pci_hwpend) - 1;
		ibit = (1U << irq);
		pci_hwpend &= ~ibit;

		iq = &intrq[irq + SYS_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			current_spl_level = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	current_spl_level = pcpl;
	hardware_spl_level = pcpl;
	ixp12x0_set_intrmask(imask[pcpl], pci_imask[pcpl]);

#ifdef __HAVE_FAST_SOFTINTS
	/* Check for pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~imask[pcpl]) {
		oldirqstate = enable_interrupts(I32_bit);
		ixp12x0_do_pending();
		restore_interrupts(oldirqstate);
	}
#endif
}