/*	$NetBSD: intr.c,v 1.8 2022/09/29 06:39:59 skrll Exp $	*/
/*	$OpenBSD: intr.c,v 1.27 2009/12/31 12:52:35 jsing Exp $	*/

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt handling for NetBSD/hppa.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.8 2022/09/29 06:39:59 skrll Exp $");

#define __MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>
#include <machine/cpufunc.h>
#include <machine/intr.h>
#include <machine/reg.h>

#include <hppa/hppa/machdep.h>

#include <machine/mutex.h>

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

static int hppa_intr_ipl_next(struct cpu_info *);
void hppa_intr_calculatemasks(struct cpu_info *);
int hppa_intr_ipending(struct hppa_interrupt_register *, int);
void hppa_intr_dispatch(int, int, struct trapframe *);

/* The list of all interrupt registers. */
struct hppa_interrupt_register *hppa_interrupt_registers[HPPA_INTERRUPT_BITS];


/*
 * This establishes a new interrupt register.
 */
void
hppa_interrupt_register_establish(struct cpu_info *ci,
    struct hppa_interrupt_register *ir)
{
	int idx;

	/* Initialize the register structure. */
	memset(ir, 0, sizeof(*ir));
	ir->ir_ci = ci;

	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++)
		ir->ir_bits_map[idx] = IR_BIT_UNUSED;

	ir->ir_bits = ~0;
	/* Add this structure to the list. */
	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++)
		if (hppa_interrupt_registers[idx] == NULL)
			break;
	if (idx == HPPA_INTERRUPT_BITS)
		panic("%s: too many regs", __func__);
	hppa_interrupt_registers[idx] = ir;
}
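
/*
 * A minimal usage sketch (hypothetical caller; the exact attach-time
 * sequence lives in the CPU and bus drivers, not here):
 *
 *	struct cpu_info *ci = curcpu();
 *
 *	hppa_interrupt_register_establish(ci, &ci->ci_ir);
 *
 * After this the register appears in hppa_interrupt_registers[] and
 * can be handed to hppa_intr_establish().
 */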

/*
 * This initialises interrupts for a CPU.
 */
void
hppa_intr_initialise(struct cpu_info *ci)
{
	int i;

	/* Initialize all priority level masks to mask everything. */
	for (i = 0; i < NIPL; i++)
		ci->ci_imask[i] = -1;

	/* We are now at the highest priority level. */
	ci->ci_cpl = -1;

	/* There are no pending interrupts. */
	ci->ci_ipending = 0;

	/* We are not running an interrupt handler. */
	ci->ci_intr_depth = 0;

	/* There are no interrupt handlers. */
	memset(ci->ci_ib, 0, sizeof(ci->ci_ib));

	/* There are no interrupt registers. */
	memset(hppa_interrupt_registers, 0, sizeof(hppa_interrupt_registers));
}

/*
 * This establishes a new interrupt handler.
 */
void *
hppa_intr_establish(int ipl, int (*handler)(void *), void *arg,
    struct hppa_interrupt_register *ir, int bit_pos)
{
	struct hppa_interrupt_bit *ib;
	struct cpu_info *ci = ir->ir_ci;
	int idx;

	/* Panic on a bad interrupt bit. */
	if (bit_pos < 0 || bit_pos >= HPPA_INTERRUPT_BITS)
		panic("%s: bad interrupt bit %d", __func__, bit_pos);
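
	/*
	 * Note on indexing: PA-RISC numbers EIR bits with bit 0 as the
	 * most significant bit, so ir_bits_map[] is kept in that
	 * MSB-first order and "31 ^ bit_pos" converts a mask shift
	 * position into its map slot.
	 */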

	/*
	 * Panic if this interrupt bit is already handled, but allow
	 * shared interrupts for cascaded registers, e.g. dino and gsc
	 * XXX This could be improved.
	 */
	if (handler != NULL) {
		if (IR_BIT_USED_P(ir->ir_bits_map[31 ^ bit_pos]))
			panic("%s: interrupt already handled", __func__);
	}

	/*
	 * If this interrupt bit leads us to another interrupt register,
	 * simply note that in the mapping for the bit.
	 */
	if (handler == NULL) {
		for (idx = 1; idx < HPPA_INTERRUPT_BITS; idx++)
			if (hppa_interrupt_registers[idx] == arg)
				break;
		if (idx == HPPA_INTERRUPT_BITS)
			panic("%s: unknown int reg", __func__);

		ir->ir_bits_map[31 ^ bit_pos] = IR_BIT_REG(idx);

		return NULL;
	}

	/*
	 * Otherwise, allocate a new bit in the spl.
	 */
	idx = hppa_intr_ipl_next(ir->ir_ci);

	ir->ir_bits &= ~(1 << bit_pos);
	ir->ir_rbits &= ~(1 << bit_pos);
	if (!IR_BIT_USED_P(ir->ir_bits_map[31 ^ bit_pos])) {
		ir->ir_bits_map[31 ^ bit_pos] = 1 << idx;
	} else {
		int j;

		ir->ir_bits_map[31 ^ bit_pos] |= 1 << idx;
		j = (ir - hppa_interrupt_registers[0]);
		ci->ci_ishared |= (1 << j);
	}
	ib = &ci->ci_ib[idx];

	/* Fill this interrupt bit. */
	ib->ib_reg = ir;
	ib->ib_ipl = ipl;
	ib->ib_spl = (1 << idx);
	snprintf(ib->ib_name, sizeof(ib->ib_name), "irq %d", bit_pos);

	evcnt_attach_dynamic(&ib->ib_evcnt, EVCNT_TYPE_INTR, NULL, ir->ir_name,
	    ib->ib_name);
	ib->ib_handler = handler;
	ib->ib_arg = arg;

	hppa_intr_calculatemasks(ci);

	return ib;
}
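
/*
 * A minimal caller sketch (hypothetical driver; mydev_intr, sc, ir and
 * irq are assumptions, not names from this file):
 *
 *	void *ih = hppa_intr_establish(IPL_VM, mydev_intr, sc, ir, irq);
 *
 * Passing handler == NULL instead records that the bit cascades to the
 * interrupt register given in arg, and returns NULL.
 */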

/*
 * This allocates an interrupt bit within an interrupt register.
 * It returns the bit position, or -1 if no bits were available.
 */
int
hppa_intr_allocate_bit(struct hppa_interrupt_register *ir, int irq)
{
	int bit_pos;
	int last_bit;
	u_int mask;
	int *bits;

	if (irq == -1) {
		bit_pos = 31;
		last_bit = 0;
		bits = &ir->ir_bits;
	} else {
		bit_pos = irq;
		last_bit = irq;
		bits = &ir->ir_rbits;
	}
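	/*
	 * Scan down from bit_pos to last_bit (a single bit when a
	 * specific irq was requested) looking for a still-available
	 * bit, claim it if found, and fail with -1 otherwise.
	 */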
	for (mask = (1 << bit_pos); bit_pos >= last_bit; bit_pos--) {
		if (*bits & mask)
			break;
		mask >>= 1;
	}
	if (bit_pos >= last_bit) {
		*bits &= ~mask;
		return bit_pos;
	}

	return -1;
}

/*
 * This returns the next available spl bit.
 */
static int
hppa_intr_ipl_next(struct cpu_info *ci)
{
	int idx;

	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++)
		if (ci->ci_ib[idx].ib_reg == NULL)
			break;
	if (idx == HPPA_INTERRUPT_BITS)
		panic("%s: too many devices", __func__);
	return idx;
}

/*
 * This recomputes the per-IPL interrupt masks and reloads the hardware
 * mask registers.
 */
void
hppa_intr_calculatemasks(struct cpu_info *ci)
{
	struct hppa_interrupt_bit *ib;
	struct hppa_interrupt_register *ir;
	int idx, bit_pos;
	int mask;
	int ipl;

	/*
	 * Put together the initial imask for each level.
	 */
	memset(ci->ci_imask, 0, sizeof(ci->ci_imask));
	for (bit_pos = 0; bit_pos < HPPA_INTERRUPT_BITS; bit_pos++) {
		ib = &ci->ci_ib[bit_pos];
		if (ib->ib_reg == NULL)
			continue;
		ci->ci_imask[ib->ib_ipl] |= ib->ib_spl;
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	ci->ci_imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data: each level must also block everything blocked at
	 * all lower levels.
	 */
	for (ipl = 0; ipl < NIPL - 1; ipl++)
		ci->ci_imask[ipl + 1] |= ci->ci_imask[ipl];
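
	/*
	 * For example (hypothetical assignment), with one handler at
	 * IPL_VM on spl bit 0 and one at IPL_SCHED on spl bit 1, the
	 * loop above leaves imask[IPL_VM] = 0x1 and
	 * imask[IPL_SCHED] = 0x3: raising to IPL_SCHED blocks both,
	 * while raising to IPL_VM blocks only its own bit.
	 */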

	/*
	 * Load all mask registers, loading %eiem last. This will finally
	 * enable interrupts, but since cpl and ipending should be -1 and 0,
	 * respectively, no interrupts will get dispatched until the priority
	 * level is lowered.
	 */
	KASSERT(ci->ci_cpl == -1);
	KASSERT(ci->ci_ipending == 0);

	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++) {
		ir = hppa_interrupt_registers[idx];
		if (ir == NULL || ir->ir_ci != ci)
			continue;
		mask = 0;
		for (bit_pos = 0; bit_pos < HPPA_INTERRUPT_BITS; bit_pos++) {
			if (!IR_BIT_UNUSED_P(ir->ir_bits_map[31 ^ bit_pos]))
				mask |= (1 << bit_pos);
		}
		if (ir->ir_iscpu)
			ir->ir_ci->ci_eiem = mask;
		else if (ir->ir_mask != NULL)
			*ir->ir_mask = mask;
	}
}

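/*
 * Enable interrupts on this CPU: load the computed mask into %eiem,
 * record PSW_I in ci_psw, and turn interrupts on via hppa_enable_irq().
 */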
void
hppa_intr_enable(void)
{
	struct cpu_info *ci = curcpu();

	mtctl(ci->ci_eiem, CR_EIEM);
	ci->ci_psw |= PSW_I;
	hppa_enable_irq();
}


/*
 * Service interrupts. This doesn't necessarily dispatch them. This is
 * called with %eiem loaded with zero. It's named hppa_intr because
 * trap.c calls it by that name.
 */
void
hppa_intr(struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	int eirr;
	int i;

#ifndef LOCKDEBUG
	extern char mutex_enter_crit_start[];
	extern char mutex_enter_crit_end[];

#ifndef MULTIPROCESSOR
	extern char _lock_cas_ras_start[];
	extern char _lock_cas_ras_end[];

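	/*
	 * _lock_cas() is a restartable atomic sequence on non-MP
	 * kernels: if we interrupted it partway through, roll the
	 * interrupted instruction queue back to its start so the
	 * compare-and-swap re-executes from scratch on return.
	 */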
	if (frame->tf_iisq_head == HPPA_SID_KERNEL &&
	    frame->tf_iioq_head > (u_int)_lock_cas_ras_start &&
	    frame->tf_iioq_head < (u_int)_lock_cas_ras_end) {
		frame->tf_iioq_head = (u_int)_lock_cas_ras_start;
		frame->tf_iioq_tail = (u_int)_lock_cas_ras_start + 4;
	}
#endif

	/*
	 * If we interrupted in the middle of mutex_enter(), we must patch up
	 * the lock owner value quickly if we got the interlock. If any of the
	 * interrupt handlers need to acquire the mutex, they could deadlock if
	 * the owner value is left unset.
	 */
	if (frame->tf_iisq_head == HPPA_SID_KERNEL &&
	    frame->tf_iioq_head > (u_int)mutex_enter_crit_start &&
	    frame->tf_iioq_head < (u_int)mutex_enter_crit_end &&
	    frame->tf_ret0 != 0)
		((kmutex_t *)frame->tf_arg0)->mtx_owner = (uintptr_t)curlwp;
#endif

	/*
	 * Read the CPU interrupt register and acknowledge all interrupts.
	 * Starting with this value, get our set of new pending interrupts and
	 * add these new bits to ipending.
	 */
	mfctl(CR_EIRR, eirr);
	mtctl(eirr, CR_EIRR);

	ci->ci_ipending |= hppa_intr_ipending(&ci->ci_ir, eirr);

	i = 0;
	/* If we have interrupts to dispatch, do so. */
	while (ci->ci_ipending & ~ci->ci_cpl) {
		int shared;

		hppa_intr_dispatch(ci->ci_cpl, frame->tf_eiem, frame);

		shared = ci->ci_ishared;
		while (shared) {
			struct hppa_interrupt_register *sir;
			int sbit, lvl;

			sbit = ffs(shared) - 1;
			sir = hppa_interrupt_registers[sbit];
			lvl = *sir->ir_level;

			ci->ci_ipending |= hppa_intr_ipending(sir, lvl);
			shared &= ~(1 << sbit);
		}
		i++;
		KASSERTMSG(i <= 2,
		    "%s: ci->ipending %08x ci->ci_cpl %08x shared %08x\n",
		    __func__, ci->ci_ipending, ci->ci_cpl, shared);
	}
}

/*
 * Dispatch interrupts. This dispatches at least one interrupt.
 * This is called with %eiem loaded with zero.
 */
void
hppa_intr_dispatch(int ncpl, int eiem, struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct hppa_interrupt_bit *ib;
	struct clockframe clkframe;
	int ipending_run;
	int bit_pos;
	void *arg;
	int handled __unused;
	bool locked = false;

	/*
	 * Increment our depth
	 */
	ci->ci_intr_depth++;

	/* Loop while we have interrupts to dispatch. */
	for (;;) {

		/* Read ipending and mask it with ncpl. */
		ipending_run = (ci->ci_ipending & ~ncpl);
		if (ipending_run == 0)
			break;

		/* Choose one of the resulting bits to dispatch. */
		bit_pos = ffs(ipending_run) - 1;

		/*
		 * If this interrupt handler takes the clockframe
		 * as an argument, conjure one up.
		 */
		ib = &ci->ci_ib[bit_pos];
		ib->ib_evcnt.ev_count++;
		arg = ib->ib_arg;
		if (arg == NULL) {
			clkframe.cf_flags = (ci->ci_intr_depth > 1 ?
			    TFF_INTR : 0);
			clkframe.cf_spl = ncpl;
			if (frame != NULL) {
				clkframe.cf_flags |= frame->tf_flags;
				clkframe.cf_pc = frame->tf_iioq_head;
			}
			arg = &clkframe;
		}

		/*
		 * Remove this bit from ipending, raise spl to
		 * the level required to run this interrupt,
		 * and reenable interrupts.
		 */
		ci->ci_ipending &= ~(1 << bit_pos);
		ci->ci_cpl = ncpl | ci->ci_imask[ib->ib_ipl];
		mtctl(eiem, CR_EIEM);

		if (ib->ib_ipl == IPL_VM) {
			KERNEL_LOCK(1, NULL);
			locked = true;
		}

		/* Count and dispatch the interrupt. */
		ci->ci_data.cpu_nintr++;
		handled = (*ib->ib_handler)(arg);
#if 0
		if (!handled)
			printf("%s: can't handle interrupt\n",
			    ib->ib_evcnt.ev_name);
#endif
		if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}

		/* Disable interrupts and loop. */
		mtctl(0, CR_EIEM);
	}

	/* Interrupts are disabled again, restore cpl and the depth. */
	ci->ci_cpl = ncpl;
	ci->ci_intr_depth--;
}


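/*
 * Compute the set of pending spl bits for the given interrupt register
 * from its request-register value, recursing into any registers that
 * cascade off one of its bits.
 */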
int
hppa_intr_ipending(struct hppa_interrupt_register *ir, int eirr)
{
	int pending = 0;
	int idx;

	for (idx = 31; idx >= 0; idx--) {
		if ((eirr & (1 << idx)) == 0)
			continue;
		if (IR_BIT_NESTED_P(ir->ir_bits_map[31 ^ idx])) {
			struct hppa_interrupt_register *nir;
			int reg = ir->ir_bits_map[31 ^ idx] & ~IR_BIT_MASK;

			nir = hppa_interrupt_registers[reg];
			pending |= hppa_intr_ipending(nir, *(nir->ir_req));
		} else {
			pending |= ir->ir_bits_map[31 ^ idx];
		}
	}

	return pending;
}

bool
cpu_intr_p(void)
{
	struct cpu_info *ci = curcpu();

#ifdef __HAVE_FAST_SOFTINTS
#error this should not count fast soft interrupts
#else
	return ci->ci_intr_depth != 0;
#endif
}