1*12683SJimmy.Vetayases@oracle.com /*
2*12683SJimmy.Vetayases@oracle.com * CDDL HEADER START
3*12683SJimmy.Vetayases@oracle.com *
4*12683SJimmy.Vetayases@oracle.com * The contents of this file are subject to the terms of the
5*12683SJimmy.Vetayases@oracle.com * Common Development and Distribution License (the "License").
6*12683SJimmy.Vetayases@oracle.com * You may not use this file except in compliance with the License.
7*12683SJimmy.Vetayases@oracle.com *
8*12683SJimmy.Vetayases@oracle.com * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*12683SJimmy.Vetayases@oracle.com * or http://www.opensolaris.org/os/licensing.
10*12683SJimmy.Vetayases@oracle.com * See the License for the specific language governing permissions
11*12683SJimmy.Vetayases@oracle.com * and limitations under the License.
12*12683SJimmy.Vetayases@oracle.com *
13*12683SJimmy.Vetayases@oracle.com * When distributing Covered Code, include this CDDL HEADER in each
14*12683SJimmy.Vetayases@oracle.com * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*12683SJimmy.Vetayases@oracle.com * If applicable, add the following below this CDDL HEADER, with the
16*12683SJimmy.Vetayases@oracle.com * fields enclosed by brackets "[]" replaced with your own identifying
17*12683SJimmy.Vetayases@oracle.com * information: Portions Copyright [yyyy] [name of copyright owner]
18*12683SJimmy.Vetayases@oracle.com *
19*12683SJimmy.Vetayases@oracle.com * CDDL HEADER END
20*12683SJimmy.Vetayases@oracle.com */
21*12683SJimmy.Vetayases@oracle.com
22*12683SJimmy.Vetayases@oracle.com /*
23*12683SJimmy.Vetayases@oracle.com * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24*12683SJimmy.Vetayases@oracle.com */
25*12683SJimmy.Vetayases@oracle.com
26*12683SJimmy.Vetayases@oracle.com #include <sys/cpuvar.h>
27*12683SJimmy.Vetayases@oracle.com #include <sys/cpu_event.h>
28*12683SJimmy.Vetayases@oracle.com #include <sys/param.h>
29*12683SJimmy.Vetayases@oracle.com #include <sys/cmn_err.h>
30*12683SJimmy.Vetayases@oracle.com #include <sys/t_lock.h>
31*12683SJimmy.Vetayases@oracle.com #include <sys/kmem.h>
32*12683SJimmy.Vetayases@oracle.com #include <sys/machlock.h>
33*12683SJimmy.Vetayases@oracle.com #include <sys/systm.h>
34*12683SJimmy.Vetayases@oracle.com #include <sys/archsystm.h>
35*12683SJimmy.Vetayases@oracle.com #include <sys/atomic.h>
36*12683SJimmy.Vetayases@oracle.com #include <sys/sdt.h>
37*12683SJimmy.Vetayases@oracle.com #include <sys/processor.h>
38*12683SJimmy.Vetayases@oracle.com #include <sys/time.h>
39*12683SJimmy.Vetayases@oracle.com #include <sys/psm.h>
40*12683SJimmy.Vetayases@oracle.com #include <sys/smp_impldefs.h>
41*12683SJimmy.Vetayases@oracle.com #include <sys/cram.h>
42*12683SJimmy.Vetayases@oracle.com #include <sys/apic.h>
43*12683SJimmy.Vetayases@oracle.com #include <sys/pit.h>
44*12683SJimmy.Vetayases@oracle.com #include <sys/ddi.h>
45*12683SJimmy.Vetayases@oracle.com #include <sys/sunddi.h>
46*12683SJimmy.Vetayases@oracle.com #include <sys/ddi_impldefs.h>
47*12683SJimmy.Vetayases@oracle.com #include <sys/pci.h>
48*12683SJimmy.Vetayases@oracle.com #include <sys/promif.h>
49*12683SJimmy.Vetayases@oracle.com #include <sys/x86_archext.h>
50*12683SJimmy.Vetayases@oracle.com #include <sys/cpc_impl.h>
51*12683SJimmy.Vetayases@oracle.com #include <sys/uadmin.h>
52*12683SJimmy.Vetayases@oracle.com #include <sys/panic.h>
53*12683SJimmy.Vetayases@oracle.com #include <sys/debug.h>
54*12683SJimmy.Vetayases@oracle.com #include <sys/trap.h>
55*12683SJimmy.Vetayases@oracle.com #include <sys/machsystm.h>
56*12683SJimmy.Vetayases@oracle.com #include <sys/sysmacros.h>
57*12683SJimmy.Vetayases@oracle.com #include <sys/rm_platter.h>
58*12683SJimmy.Vetayases@oracle.com #include <sys/privregs.h>
59*12683SJimmy.Vetayases@oracle.com #include <sys/note.h>
60*12683SJimmy.Vetayases@oracle.com #include <sys/pci_intr_lib.h>
61*12683SJimmy.Vetayases@oracle.com #include <sys/spl.h>
62*12683SJimmy.Vetayases@oracle.com #include <sys/clock.h>
63*12683SJimmy.Vetayases@oracle.com #include <sys/dditypes.h>
64*12683SJimmy.Vetayases@oracle.com #include <sys/sunddi.h>
65*12683SJimmy.Vetayases@oracle.com #include <sys/x_call.h>
66*12683SJimmy.Vetayases@oracle.com #include <sys/reboot.h>
67*12683SJimmy.Vetayases@oracle.com #include <vm/hat_i86.h>
68*12683SJimmy.Vetayases@oracle.com #include <sys/stack.h>
69*12683SJimmy.Vetayases@oracle.com #include <sys/apix.h>
70*12683SJimmy.Vetayases@oracle.com
71*12683SJimmy.Vetayases@oracle.com static void apix_post_hardint(int);
72*12683SJimmy.Vetayases@oracle.com
/*
 * Insert a vector at the tail of the interrupt pending list
 */
76*12683SJimmy.Vetayases@oracle.com static __inline__ void
apix_insert_pending_av(apix_impl_t * apixp,struct autovec * avp,int ipl)77*12683SJimmy.Vetayases@oracle.com apix_insert_pending_av(apix_impl_t *apixp, struct autovec *avp, int ipl)
78*12683SJimmy.Vetayases@oracle.com {
79*12683SJimmy.Vetayases@oracle.com struct autovec **head = apixp->x_intr_head;
80*12683SJimmy.Vetayases@oracle.com struct autovec **tail = apixp->x_intr_tail;
81*12683SJimmy.Vetayases@oracle.com
82*12683SJimmy.Vetayases@oracle.com avp->av_ipl_link = NULL;
83*12683SJimmy.Vetayases@oracle.com if (tail[ipl] == NULL) {
84*12683SJimmy.Vetayases@oracle.com head[ipl] = tail[ipl] = avp;
85*12683SJimmy.Vetayases@oracle.com return;
86*12683SJimmy.Vetayases@oracle.com }
87*12683SJimmy.Vetayases@oracle.com
88*12683SJimmy.Vetayases@oracle.com tail[ipl]->av_ipl_link = avp;
89*12683SJimmy.Vetayases@oracle.com tail[ipl] = avp;
90*12683SJimmy.Vetayases@oracle.com }
91*12683SJimmy.Vetayases@oracle.com
/*
 * Remove and return a vector from the head of the hardware interrupt
 * pending list.
 */
96*12683SJimmy.Vetayases@oracle.com static __inline__ struct autovec *
apix_remove_pending_av(apix_impl_t * apixp,int ipl)97*12683SJimmy.Vetayases@oracle.com apix_remove_pending_av(apix_impl_t *apixp, int ipl)
98*12683SJimmy.Vetayases@oracle.com {
99*12683SJimmy.Vetayases@oracle.com struct cpu *cpu = CPU;
100*12683SJimmy.Vetayases@oracle.com struct autovec **head = apixp->x_intr_head;
101*12683SJimmy.Vetayases@oracle.com struct autovec **tail = apixp->x_intr_tail;
102*12683SJimmy.Vetayases@oracle.com struct autovec *avp = head[ipl];
103*12683SJimmy.Vetayases@oracle.com
104*12683SJimmy.Vetayases@oracle.com if (avp == NULL)
105*12683SJimmy.Vetayases@oracle.com return (NULL);
106*12683SJimmy.Vetayases@oracle.com
107*12683SJimmy.Vetayases@oracle.com if (avp->av_vector != NULL && avp->av_prilevel < cpu->cpu_base_spl) {
108*12683SJimmy.Vetayases@oracle.com /*
109*12683SJimmy.Vetayases@oracle.com * If there is blocked higher level interrupts, return
110*12683SJimmy.Vetayases@oracle.com * NULL to quit handling of current IPL level.
111*12683SJimmy.Vetayases@oracle.com */
112*12683SJimmy.Vetayases@oracle.com apixp->x_intr_pending |= (1 << avp->av_prilevel);
113*12683SJimmy.Vetayases@oracle.com return (NULL);
114*12683SJimmy.Vetayases@oracle.com }
115*12683SJimmy.Vetayases@oracle.com
116*12683SJimmy.Vetayases@oracle.com avp->av_flags &= ~AV_PENTRY_PEND;
117*12683SJimmy.Vetayases@oracle.com avp->av_flags |= AV_PENTRY_ONPROC;
118*12683SJimmy.Vetayases@oracle.com head[ipl] = avp->av_ipl_link;
119*12683SJimmy.Vetayases@oracle.com avp->av_ipl_link = NULL;
120*12683SJimmy.Vetayases@oracle.com
121*12683SJimmy.Vetayases@oracle.com if (head[ipl] == NULL)
122*12683SJimmy.Vetayases@oracle.com tail[ipl] = NULL;
123*12683SJimmy.Vetayases@oracle.com
124*12683SJimmy.Vetayases@oracle.com return (avp);
125*12683SJimmy.Vetayases@oracle.com }
126*12683SJimmy.Vetayases@oracle.com
127*12683SJimmy.Vetayases@oracle.com /*
128*12683SJimmy.Vetayases@oracle.com * add_pending_hardint:
129*12683SJimmy.Vetayases@oracle.com *
130*12683SJimmy.Vetayases@oracle.com * Add hardware interrupts to the interrupt pending list.
131*12683SJimmy.Vetayases@oracle.com */
static void
apix_add_pending_hardint(int vector)
{
	uint32_t cpuid = psm_get_cpu_id();
	apix_impl_t *apixp = apixs[cpuid];
	apix_vector_t *vecp = apixp->x_vectbl[vector];
	struct autovec *p, *prevp = NULL;
	int ipl;

	/*
	 * The MSI interrupt not supporting per-vector masking could
	 * be triggered on a false vector as a result of rebinding
	 * operation cannot programme MSI address & data atomically.
	 * Add ISR of this interrupt to the pending list for such
	 * suspicious interrupt.
	 */
	APIX_DO_FAKE_INTR(cpuid, vector);
	if (vecp == NULL)
		return;	/* no vector bound here; nothing to queue */

	/* queue every live autovec entry for this vector at its own IPL */
	for (p = vecp->v_autovect; p != NULL; p = p->av_link) {
		if (p->av_vector == NULL)
			continue;	/* skip freed entry */

		ipl = p->av_prilevel;
		prevp = p;

		/* set pending at specified priority level */
		apixp->x_intr_pending |= (1 << ipl);

		if (p->av_flags & AV_PENTRY_PEND)
			continue;	/* already in the pending list */
		p->av_flags |= AV_PENTRY_PEND;

		/* insert into pending list by its original IPL */
		apix_insert_pending_av(apixp, p, ipl);
	}

	/*
	 * Last one of the linked list: for a level-triggered entry, stash
	 * the vector number in av_flags (AV_PENTRY_VECTMASK bits) -
	 * presumably so the deferred dispatch can post/EOI the right
	 * vector later (see apix_dispatch_pending_autovect).
	 */
	if (prevp && ((prevp->av_flags & AV_PENTRY_LEVEL) != 0))
		prevp->av_flags |= (vector & AV_PENTRY_VECTMASK);
}
174*12683SJimmy.Vetayases@oracle.com
175*12683SJimmy.Vetayases@oracle.com /*
176*12683SJimmy.Vetayases@oracle.com * Walk pending hardware interrupts at given priority level, invoking
177*12683SJimmy.Vetayases@oracle.com * each interrupt handler as we go.
178*12683SJimmy.Vetayases@oracle.com */
179*12683SJimmy.Vetayases@oracle.com extern uint64_t intr_get_time(void);
180*12683SJimmy.Vetayases@oracle.com
static void
apix_dispatch_pending_autovect(uint_t ipl)
{
	uint32_t cpuid = psm_get_cpu_id();
	apix_impl_t *apixp = apixs[cpuid];
	struct autovec *av;

	/*
	 * Drain this CPU's pending list at the given IPL.  Entered with
	 * interrupts disabled; they are re-enabled around each handler
	 * invocation except at cross-call level.
	 */
	while ((av = apix_remove_pending_av(apixp, ipl)) != NULL) {
		uint_t r;
		uint_t (*intr)() = av->av_vector;
		caddr_t arg1 = av->av_intarg1;
		caddr_t arg2 = av->av_intarg2;
		dev_info_t *dip = av->av_dip;
		/* vector number stashed by apix_add_pending_hardint, or 0 */
		uchar_t vector = av->av_flags & AV_PENTRY_VECTMASK;

		if (intr == NULL)
			continue;	/* entry was freed after queueing */

		/* Don't enable interrupts during x-calls */
		if (ipl != XC_HI_PIL)
			sti();

		DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
		    void *, intr, caddr_t, arg1, caddr_t, arg2);
		r = (*intr)(arg1, arg2);
		DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
		    void *, intr, caddr_t, arg1, uint_t, r);

		/* charge the handler's elapsed time to its tick counter */
		if (av->av_ticksp && av->av_prilevel <= LOCK_LEVEL)
			atomic_add_64(av->av_ticksp, intr_get_time());

		/* interrupts must be off again before touching av_flags */
		cli();

		if (vector) {
			/* drop the stashed vector unless re-queued meanwhile */
			if ((av->av_flags & AV_PENTRY_PEND) == 0)
				av->av_flags &= ~AV_PENTRY_VECTMASK;

			apix_post_hardint(vector);
		}

		/* mark it as idle */
		av->av_flags &= ~AV_PENTRY_ONPROC;
	}
}
225*12683SJimmy.Vetayases@oracle.com
/*
 * Prepare to run a soft interrupt at the given pil on this CPU:
 * switch from the interrupted thread to one of the CPU's dedicated
 * interrupt threads and hand back that thread's stack for the caller
 * to pivot onto.  Called with interrupts disabled.
 *
 * cpu      - this CPU
 * pil      - soft interrupt level to dispatch (must exceed current
 *            priority and the base SPL; asserted below)
 * oldpil   - caller's previous PIL (unused here)
 * stackptr - SP of the interrupted thread, saved in t_sp
 *
 * Returns the base of the interrupt thread's stack.
 */
static caddr_t
apix_do_softint_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil,
    caddr_t stackptr)
{
	kthread_t *t, *volatile it;
	struct machcpu *mcpu = &cpu->cpu_m;
	hrtime_t now;

	UNREFERENCED_1PARAMETER(oldpil);
	ASSERT(pil > mcpu->mcpu_pri && pil > cpu->cpu_base_spl);

	/* atomically clear the pending bit for this soft level */
	atomic_and_32((uint32_t *)&mcpu->mcpu_softinfo.st_pending, ~(1 << pil));

	mcpu->mcpu_pri = pil;

	now = tsc_read();

	/*
	 * Get set to run interrupt thread.
	 * There should always be an interrupt thread since we
	 * allocate one for each level on the CPU.
	 */
	it = cpu->cpu_intr_thread;
	ASSERT(it != NULL);
	cpu->cpu_intr_thread = it->t_link;

	/* t_intr_start could be zero due to cpu_intr_swtch_enter. */
	t = cpu->cpu_thread;
	if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
		/* bill the interrupted interrupt thread for its time so far */
		hrtime_t intrtime = now - t->t_intr_start;
		mcpu->intrstat[pil][0] += intrtime;
		cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
		t->t_intr_start = 0;
	}

	/*
	 * Note that the code in kcpc_overflow_intr -relies- on the
	 * ordering of events here - in particular that t->t_lwp of
	 * the interrupt thread is set to the pinned thread *before*
	 * curthread is changed.
	 */
	it->t_lwp = t->t_lwp;
	it->t_state = TS_ONPROC;

	/*
	 * Push interrupted thread onto list from new thread.
	 * Set the new thread as the current one.
	 * Set interrupted thread's T_SP because if it is the idle thread,
	 * resume() may use that stack between threads.
	 */

	ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
	t->t_sp = (uintptr_t)stackptr;

	it->t_intr = t;
	cpu->cpu_thread = it;

	/*
	 * Set bit for this pil in CPU's interrupt active bitmask.
	 */
	ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
	cpu->cpu_intr_actv |= (1 << pil);

	/*
	 * Initialize thread priority level from intr_pri
	 */
	it->t_pil = (uchar_t)pil;
	it->t_pri = (pri_t)pil + intr_pri;
	it->t_intr_start = now;

	return (it->t_stk);
}
298*12683SJimmy.Vetayases@oracle.com
/*
 * Unwind after a soft interrupt handler returns: bill the interrupt
 * thread's time, return it to the CPU's free list, and resume the
 * pinned (interrupted) thread.  If the handler blocked and there is
 * no pinned thread left, this function does not return - swtch()
 * selects the next runnable thread instead.
 * Called with interrupts disabled, on the interrupt thread's stack.
 */
static void
apix_do_softint_epilog(struct cpu *cpu, uint_t oldpil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t, *it;
	uint_t pil, basespl;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();

	it = cpu->cpu_thread;
	pil = it->t_pil;

	cpu->cpu_stats.sys.intr[pil - 1]++;

	ASSERT(cpu->cpu_intr_actv & (1 << pil));
	cpu->cpu_intr_actv &= ~(1 << pil);

	/* charge this interrupt thread's execution time to the pil stats */
	intrtime = now - it->t_intr_start;
	mcpu->intrstat[pil][0] += intrtime;
	cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;

	/*
	 * If there is still an interrupted thread underneath this one
	 * then the interrupt was never blocked and the return is
	 * fairly simple.  Otherwise it isn't.
	 */
	if ((t = it->t_intr) == NULL) {
		/*
		 * Put thread back on the interrupt thread list.
		 * This was an interrupt thread, so set CPU's base SPL.
		 */
		set_base_spl();
		/* mcpu->mcpu_pri = cpu->cpu_base_spl; */

		it->t_state = TS_FREE;
		it->t_link = cpu->cpu_intr_thread;
		cpu->cpu_intr_thread = it;
		(void) splhigh();
		sti();
		swtch();
		/*NOTREACHED*/
		panic("dosoftint_epilog: swtch returned");
	}
	/* return the interrupt thread to the free list and resume t */
	it->t_link = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it;
	it->t_state = TS_FREE;
	cpu->cpu_thread = t;
	/* restart interval timing if we are resuming an interrupt thread */
	if (t->t_flag & T_INTR_THREAD)
		t->t_intr_start = now;
	basespl = cpu->cpu_base_spl;
	/* drop priority back to the higher of caller's PIL and base SPL */
	pil = MAX(oldpil, basespl);
	mcpu->mcpu_pri = pil;
}
352*12683SJimmy.Vetayases@oracle.com
353*12683SJimmy.Vetayases@oracle.com /*
354*12683SJimmy.Vetayases@oracle.com * Dispatch a soft interrupt
355*12683SJimmy.Vetayases@oracle.com */
static void
apix_dispatch_softint(uint_t oldpil, uint_t arg2)
{
	struct cpu *cpu = CPU;

	UNREFERENCED_1PARAMETER(arg2);

	/* run the softvect handlers at this thread's pil, interrupts on */
	sti();
	av_dispatch_softvect((int)cpu->cpu_thread->t_pil);
	cli();

	/*
	 * Must run softint_epilog() on the interrupt thread stack, since
	 * there may not be a return from it if the interrupt thread blocked.
	 */
	apix_do_softint_epilog(cpu, oldpil);
}
373*12683SJimmy.Vetayases@oracle.com
/*
 * Deliver any softints the current interrupt priority allows.
 * Called with interrupts disabled.  Returns 0 once all deliverable
 * softints have been dispatched, or -1 if the highest pending softint
 * is blocked by the current priority or the CPU's base SPL.
 */
int
apix_do_softint(struct regs *regs)
{
	struct cpu *cpu = CPU;
	int oldipl;
	int newipl;
	volatile uint16_t pending;
	caddr_t newsp;

	/*
	 * st_pending may change underneath us (handlers can post new
	 * softints), so re-read it each iteration until it drains.
	 */
	while ((pending = cpu->cpu_softinfo.st_pending) != 0) {
		/* highest pending soft interrupt level */
		newipl = bsrw_insn(pending);
		oldipl = cpu->cpu_pri;
		/* blocked by current priority or base SPL: give up */
		if (newipl <= oldipl || newipl <= cpu->cpu_base_spl)
			return (-1);

		newsp = apix_do_softint_prolog(cpu, newipl, oldipl,
		    (caddr_t)regs);
		ASSERT(newsp != NULL);
		/* pivot onto the interrupt thread's stack and dispatch */
		switch_sp_and_call(newsp, apix_dispatch_softint, oldipl, 0);
	}

	return (0);
}
401*12683SJimmy.Vetayases@oracle.com
/*
 * Account for entry to a high-level (above LOCK_LEVEL) interrupt:
 * capture profiling PCs at CBE_HIGH_PIL, raise the CPU priority,
 * update interrupt-time statistics and mark this pil active.
 *
 * Returns the pre-existing high-level active mask: non-zero means we
 * nested on top of another high-level interrupt.
 */
static int
apix_hilevel_intr_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil,
    struct regs *rp)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();
	apix_impl_t *apixp = apixs[cpu->cpu_id];
	uint_t mask;

	ASSERT(pil > mcpu->mcpu_pri && pil > cpu->cpu_base_spl);

	if (pil == CBE_HIGH_PIL) {	/* 14 */
		/* record the interrupted PC (user vs. kernel) for profiling */
		cpu->cpu_profile_pil = oldpil;
		if (USERMODE(rp->r_cs)) {
			cpu->cpu_profile_pc = 0;
			cpu->cpu_profile_upc = rp->r_pc;
			cpu->cpu_cpcprofile_pc = 0;
			cpu->cpu_cpcprofile_upc = rp->r_pc;
		} else {
			cpu->cpu_profile_pc = rp->r_pc;
			cpu->cpu_profile_upc = 0;
			cpu->cpu_cpcprofile_pc = rp->r_pc;
			cpu->cpu_cpcprofile_upc = 0;
		}
	}

	mcpu->mcpu_pri = pil;

	mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
	if (mask != 0) {
		int nestpil;

		/*
		 * We have interrupted another high-level interrupt.
		 * Load starting timestamp, compute interval, update
		 * cumulative counter.
		 */
		nestpil = bsrw_insn((uint16_t)mask);
		intrtime = now -
		    mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)];
		mcpu->intrstat[nestpil][0] += intrtime;
		cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
	} else {
		kthread_t *t = cpu->cpu_thread;

		/*
		 * See if we are interrupting a low-level interrupt thread.
		 * If so, account for its time slice only if its time stamp
		 * is non-zero.
		 */
		if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
			intrtime = now - t->t_intr_start;
			mcpu->intrstat[t->t_pil][0] += intrtime;
			cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
			t->t_intr_start = 0;
		}
	}

	/* store starting timestamp in CPU structure for this IPL */
	mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] = now;

	if (pil == 15) {
		/*
		 * To support reentrant level 15 interrupts, we maintain a
		 * recursion count in the top half of cpu_intr_actv.  Only
		 * when this count hits zero do we clear the PIL 15 bit from
		 * the lower half of cpu_intr_actv.
		 */
		uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
		(*refcntp)++;
	}

	cpu->cpu_intr_actv |= (1 << pil);
	/* clear pending ipl level bit */
	apixp->x_intr_pending &= ~(1 << pil);

	return (mask);
}
481*12683SJimmy.Vetayases@oracle.com
482*12683SJimmy.Vetayases@oracle.com static int
apix_hilevel_intr_epilog(struct cpu * cpu,uint_t oldpil)483*12683SJimmy.Vetayases@oracle.com apix_hilevel_intr_epilog(struct cpu *cpu, uint_t oldpil)
484*12683SJimmy.Vetayases@oracle.com {
485*12683SJimmy.Vetayases@oracle.com struct machcpu *mcpu = &cpu->cpu_m;
486*12683SJimmy.Vetayases@oracle.com uint_t mask, pil;
487*12683SJimmy.Vetayases@oracle.com hrtime_t intrtime;
488*12683SJimmy.Vetayases@oracle.com hrtime_t now = tsc_read();
489*12683SJimmy.Vetayases@oracle.com
490*12683SJimmy.Vetayases@oracle.com pil = mcpu->mcpu_pri;
491*12683SJimmy.Vetayases@oracle.com cpu->cpu_stats.sys.intr[pil - 1]++;
492*12683SJimmy.Vetayases@oracle.com
493*12683SJimmy.Vetayases@oracle.com ASSERT(cpu->cpu_intr_actv & (1 << pil));
494*12683SJimmy.Vetayases@oracle.com
495*12683SJimmy.Vetayases@oracle.com if (pil == 15) {
496*12683SJimmy.Vetayases@oracle.com /*
497*12683SJimmy.Vetayases@oracle.com * To support reentrant level 15 interrupts, we maintain a
498*12683SJimmy.Vetayases@oracle.com * recursion count in the top half of cpu_intr_actv. Only
499*12683SJimmy.Vetayases@oracle.com * when this count hits zero do we clear the PIL 15 bit from
500*12683SJimmy.Vetayases@oracle.com * the lower half of cpu_intr_actv.
501*12683SJimmy.Vetayases@oracle.com */
502*12683SJimmy.Vetayases@oracle.com uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
503*12683SJimmy.Vetayases@oracle.com
504*12683SJimmy.Vetayases@oracle.com ASSERT(*refcntp > 0);
505*12683SJimmy.Vetayases@oracle.com
506*12683SJimmy.Vetayases@oracle.com if (--(*refcntp) == 0)
507*12683SJimmy.Vetayases@oracle.com cpu->cpu_intr_actv &= ~(1 << pil);
508*12683SJimmy.Vetayases@oracle.com } else {
509*12683SJimmy.Vetayases@oracle.com cpu->cpu_intr_actv &= ~(1 << pil);
510*12683SJimmy.Vetayases@oracle.com }
511*12683SJimmy.Vetayases@oracle.com
512*12683SJimmy.Vetayases@oracle.com ASSERT(mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] != 0);
513*12683SJimmy.Vetayases@oracle.com
514*12683SJimmy.Vetayases@oracle.com intrtime = now - mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)];
515*12683SJimmy.Vetayases@oracle.com mcpu->intrstat[pil][0] += intrtime;
516*12683SJimmy.Vetayases@oracle.com cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
517*12683SJimmy.Vetayases@oracle.com
518*12683SJimmy.Vetayases@oracle.com /*
519*12683SJimmy.Vetayases@oracle.com * Check for lower-pil nested high-level interrupt beneath
520*12683SJimmy.Vetayases@oracle.com * current one. If so, place a starting timestamp in its
521*12683SJimmy.Vetayases@oracle.com * pil_high_start entry.
522*12683SJimmy.Vetayases@oracle.com */
523*12683SJimmy.Vetayases@oracle.com mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
524*12683SJimmy.Vetayases@oracle.com if (mask != 0) {
525*12683SJimmy.Vetayases@oracle.com int nestpil;
526*12683SJimmy.Vetayases@oracle.com
527*12683SJimmy.Vetayases@oracle.com /*
528*12683SJimmy.Vetayases@oracle.com * find PIL of nested interrupt
529*12683SJimmy.Vetayases@oracle.com */
530*12683SJimmy.Vetayases@oracle.com nestpil = bsrw_insn((uint16_t)mask);
531*12683SJimmy.Vetayases@oracle.com ASSERT(nestpil < pil);
532*12683SJimmy.Vetayases@oracle.com mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)] = now;
533*12683SJimmy.Vetayases@oracle.com /*
534*12683SJimmy.Vetayases@oracle.com * (Another high-level interrupt is active below this one,
535*12683SJimmy.Vetayases@oracle.com * so there is no need to check for an interrupt
536*12683SJimmy.Vetayases@oracle.com * thread. That will be done by the lowest priority
537*12683SJimmy.Vetayases@oracle.com * high-level interrupt active.)
538*12683SJimmy.Vetayases@oracle.com */
539*12683SJimmy.Vetayases@oracle.com } else {
540*12683SJimmy.Vetayases@oracle.com /*
541*12683SJimmy.Vetayases@oracle.com * Check to see if there is a low-level interrupt active.
542*12683SJimmy.Vetayases@oracle.com * If so, place a starting timestamp in the thread
543*12683SJimmy.Vetayases@oracle.com * structure.
544*12683SJimmy.Vetayases@oracle.com */
545*12683SJimmy.Vetayases@oracle.com kthread_t *t = cpu->cpu_thread;
546*12683SJimmy.Vetayases@oracle.com
547*12683SJimmy.Vetayases@oracle.com if (t->t_flag & T_INTR_THREAD)
548*12683SJimmy.Vetayases@oracle.com t->t_intr_start = now;
549*12683SJimmy.Vetayases@oracle.com }
550*12683SJimmy.Vetayases@oracle.com
551*12683SJimmy.Vetayases@oracle.com mcpu->mcpu_pri = oldpil;
552*12683SJimmy.Vetayases@oracle.com if (pil < CBE_HIGH_PIL)
553*12683SJimmy.Vetayases@oracle.com (void) (*setlvlx)(oldpil, 0);
554*12683SJimmy.Vetayases@oracle.com
555*12683SJimmy.Vetayases@oracle.com return (mask);
556*12683SJimmy.Vetayases@oracle.com }
557*12683SJimmy.Vetayases@oracle.com
558*12683SJimmy.Vetayases@oracle.com /*
559*12683SJimmy.Vetayases@oracle.com * Dispatch a hilevel interrupt (one above LOCK_LEVEL)
560*12683SJimmy.Vetayases@oracle.com */
561*12683SJimmy.Vetayases@oracle.com static void
apix_dispatch_pending_hilevel(uint_t ipl,uint_t arg2)562*12683SJimmy.Vetayases@oracle.com apix_dispatch_pending_hilevel(uint_t ipl, uint_t arg2)
563*12683SJimmy.Vetayases@oracle.com {
564*12683SJimmy.Vetayases@oracle.com UNREFERENCED_1PARAMETER(arg2);
565*12683SJimmy.Vetayases@oracle.com
566*12683SJimmy.Vetayases@oracle.com apix_dispatch_pending_autovect(ipl);
567*12683SJimmy.Vetayases@oracle.com }
568*12683SJimmy.Vetayases@oracle.com
569*12683SJimmy.Vetayases@oracle.com static __inline__ int
apix_do_pending_hilevel(struct cpu * cpu,struct regs * rp)570*12683SJimmy.Vetayases@oracle.com apix_do_pending_hilevel(struct cpu *cpu, struct regs *rp)
571*12683SJimmy.Vetayases@oracle.com {
572*12683SJimmy.Vetayases@oracle.com volatile uint16_t pending;
573*12683SJimmy.Vetayases@oracle.com uint_t newipl, oldipl;
574*12683SJimmy.Vetayases@oracle.com caddr_t newsp;
575*12683SJimmy.Vetayases@oracle.com
576*12683SJimmy.Vetayases@oracle.com while ((pending = HILEVEL_PENDING(cpu)) != 0) {
577*12683SJimmy.Vetayases@oracle.com newipl = bsrw_insn(pending);
578*12683SJimmy.Vetayases@oracle.com ASSERT(newipl > LOCK_LEVEL && newipl > cpu->cpu_base_spl);
579*12683SJimmy.Vetayases@oracle.com oldipl = cpu->cpu_pri;
580*12683SJimmy.Vetayases@oracle.com if (newipl <= oldipl)
581*12683SJimmy.Vetayases@oracle.com return (-1);
582*12683SJimmy.Vetayases@oracle.com
583*12683SJimmy.Vetayases@oracle.com /*
584*12683SJimmy.Vetayases@oracle.com * High priority interrupts run on this cpu's interrupt stack.
585*12683SJimmy.Vetayases@oracle.com */
586*12683SJimmy.Vetayases@oracle.com if (apix_hilevel_intr_prolog(cpu, newipl, oldipl, rp) == 0) {
587*12683SJimmy.Vetayases@oracle.com newsp = cpu->cpu_intr_stack;
588*12683SJimmy.Vetayases@oracle.com switch_sp_and_call(newsp, apix_dispatch_pending_hilevel,
589*12683SJimmy.Vetayases@oracle.com newipl, 0);
590*12683SJimmy.Vetayases@oracle.com } else { /* already on the interrupt stack */
591*12683SJimmy.Vetayases@oracle.com apix_dispatch_pending_hilevel(newipl, 0);
592*12683SJimmy.Vetayases@oracle.com }
593*12683SJimmy.Vetayases@oracle.com (void) apix_hilevel_intr_epilog(cpu, oldipl);
594*12683SJimmy.Vetayases@oracle.com }
595*12683SJimmy.Vetayases@oracle.com
596*12683SJimmy.Vetayases@oracle.com return (0);
597*12683SJimmy.Vetayases@oracle.com }
598*12683SJimmy.Vetayases@oracle.com
599*12683SJimmy.Vetayases@oracle.com /*
600*12683SJimmy.Vetayases@oracle.com * Get an interrupt thread and swith to it. It's called from do_interrupt().
601*12683SJimmy.Vetayases@oracle.com * The IF flag is cleared and thus all maskable interrupts are blocked at
602*12683SJimmy.Vetayases@oracle.com * the time of calling.
603*12683SJimmy.Vetayases@oracle.com */
604*12683SJimmy.Vetayases@oracle.com static caddr_t
apix_intr_thread_prolog(struct cpu * cpu,uint_t pil,caddr_t stackptr)605*12683SJimmy.Vetayases@oracle.com apix_intr_thread_prolog(struct cpu *cpu, uint_t pil, caddr_t stackptr)
606*12683SJimmy.Vetayases@oracle.com {
607*12683SJimmy.Vetayases@oracle.com apix_impl_t *apixp = apixs[cpu->cpu_id];
608*12683SJimmy.Vetayases@oracle.com struct machcpu *mcpu = &cpu->cpu_m;
609*12683SJimmy.Vetayases@oracle.com hrtime_t now = tsc_read();
610*12683SJimmy.Vetayases@oracle.com kthread_t *t, *volatile it;
611*12683SJimmy.Vetayases@oracle.com
612*12683SJimmy.Vetayases@oracle.com ASSERT(pil > mcpu->mcpu_pri && pil > cpu->cpu_base_spl);
613*12683SJimmy.Vetayases@oracle.com
614*12683SJimmy.Vetayases@oracle.com apixp->x_intr_pending &= ~(1 << pil);
615*12683SJimmy.Vetayases@oracle.com ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
616*12683SJimmy.Vetayases@oracle.com cpu->cpu_intr_actv |= (1 << pil);
617*12683SJimmy.Vetayases@oracle.com mcpu->mcpu_pri = pil;
618*12683SJimmy.Vetayases@oracle.com
619*12683SJimmy.Vetayases@oracle.com /*
620*12683SJimmy.Vetayases@oracle.com * Get set to run interrupt thread.
621*12683SJimmy.Vetayases@oracle.com * There should always be an interrupt thread since we
622*12683SJimmy.Vetayases@oracle.com * allocate one for each level on the CPU.
623*12683SJimmy.Vetayases@oracle.com */
624*12683SJimmy.Vetayases@oracle.com /* t_intr_start could be zero due to cpu_intr_swtch_enter. */
625*12683SJimmy.Vetayases@oracle.com t = cpu->cpu_thread;
626*12683SJimmy.Vetayases@oracle.com if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
627*12683SJimmy.Vetayases@oracle.com hrtime_t intrtime = now - t->t_intr_start;
628*12683SJimmy.Vetayases@oracle.com mcpu->intrstat[pil][0] += intrtime;
629*12683SJimmy.Vetayases@oracle.com cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
630*12683SJimmy.Vetayases@oracle.com t->t_intr_start = 0;
631*12683SJimmy.Vetayases@oracle.com }
632*12683SJimmy.Vetayases@oracle.com
633*12683SJimmy.Vetayases@oracle.com /*
634*12683SJimmy.Vetayases@oracle.com * Push interrupted thread onto list from new thread.
635*12683SJimmy.Vetayases@oracle.com * Set the new thread as the current one.
636*12683SJimmy.Vetayases@oracle.com * Set interrupted thread's T_SP because if it is the idle thread,
637*12683SJimmy.Vetayases@oracle.com * resume() may use that stack between threads.
638*12683SJimmy.Vetayases@oracle.com */
639*12683SJimmy.Vetayases@oracle.com
640*12683SJimmy.Vetayases@oracle.com ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
641*12683SJimmy.Vetayases@oracle.com
642*12683SJimmy.Vetayases@oracle.com t->t_sp = (uintptr_t)stackptr; /* mark stack in curthread for resume */
643*12683SJimmy.Vetayases@oracle.com
644*12683SJimmy.Vetayases@oracle.com /*
645*12683SJimmy.Vetayases@oracle.com * Note that the code in kcpc_overflow_intr -relies- on the
646*12683SJimmy.Vetayases@oracle.com * ordering of events here - in particular that t->t_lwp of
647*12683SJimmy.Vetayases@oracle.com * the interrupt thread is set to the pinned thread *before*
648*12683SJimmy.Vetayases@oracle.com * curthread is changed.
649*12683SJimmy.Vetayases@oracle.com */
650*12683SJimmy.Vetayases@oracle.com it = cpu->cpu_intr_thread;
651*12683SJimmy.Vetayases@oracle.com cpu->cpu_intr_thread = it->t_link;
652*12683SJimmy.Vetayases@oracle.com it->t_intr = t;
653*12683SJimmy.Vetayases@oracle.com it->t_lwp = t->t_lwp;
654*12683SJimmy.Vetayases@oracle.com
655*12683SJimmy.Vetayases@oracle.com /*
656*12683SJimmy.Vetayases@oracle.com * (threads on the interrupt thread free list could have state
657*12683SJimmy.Vetayases@oracle.com * preset to TS_ONPROC, but it helps in debugging if
658*12683SJimmy.Vetayases@oracle.com * they're TS_FREE.)
659*12683SJimmy.Vetayases@oracle.com */
660*12683SJimmy.Vetayases@oracle.com it->t_state = TS_ONPROC;
661*12683SJimmy.Vetayases@oracle.com
662*12683SJimmy.Vetayases@oracle.com cpu->cpu_thread = it;
663*12683SJimmy.Vetayases@oracle.com
664*12683SJimmy.Vetayases@oracle.com /*
665*12683SJimmy.Vetayases@oracle.com * Initialize thread priority level from intr_pri
666*12683SJimmy.Vetayases@oracle.com */
667*12683SJimmy.Vetayases@oracle.com it->t_pil = (uchar_t)pil;
668*12683SJimmy.Vetayases@oracle.com it->t_pri = (pri_t)pil + intr_pri;
669*12683SJimmy.Vetayases@oracle.com it->t_intr_start = now;
670*12683SJimmy.Vetayases@oracle.com
671*12683SJimmy.Vetayases@oracle.com return (it->t_stk);
672*12683SJimmy.Vetayases@oracle.com }
673*12683SJimmy.Vetayases@oracle.com
674*12683SJimmy.Vetayases@oracle.com static void
apix_intr_thread_epilog(struct cpu * cpu,uint_t oldpil)675*12683SJimmy.Vetayases@oracle.com apix_intr_thread_epilog(struct cpu *cpu, uint_t oldpil)
676*12683SJimmy.Vetayases@oracle.com {
677*12683SJimmy.Vetayases@oracle.com struct machcpu *mcpu = &cpu->cpu_m;
678*12683SJimmy.Vetayases@oracle.com kthread_t *t, *it = cpu->cpu_thread;
679*12683SJimmy.Vetayases@oracle.com uint_t pil, basespl;
680*12683SJimmy.Vetayases@oracle.com hrtime_t intrtime;
681*12683SJimmy.Vetayases@oracle.com hrtime_t now = tsc_read();
682*12683SJimmy.Vetayases@oracle.com
683*12683SJimmy.Vetayases@oracle.com pil = it->t_pil;
684*12683SJimmy.Vetayases@oracle.com cpu->cpu_stats.sys.intr[pil - 1]++;
685*12683SJimmy.Vetayases@oracle.com
686*12683SJimmy.Vetayases@oracle.com ASSERT(cpu->cpu_intr_actv & (1 << pil));
687*12683SJimmy.Vetayases@oracle.com cpu->cpu_intr_actv &= ~(1 << pil);
688*12683SJimmy.Vetayases@oracle.com
689*12683SJimmy.Vetayases@oracle.com ASSERT(it->t_intr_start != 0);
690*12683SJimmy.Vetayases@oracle.com intrtime = now - it->t_intr_start;
691*12683SJimmy.Vetayases@oracle.com mcpu->intrstat[pil][0] += intrtime;
692*12683SJimmy.Vetayases@oracle.com cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
693*12683SJimmy.Vetayases@oracle.com
694*12683SJimmy.Vetayases@oracle.com /*
695*12683SJimmy.Vetayases@oracle.com * If there is still an interrupted thread underneath this one
696*12683SJimmy.Vetayases@oracle.com * then the interrupt was never blocked and the return is
697*12683SJimmy.Vetayases@oracle.com * fairly simple. Otherwise it isn't.
698*12683SJimmy.Vetayases@oracle.com */
699*12683SJimmy.Vetayases@oracle.com if ((t = it->t_intr) == NULL) {
700*12683SJimmy.Vetayases@oracle.com /*
701*12683SJimmy.Vetayases@oracle.com * The interrupted thread is no longer pinned underneath
702*12683SJimmy.Vetayases@oracle.com * the interrupt thread. This means the interrupt must
703*12683SJimmy.Vetayases@oracle.com * have blocked, and the interrupted thread has been
704*12683SJimmy.Vetayases@oracle.com * unpinned, and has probably been running around the
705*12683SJimmy.Vetayases@oracle.com * system for a while.
706*12683SJimmy.Vetayases@oracle.com *
707*12683SJimmy.Vetayases@oracle.com * Since there is no longer a thread under this one, put
708*12683SJimmy.Vetayases@oracle.com * this interrupt thread back on the CPU's free list and
709*12683SJimmy.Vetayases@oracle.com * resume the idle thread which will dispatch the next
710*12683SJimmy.Vetayases@oracle.com * thread to run.
711*12683SJimmy.Vetayases@oracle.com */
712*12683SJimmy.Vetayases@oracle.com cpu->cpu_stats.sys.intrblk++;
713*12683SJimmy.Vetayases@oracle.com
714*12683SJimmy.Vetayases@oracle.com /*
715*12683SJimmy.Vetayases@oracle.com * Put thread back on the interrupt thread list.
716*12683SJimmy.Vetayases@oracle.com * This was an interrupt thread, so set CPU's base SPL.
717*12683SJimmy.Vetayases@oracle.com */
718*12683SJimmy.Vetayases@oracle.com set_base_spl();
719*12683SJimmy.Vetayases@oracle.com basespl = cpu->cpu_base_spl;
720*12683SJimmy.Vetayases@oracle.com mcpu->mcpu_pri = basespl;
721*12683SJimmy.Vetayases@oracle.com (*setlvlx)(basespl, 0);
722*12683SJimmy.Vetayases@oracle.com
723*12683SJimmy.Vetayases@oracle.com it->t_state = TS_FREE;
724*12683SJimmy.Vetayases@oracle.com /*
725*12683SJimmy.Vetayases@oracle.com * Return interrupt thread to pool
726*12683SJimmy.Vetayases@oracle.com */
727*12683SJimmy.Vetayases@oracle.com it->t_link = cpu->cpu_intr_thread;
728*12683SJimmy.Vetayases@oracle.com cpu->cpu_intr_thread = it;
729*12683SJimmy.Vetayases@oracle.com
730*12683SJimmy.Vetayases@oracle.com (void) splhigh();
731*12683SJimmy.Vetayases@oracle.com sti();
732*12683SJimmy.Vetayases@oracle.com swtch();
733*12683SJimmy.Vetayases@oracle.com /*NOTREACHED*/
734*12683SJimmy.Vetayases@oracle.com panic("dosoftint_epilog: swtch returned");
735*12683SJimmy.Vetayases@oracle.com }
736*12683SJimmy.Vetayases@oracle.com
737*12683SJimmy.Vetayases@oracle.com /*
738*12683SJimmy.Vetayases@oracle.com * Return interrupt thread to the pool
739*12683SJimmy.Vetayases@oracle.com */
740*12683SJimmy.Vetayases@oracle.com it->t_link = cpu->cpu_intr_thread;
741*12683SJimmy.Vetayases@oracle.com cpu->cpu_intr_thread = it;
742*12683SJimmy.Vetayases@oracle.com it->t_state = TS_FREE;
743*12683SJimmy.Vetayases@oracle.com
744*12683SJimmy.Vetayases@oracle.com cpu->cpu_thread = t;
745*12683SJimmy.Vetayases@oracle.com if (t->t_flag & T_INTR_THREAD)
746*12683SJimmy.Vetayases@oracle.com t->t_intr_start = now;
747*12683SJimmy.Vetayases@oracle.com basespl = cpu->cpu_base_spl;
748*12683SJimmy.Vetayases@oracle.com mcpu->mcpu_pri = MAX(oldpil, basespl);
749*12683SJimmy.Vetayases@oracle.com (*setlvlx)(mcpu->mcpu_pri, 0);
750*12683SJimmy.Vetayases@oracle.com }
751*12683SJimmy.Vetayases@oracle.com
752*12683SJimmy.Vetayases@oracle.com
753*12683SJimmy.Vetayases@oracle.com static void
apix_dispatch_pending_hardint(uint_t oldpil,uint_t arg2)754*12683SJimmy.Vetayases@oracle.com apix_dispatch_pending_hardint(uint_t oldpil, uint_t arg2)
755*12683SJimmy.Vetayases@oracle.com {
756*12683SJimmy.Vetayases@oracle.com struct cpu *cpu = CPU;
757*12683SJimmy.Vetayases@oracle.com
758*12683SJimmy.Vetayases@oracle.com UNREFERENCED_1PARAMETER(arg2);
759*12683SJimmy.Vetayases@oracle.com
760*12683SJimmy.Vetayases@oracle.com apix_dispatch_pending_autovect((int)cpu->cpu_thread->t_pil);
761*12683SJimmy.Vetayases@oracle.com
762*12683SJimmy.Vetayases@oracle.com /*
763*12683SJimmy.Vetayases@oracle.com * Must run intr_thread_epilog() on the interrupt thread stack, since
764*12683SJimmy.Vetayases@oracle.com * there may not be a return from it if the interrupt thread blocked.
765*12683SJimmy.Vetayases@oracle.com */
766*12683SJimmy.Vetayases@oracle.com apix_intr_thread_epilog(cpu, oldpil);
767*12683SJimmy.Vetayases@oracle.com }
768*12683SJimmy.Vetayases@oracle.com
769*12683SJimmy.Vetayases@oracle.com static __inline__ int
apix_do_pending_hardint(struct cpu * cpu,struct regs * rp)770*12683SJimmy.Vetayases@oracle.com apix_do_pending_hardint(struct cpu *cpu, struct regs *rp)
771*12683SJimmy.Vetayases@oracle.com {
772*12683SJimmy.Vetayases@oracle.com volatile uint16_t pending;
773*12683SJimmy.Vetayases@oracle.com uint_t newipl, oldipl;
774*12683SJimmy.Vetayases@oracle.com caddr_t newsp;
775*12683SJimmy.Vetayases@oracle.com
776*12683SJimmy.Vetayases@oracle.com while ((pending = LOWLEVEL_PENDING(cpu)) != 0) {
777*12683SJimmy.Vetayases@oracle.com newipl = bsrw_insn(pending);
778*12683SJimmy.Vetayases@oracle.com ASSERT(newipl <= LOCK_LEVEL);
779*12683SJimmy.Vetayases@oracle.com oldipl = cpu->cpu_pri;
780*12683SJimmy.Vetayases@oracle.com if (newipl <= oldipl || newipl <= cpu->cpu_base_spl)
781*12683SJimmy.Vetayases@oracle.com return (-1);
782*12683SJimmy.Vetayases@oracle.com
783*12683SJimmy.Vetayases@oracle.com /*
784*12683SJimmy.Vetayases@oracle.com * Run this interrupt in a separate thread.
785*12683SJimmy.Vetayases@oracle.com */
786*12683SJimmy.Vetayases@oracle.com newsp = apix_intr_thread_prolog(cpu, newipl, (caddr_t)rp);
787*12683SJimmy.Vetayases@oracle.com ASSERT(newsp != NULL);
788*12683SJimmy.Vetayases@oracle.com switch_sp_and_call(newsp, apix_dispatch_pending_hardint,
789*12683SJimmy.Vetayases@oracle.com oldipl, 0);
790*12683SJimmy.Vetayases@oracle.com }
791*12683SJimmy.Vetayases@oracle.com
792*12683SJimmy.Vetayases@oracle.com return (0);
793*12683SJimmy.Vetayases@oracle.com }
794*12683SJimmy.Vetayases@oracle.com
795*12683SJimmy.Vetayases@oracle.com /*
796*12683SJimmy.Vetayases@oracle.com * Unmask level triggered interrupts
797*12683SJimmy.Vetayases@oracle.com */
798*12683SJimmy.Vetayases@oracle.com static void
apix_post_hardint(int vector)799*12683SJimmy.Vetayases@oracle.com apix_post_hardint(int vector)
800*12683SJimmy.Vetayases@oracle.com {
801*12683SJimmy.Vetayases@oracle.com apix_vector_t *vecp = xv_vector(psm_get_cpu_id(), vector);
802*12683SJimmy.Vetayases@oracle.com int irqno = vecp->v_inum;
803*12683SJimmy.Vetayases@oracle.com
804*12683SJimmy.Vetayases@oracle.com ASSERT(vecp->v_type == APIX_TYPE_FIXED && apic_level_intr[irqno]);
805*12683SJimmy.Vetayases@oracle.com
806*12683SJimmy.Vetayases@oracle.com apix_level_intr_post_dispatch(irqno);
807*12683SJimmy.Vetayases@oracle.com }
808*12683SJimmy.Vetayases@oracle.com
809*12683SJimmy.Vetayases@oracle.com static void
apix_dispatch_by_vector(uint_t vector)810*12683SJimmy.Vetayases@oracle.com apix_dispatch_by_vector(uint_t vector)
811*12683SJimmy.Vetayases@oracle.com {
812*12683SJimmy.Vetayases@oracle.com struct cpu *cpu = CPU;
813*12683SJimmy.Vetayases@oracle.com apix_vector_t *vecp = xv_vector(cpu->cpu_id, vector);
814*12683SJimmy.Vetayases@oracle.com struct autovec *avp;
815*12683SJimmy.Vetayases@oracle.com uint_t r, (*intr)();
816*12683SJimmy.Vetayases@oracle.com caddr_t arg1, arg2;
817*12683SJimmy.Vetayases@oracle.com dev_info_t *dip;
818*12683SJimmy.Vetayases@oracle.com
819*12683SJimmy.Vetayases@oracle.com if (vecp == NULL ||
820*12683SJimmy.Vetayases@oracle.com (avp = vecp->v_autovect) == NULL || avp->av_vector == NULL)
821*12683SJimmy.Vetayases@oracle.com return;
822*12683SJimmy.Vetayases@oracle.com
823*12683SJimmy.Vetayases@oracle.com avp->av_flags |= AV_PENTRY_ONPROC;
824*12683SJimmy.Vetayases@oracle.com intr = avp->av_vector;
825*12683SJimmy.Vetayases@oracle.com arg1 = avp->av_intarg1;
826*12683SJimmy.Vetayases@oracle.com arg2 = avp->av_intarg2;
827*12683SJimmy.Vetayases@oracle.com dip = avp->av_dip;
828*12683SJimmy.Vetayases@oracle.com
829*12683SJimmy.Vetayases@oracle.com if (avp->av_prilevel != XC_HI_PIL)
830*12683SJimmy.Vetayases@oracle.com sti();
831*12683SJimmy.Vetayases@oracle.com
832*12683SJimmy.Vetayases@oracle.com DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
833*12683SJimmy.Vetayases@oracle.com void *, intr, caddr_t, arg1, caddr_t, arg2);
834*12683SJimmy.Vetayases@oracle.com r = (*intr)(arg1, arg2);
835*12683SJimmy.Vetayases@oracle.com DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
836*12683SJimmy.Vetayases@oracle.com void *, intr, caddr_t, arg1, uint_t, r);
837*12683SJimmy.Vetayases@oracle.com
838*12683SJimmy.Vetayases@oracle.com cli();
839*12683SJimmy.Vetayases@oracle.com avp->av_flags &= ~AV_PENTRY_ONPROC;
840*12683SJimmy.Vetayases@oracle.com }
841*12683SJimmy.Vetayases@oracle.com
842*12683SJimmy.Vetayases@oracle.com
843*12683SJimmy.Vetayases@oracle.com static void
apix_dispatch_hilevel(uint_t vector,uint_t arg2)844*12683SJimmy.Vetayases@oracle.com apix_dispatch_hilevel(uint_t vector, uint_t arg2)
845*12683SJimmy.Vetayases@oracle.com {
846*12683SJimmy.Vetayases@oracle.com UNREFERENCED_1PARAMETER(arg2);
847*12683SJimmy.Vetayases@oracle.com
848*12683SJimmy.Vetayases@oracle.com apix_dispatch_by_vector(vector);
849*12683SJimmy.Vetayases@oracle.com }
850*12683SJimmy.Vetayases@oracle.com
851*12683SJimmy.Vetayases@oracle.com static void
apix_dispatch_lowlevel(uint_t vector,uint_t oldipl)852*12683SJimmy.Vetayases@oracle.com apix_dispatch_lowlevel(uint_t vector, uint_t oldipl)
853*12683SJimmy.Vetayases@oracle.com {
854*12683SJimmy.Vetayases@oracle.com struct cpu *cpu = CPU;
855*12683SJimmy.Vetayases@oracle.com
856*12683SJimmy.Vetayases@oracle.com apix_dispatch_by_vector(vector);
857*12683SJimmy.Vetayases@oracle.com
858*12683SJimmy.Vetayases@oracle.com /*
859*12683SJimmy.Vetayases@oracle.com * Must run intr_thread_epilog() on the interrupt thread stack, since
860*12683SJimmy.Vetayases@oracle.com * there may not be a return from it if the interrupt thread blocked.
861*12683SJimmy.Vetayases@oracle.com */
862*12683SJimmy.Vetayases@oracle.com apix_intr_thread_epilog(cpu, oldipl);
863*12683SJimmy.Vetayases@oracle.com }
864*12683SJimmy.Vetayases@oracle.com
865*12683SJimmy.Vetayases@oracle.com void
apix_do_interrupt(struct regs * rp,trap_trace_rec_t * ttp)866*12683SJimmy.Vetayases@oracle.com apix_do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
867*12683SJimmy.Vetayases@oracle.com {
868*12683SJimmy.Vetayases@oracle.com struct cpu *cpu = CPU;
869*12683SJimmy.Vetayases@oracle.com int vector = rp->r_trapno, newipl, oldipl = cpu->cpu_pri, ret;
870*12683SJimmy.Vetayases@oracle.com apix_vector_t *vecp = NULL;
871*12683SJimmy.Vetayases@oracle.com
872*12683SJimmy.Vetayases@oracle.com #ifdef TRAPTRACE
873*12683SJimmy.Vetayases@oracle.com ttp->ttr_marker = TT_INTERRUPT;
874*12683SJimmy.Vetayases@oracle.com ttp->ttr_cpuid = cpu->cpu_id;
875*12683SJimmy.Vetayases@oracle.com ttp->ttr_ipl = 0xff;
876*12683SJimmy.Vetayases@oracle.com ttp->ttr_pri = (uchar_t)oldipl;
877*12683SJimmy.Vetayases@oracle.com ttp->ttr_spl = cpu->cpu_base_spl;
878*12683SJimmy.Vetayases@oracle.com ttp->ttr_vector = 0xff;
879*12683SJimmy.Vetayases@oracle.com #endif /* TRAPTRACE */
880*12683SJimmy.Vetayases@oracle.com
881*12683SJimmy.Vetayases@oracle.com cpu_idle_exit(CPU_IDLE_CB_FLAG_INTR);
882*12683SJimmy.Vetayases@oracle.com
883*12683SJimmy.Vetayases@oracle.com ++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;
884*12683SJimmy.Vetayases@oracle.com
885*12683SJimmy.Vetayases@oracle.com /*
886*12683SJimmy.Vetayases@oracle.com * If it's a softint go do it now.
887*12683SJimmy.Vetayases@oracle.com */
888*12683SJimmy.Vetayases@oracle.com if (rp->r_trapno == T_SOFTINT) {
889*12683SJimmy.Vetayases@oracle.com /*
890*12683SJimmy.Vetayases@oracle.com * It might be the case that when an interrupt is triggered,
891*12683SJimmy.Vetayases@oracle.com * the spl is raised to high by splhigh(). Later when do_splx()
892*12683SJimmy.Vetayases@oracle.com * is called to restore the spl, both hardware and software
893*12683SJimmy.Vetayases@oracle.com * interrupt pending flags are check and an SOFTINT is faked
894*12683SJimmy.Vetayases@oracle.com * accordingly.
895*12683SJimmy.Vetayases@oracle.com */
896*12683SJimmy.Vetayases@oracle.com (void) apix_do_pending_hilevel(cpu, rp);
897*12683SJimmy.Vetayases@oracle.com (void) apix_do_pending_hardint(cpu, rp);
898*12683SJimmy.Vetayases@oracle.com (void) apix_do_softint(rp);
899*12683SJimmy.Vetayases@oracle.com ASSERT(!interrupts_enabled());
900*12683SJimmy.Vetayases@oracle.com #ifdef TRAPTRACE
901*12683SJimmy.Vetayases@oracle.com ttp->ttr_vector = T_SOFTINT;
902*12683SJimmy.Vetayases@oracle.com #endif
903*12683SJimmy.Vetayases@oracle.com return;
904*12683SJimmy.Vetayases@oracle.com }
905*12683SJimmy.Vetayases@oracle.com
906*12683SJimmy.Vetayases@oracle.com /*
907*12683SJimmy.Vetayases@oracle.com * Raise the interrupt priority. Send EOI to local APIC
908*12683SJimmy.Vetayases@oracle.com */
909*12683SJimmy.Vetayases@oracle.com newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
910*12683SJimmy.Vetayases@oracle.com #ifdef TRAPTRACE
911*12683SJimmy.Vetayases@oracle.com ttp->ttr_ipl = (uchar_t)newipl;
912*12683SJimmy.Vetayases@oracle.com #endif /* TRAPTRACE */
913*12683SJimmy.Vetayases@oracle.com
914*12683SJimmy.Vetayases@oracle.com /*
915*12683SJimmy.Vetayases@oracle.com * Bail if it is a spurious interrupt
916*12683SJimmy.Vetayases@oracle.com */
917*12683SJimmy.Vetayases@oracle.com if (newipl == -1)
918*12683SJimmy.Vetayases@oracle.com return;
919*12683SJimmy.Vetayases@oracle.com
920*12683SJimmy.Vetayases@oracle.com vector = rp->r_trapno;
921*12683SJimmy.Vetayases@oracle.com vecp = xv_vector(cpu->cpu_id, vector);
922*12683SJimmy.Vetayases@oracle.com #ifdef TRAPTRACE
923*12683SJimmy.Vetayases@oracle.com ttp->ttr_vector = (short)vector;
924*12683SJimmy.Vetayases@oracle.com #endif /* TRAPTRACE */
925*12683SJimmy.Vetayases@oracle.com
926*12683SJimmy.Vetayases@oracle.com /*
927*12683SJimmy.Vetayases@oracle.com * Direct dispatch for IPI, MSI, MSI-X
928*12683SJimmy.Vetayases@oracle.com */
929*12683SJimmy.Vetayases@oracle.com if (vecp && vecp->v_type != APIX_TYPE_FIXED &&
930*12683SJimmy.Vetayases@oracle.com newipl > MAX(oldipl, cpu->cpu_base_spl)) {
931*12683SJimmy.Vetayases@oracle.com caddr_t newsp;
932*12683SJimmy.Vetayases@oracle.com
933*12683SJimmy.Vetayases@oracle.com if (newipl > LOCK_LEVEL) {
934*12683SJimmy.Vetayases@oracle.com if (apix_hilevel_intr_prolog(cpu, newipl, oldipl, rp)
935*12683SJimmy.Vetayases@oracle.com == 0) {
936*12683SJimmy.Vetayases@oracle.com newsp = cpu->cpu_intr_stack;
937*12683SJimmy.Vetayases@oracle.com switch_sp_and_call(newsp, apix_dispatch_hilevel,
938*12683SJimmy.Vetayases@oracle.com vector, 0);
939*12683SJimmy.Vetayases@oracle.com } else {
940*12683SJimmy.Vetayases@oracle.com apix_dispatch_hilevel(vector, 0);
941*12683SJimmy.Vetayases@oracle.com }
942*12683SJimmy.Vetayases@oracle.com (void) apix_hilevel_intr_epilog(cpu, oldipl);
943*12683SJimmy.Vetayases@oracle.com } else {
944*12683SJimmy.Vetayases@oracle.com newsp = apix_intr_thread_prolog(cpu, newipl,
945*12683SJimmy.Vetayases@oracle.com (caddr_t)rp);
946*12683SJimmy.Vetayases@oracle.com switch_sp_and_call(newsp, apix_dispatch_lowlevel,
947*12683SJimmy.Vetayases@oracle.com vector, oldipl);
948*12683SJimmy.Vetayases@oracle.com }
949*12683SJimmy.Vetayases@oracle.com } else {
950*12683SJimmy.Vetayases@oracle.com /* Add to per-pil pending queue */
951*12683SJimmy.Vetayases@oracle.com apix_add_pending_hardint(vector);
952*12683SJimmy.Vetayases@oracle.com if (newipl <= MAX(oldipl, cpu->cpu_base_spl) ||
953*12683SJimmy.Vetayases@oracle.com !apixs[cpu->cpu_id]->x_intr_pending)
954*12683SJimmy.Vetayases@oracle.com return;
955*12683SJimmy.Vetayases@oracle.com }
956*12683SJimmy.Vetayases@oracle.com
957*12683SJimmy.Vetayases@oracle.com if (apix_do_pending_hilevel(cpu, rp) < 0)
958*12683SJimmy.Vetayases@oracle.com return;
959*12683SJimmy.Vetayases@oracle.com
960*12683SJimmy.Vetayases@oracle.com do {
961*12683SJimmy.Vetayases@oracle.com ret = apix_do_pending_hardint(cpu, rp);
962*12683SJimmy.Vetayases@oracle.com
963*12683SJimmy.Vetayases@oracle.com /*
964*12683SJimmy.Vetayases@oracle.com * Deliver any pending soft interrupts.
965*12683SJimmy.Vetayases@oracle.com */
966*12683SJimmy.Vetayases@oracle.com (void) apix_do_softint(rp);
967*12683SJimmy.Vetayases@oracle.com } while (!ret && LOWLEVEL_PENDING(cpu));
968*12683SJimmy.Vetayases@oracle.com }
969