/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include "assym.h"

#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */


/*
 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
 *	Register passed from LEVEL_INTERRUPT(level)
 *	%g4 - interrupt request level
 */
	ENTRY_NP(pil_interrupt)
	!
	! Register usage
	!	%g1 - cpu
	!	%g2 - pointer to intr_vec_t (iv)
	!	%g4 - pil
	!	%g3, %g5, %g6, %g7 - temps
	!
	! Grab the list head intr_vec_t off intr_head[pil] and panic
	! immediately if it is NULL. Otherwise, update intr_head[pil] to the
	! next intr_vec_t on the list and, if that next pointer is NULL,
	! clear the pending soft interrupt via %clear_softint.
	!
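	! As a rough C-like sketch of the dequeue below (illustrative only,
	! not compiled code; clear_softint() stands in for the wr to
	! %clear_softint):
	!
	!	iv = cpu->m_cpu.intr_head[pil];
	!	if (iv == NULL)
	!		ptl1_panic(PTL1_BAD_INTR_VEC);
	!	next = (iv->iv_flags & IV_SOFTINT_MT) ?
	!	    iv->iv_xpil_next[cpuid] : iv->iv_pil_next;
	!	cpu->m_cpu.intr_head[pil] = next;
	!	if (next == NULL) {
	!		cpu->m_cpu.intr_tail[pil] = NULL;
	!		clear_softint(1 << pil);
	!	}
	!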
	CPU_ADDR(%g1, %g5)		! %g1 = cpu
	!
	ALTENTRY(pil_interrupt_common)
	sll	%g4, CPTRSHIFT, %g5	! %g5 = offset to the pil entry
	add	%g1, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
	add	%g6, %g5, %g6		! %g6 = &cpu->m_cpu.intr_head[pil]
	ldn	[%g6], %g2		! %g2 = cpu->m_cpu.intr_head[pil]
	brnz,pt	%g2, 0f			! branch if list head (iv) not NULL
	nop
	ba	ptl1_panic		! panic, list head (iv) is NULL
	mov	PTL1_BAD_INTR_VEC, %g1
0:
	lduh	[%g2 + IV_FLAGS], %g7	! %g7 = iv->iv_flags
	and	%g7, IV_SOFTINT_MT, %g3 ! %g3 = iv->iv_flags & IV_SOFTINT_MT
	brz,pt	%g3, 1f			! check for multi target softint
	add	%g2, IV_PIL_NEXT, %g7	! %g7 = &iv->iv_pil_next
	ld	[%g1 + CPU_ID], %g3	! for multi target softint, use cpuid
	sll	%g3, CPTRSHIFT, %g3	! convert cpuid to offset address
	add	%g7, %g3, %g7		! %g7 = &iv->iv_xpil_next[cpuid]
1:
	ldn	[%g7], %g3		! %g3 = next intr_vec_t
	brnz,pn	%g3, 2f			! branch if next intr_vec_t non NULL
	stn	%g3, [%g6]		! update cpu->m_cpu.intr_head[pil]
	add	%g1, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	stn	%g0, [%g5 + %g6]	! clear cpu->m_cpu.intr_tail[pil]
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g4, %g5		! %g5 = 1 << pil
	wr	%g5, CLEAR_SOFTINT	! clear interrupt on this pil
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g2, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = first intr_vec
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = next intr_vec
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	sll	%g4, CPTRSHIFT, %g3
	add	%g1, INTR_HEAD, %g6
	ldn	[%g6 + %g3], %g6		! %g6 = cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g1, INTR_TAIL, %g6
	ldn	[%g6 + %g3], %g6		! %g6 = cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! clear the iv_pending flag for this interrupt request
	!
	lduh	[%g2 + IV_FLAGS], %g3		! %g3 = iv->iv_flags
	andn	%g3, IV_SOFTINT_PEND, %g3	! %g3 = !(iv->iv_flags & PEND)
	sth	%g3, [%g2 + IV_FLAGS]		! clear IV_SOFTINT_PEND flag
	stn	%g0, [%g7]			! clear iv->iv_pil_next or
						!       iv->iv_xpil_next

	!
	! Prepare for sys_trap()
	!
	! Registers passed to sys_trap()
	!	%g1 - interrupt handler at TL==0
	!	%g2 - pointer to current intr_vec_t (iv),
	!	      job queue for intr_thread or current_thread
	!	%g3 - pil
	!	%g4 - initial pil for handler
	!
	! figure which handler to run and which %pil it starts at
	! intr_thread starts at DISP_LEVEL to prevent preemption
	! current_thread starts at PIL_MAX to protect cpu_intr_actv
	!
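	! Illustrative C-like sketch (not compiled code) of the choice made
	! below; handler and start_pil are just names for %g1 and %g4:
	!
	!	handler   = (pil > LOCK_LEVEL) ? current_thread : intr_thread;
	!	start_pil = (pil > LOCK_LEVEL) ? PIL_MAX : DISP_LEVEL;
	!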
	mov	%g4, %g3		! %g3 = %g4, pil
	cmp	%g4, LOCK_LEVEL
	bg,a,pt	%xcc, 3f		! branch if pil > LOCK_LEVEL
	mov	PIL_MAX, %g4		! %g4 = PIL_MAX (15)
	sethi	%hi(intr_thread), %g1	! %g1 = intr_thread
	mov	DISP_LEVEL, %g4		! %g4 = DISP_LEVEL (11)
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(intr_thread), %g1
3:
	sethi	%hi(current_thread), %g1 ! %g1 = current_thread
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(current_thread), %g1
	SET_SIZE(pil_interrupt_common)
	SET_SIZE(pil_interrupt)


_spurious:
	.asciz	"!interrupt 0x%x at level %d not serviced"

/*
 * SERVE_INTR_PRE is called once, just before the first invocation
 * of SERVE_INTR.
 *
 * Registers on entry:
 *
 * iv_p, cpu, regs: may be out-registers
 * ls1, ls2: local scratch registers
 * os1, os2, os3: scratch registers, may be out
 */

#define	SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs)	\
	mov	iv_p, ls1;						\
	mov	iv_p, ls2;						\
	SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);

/*
 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
 * SERVE_INTR_NEXT, without intervening code. No register values
 * may be modified.
 *
 * After calling SERVE_INTR, the caller must check if os3 is set. If
 * so, there is another interrupt to process. The caller must call
 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
 *
 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
 * and other actions which need to occur after invocation of an interrupt
 * handler. However, the values of ls1 and os3 *must* be preserved and
 * passed unmodified into SERVE_INTR_NEXT.
 *
 * Registers on return from SERVE_INTR:
 *
 * ls1 - the pil just processed
 * ls2 - the pointer to intr_vec_t (iv) just processed
 * os3 - if set, another interrupt needs to be processed
 * cpu, ls1, os3 - must be preserved if os3 is set
 */
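
/*
 * Taken together, the three macros implement the following loop. This is
 * an illustrative C-like sketch only (not compiled code), with the
 * register roles written as variables:
 *
 *	SERVE_INTR_PRE(iv, ...);
 *	for (;;) {
 *		SERVE_INTR(...);	// run handler, reload os3 from
 *					// intr_head[pil]
 *		// optional accounting here; ls1 and os3 must survive
 *		if (os3 == NULL)
 *			break;		// nothing further pending at this pil
 *		SERVE_INTR_NEXT(...);	// dequeue os3, clear softint if empty
 *	}
 */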

#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	ldn	[ls1 + IV_HANDLER], os2;				\
	ldn	[ls1 + IV_ARG1], %o0;					\
	ldn	[ls1 + IV_ARG2], %o1;					\
	call	os2;							\
	lduh	[ls1 + IV_PIL], ls1;					\
	brnz,pt	%o0, 2f;						\
	mov	CE_WARN, %o0;						\
	set	_spurious, %o1;						\
	mov	ls2, %o2;						\
	call	cmn_err;						\
	rdpr	%pil, %o3;						\
2:	ldn	[THREAD_REG + T_CPU], cpu;				\
	sll	ls1, 3, os1;						\
	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
	ldx	[cpu + os2], os3;					\
	inc	os3;							\
	stx	os3, [cpu + os2];					\
	sll	ls1, CPTRSHIFT, os2;					\
	add	cpu, INTR_HEAD, os1;					\
	add	os1, os2, os1;						\
	ldn	[os1], os3;

/*
 * Registers on entry:
 *
 * cpu			- cpu pointer (clobbered, set to cpu upon completion)
 * ls1, os3		- preserved from prior call to SERVE_INTR
 * ls2			- local scratch reg (not preserved)
 * os1, os2, os4, os5	- scratch reg, can be out (not preserved)
 */
#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	sll	ls1, CPTRSHIFT, os4;					\
	add	cpu, INTR_HEAD, os1;					\
	rdpr	%pstate, ls2;						\
	wrpr	ls2, PSTATE_IE, %pstate;				\
	lduh	[os3 + IV_FLAGS], os2;					\
	and	os2, IV_SOFTINT_MT, os2;				\
	brz,pt	os2, 4f;						\
	add	os3, IV_PIL_NEXT, os2;					\
	ld	[cpu + CPU_ID], os5;					\
	sll	os5, CPTRSHIFT, os5;					\
	add	os2, os5, os2;						\
4:	ldn	[os2], os5;						\
	brnz,pn	os5, 5f;						\
	stn	os5, [os1 + os4];					\
	add	cpu, INTR_TAIL, os1;					\
	stn	%g0, [os1 + os4];					\
	mov	1, os1;							\
	sll	os1, ls1, os1;						\
	wr	os1, CLEAR_SOFTINT;					\
5:	lduh	[os3 + IV_FLAGS], ls1;					\
	andn	ls1, IV_SOFTINT_PEND, ls1;				\
	sth	ls1, [os3 + IV_FLAGS];					\
	stn	%g0, [os2];						\
	wrpr	%g0, ls2, %pstate;					\
	mov	os3, ls1;						\
	mov	os3, ls2;						\
	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	ldn	[os4 + PC_OFF], os2;					\
	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
	ldx	[os4 + TSTATE_OFF], os2;				\
	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2, os3);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2, os3);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#define	INTRCNT_LIMIT 16

/*
 * Handle an interrupt in a new thread.
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = DISP_LEVEL
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = pil
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o2       = cpu
 *		%o3       = intr thread
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
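/*
 * In outline, the body below does the following; this is an illustrative
 * C-like sketch only (not compiled code), with field names matching the
 * offsets used in the assembly:
 *
 *	it = cpu->cpu_intr_thread;		// take a thread from the pool
 *	cpu->cpu_intr_thread = it->t_link;
 *	cpu->cpu_intr_actv |= 1 << pil;
 *	it->t_intr = curthread;			// pin the interrupted thread
 *	curthread = it;				// switch threads and stacks
 *	it->t_intr_start = now;
 *	// SERVE_INTR loop: run handlers queued at this pil
 *	cpu->cpu_intr_actv &= ~(1 << pil);
 *	if (it->t_intr == NULL)
 *		intr_thread_exit();		// we blocked; never returns
 *	curthread = it->t_intr;			// unpin and return to trap code
 */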
	ENTRY_NP(intr_thread)
	mov	%o7, %l0
	mov	%o2, %l1
	!
	! See if we are interrupting another interrupt thread.
	!
	lduh	[THREAD_REG + T_FLAGS], %o3
	andcc	%o3, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer

	! We have interrupted an interrupt thread. Take a timestamp,
	! compute its interval, and update its cumulative counter.
	add	THREAD_REG, T_INTR_START, %o5
0:
	ldx	[%o5], %o3
	brz,pn	%o3, 1f
	! We came in on top of an interrupt thread that had no timestamp.
	! This could happen if, for instance, an interrupt thread which had
	! previously blocked is being set up to run again in resume(), but
	! resume() hasn't yet stored a timestamp for it. Or, it could be in
	! swtch() after its slice has been accounted for.
	! Only account for the time slice if the starting timestamp is non-zero.
	RD_CLOCK_TICK(%o4,%l2,%l3,__LINE__)
	sub	%o4, %o3, %o4			! o4 has interval

	! A high-level interrupt in current_thread() interrupting here
	! will account for the interrupted thread's time slice, but
	! only if t_intr_start is non-zero. Since this code is going to account
	! for the time slice, we want to "atomically" load the thread's
	! starting timestamp, calculate the interval with %tick, and zero
	! its starting timestamp.
	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
	! If it has changed since we loaded it above, we need to re-compute the
	! interval, since a changed t_intr_start implies current_thread placed
	! a new, later timestamp there after running a high-level interrupt,
	! and the %tick val in %o4 had become stale.
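	!
	! Illustrative C-like sketch (not compiled code) of the claim loop,
	! with rd_tick() a hypothetical stand-in for the %tick read and
	! casx() written as a compare-and-swap that returns the old value:
	!
	!	do {
	!		start = t->t_intr_start;
	!		interval = rd_tick() - start;
	!	} while (casx(&t->t_intr_start, start, 0) != start);
	!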
	mov	%g0, %l2
	casx	[%o5], %o3, %l2

	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
	! changed between loading it (after label 0b) and computing the
	! interval above.
	cmp	%l2, %o3
	bne,pn	%xcc, 0b

	! Check for Energy Star mode
	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
	cmp	%l2, 1
	bg,a,pn	%xcc, 2f
	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
2:
	! We now know that a valid interval for the interrupted interrupt
	! thread is in %o4. Update its cumulative counter.
	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
	sllx	%l3, 4, %l3		! convert PIL index to byte offset
	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
	ldx	[%o2 + %l3], %o5	! old counter in o5
	add	%o5, %o4, %o5		! new counter in o5
	stx	%o5, [%o2 + %l3]	! store new counter

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %l3
	sllx	%l3, 3, %l3
	add	%l3, CPU_INTRACCT, %l3
	add	%l3, %o2, %l3
0:
	ldx	[%l3], %o5
	add	%o5, %o4, %o3
	casx	[%l3], %o5, %o3
	cmp	%o5, %o3
	bne,pn	%xcc, 0b
	nop

1:
	!
	! Get set to run interrupt thread.
	! There should always be an interrupt thread since we allocate one
	! for each level on the CPU.
	!
	! Note that the code in kcpc_overflow_intr -relies- on the ordering
	! of events here -- in particular that t->t_lwp of the interrupt thread
	! is set to the pinned thread *before* curthread is changed.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
	stn	%o4, [%o2 + CPU_INTR_THREAD]
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! Consider the new thread part of the same LWP so that
	! window overflow code can find the PCB.
	!
	ldn	[THREAD_REG + T_LWP], %o4
	stn	%o4, [%o3 + T_LWP]
	!
	! Threads on the interrupt thread free list could have state already
	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE.
	! Could eliminate the next two instructions with a little work.
	!
	mov	TS_ONPROC, %o4
	st	%o4, [%o3 + T_STATE]
	!
	! Push interrupted thread onto list from new thread.
	! Set the new thread as the current one.
	! Set interrupted thread's T_SP because if it is the idle thread,
	! resume may use that stack between threads.
	!
	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
	mov	%o3, THREAD_REG			! set global curthread register
	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
	sub	%o4, STACK_BIAS, %sp
	!
	! Initialize thread priority level from intr_pri
	!
	sethi	%hi(intr_pri), %o4
	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
	add	%l1, %o4, %o4		! convert level to dispatch priority
	sth	%o4, [THREAD_REG + T_PRI]
	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate

	! Store starting timestamp in thread structure.
	add	THREAD_REG, T_INTR_START, %o3
1:
	ldx	[%o3], %o5
	RD_CLOCK_TICK(%o4,%l2,%l3,__LINE__)
	casx	[%o3], %o5, %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 1b
	nop

	wrpr	%g0, %l1, %pil			! lower %pil to new level
	!
	! Fast event tracing.
	!
	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
	btst	FTRACE_ENABLED, %o4
	be,pt	%icc, 1f			! skip if ftrace disabled
	  mov	%l1, %o5
	!
	! Tracing is enabled - write the trace entry.
	!
	save	%sp, -SA(MINFRAME), %sp
	set	ftrace_intr_thread_format_str, %o0
	mov	%i0, %o1
	mov	%i1, %o2
	mov	%i5, %o3
	call	ftrace_3
	ldn	[%i0 + PC_OFF], %o4
	restore
1:
	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! %o0 and %o1 are now available as scratch registers.
	!
0:
	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! If %o3 is set, we must call serve_intr_next, and both %l1 and %o3
	! must be preserved. %l1 holds our pil, %l3 holds our inum.
	!
	! Note: %l1 is the pil level we're processing, but we may have a
	! higher effective pil because a higher-level interrupt may have
	! blocked.
	!
	wrpr	%g0, DISP_LEVEL, %pil
	!
	! Take timestamp, compute interval, update cumulative counter.
	!
	add	THREAD_REG, T_INTR_START, %o5
1:
	ldx	[%o5], %o0
#ifdef DEBUG
	brnz	%o0, 9f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %o1
	ld	[%o1 + %lo(panic_quiesce)], %o1
	brnz,pn	%o1, 9f
	nop
	sethi	%hi(intr_thread_t_intr_start_zero), %o0
	call	panic
	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
9:
#endif /* DEBUG */
	RD_CLOCK_TICK(%o1,%l2,%l3,__LINE__)
	sub	%o1, %o0, %l2			! l2 has interval
	!
	! The general outline of what the code here does is:
	! 1. load t_intr_start, %tick, and calculate the delta
	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
	!
	! The problem is that a high-level interrupt could arrive at any time.
	! It will account for (%tick - t_intr_start) for us when it starts,
	! unless we have set t_intr_start to zero, and then set t_intr_start
	! to a new %tick when it finishes. To account for this, our first step
	! is to load t_intr_start and the last is to use casx to store the new
	! t_intr_start. This guarantees atomicity in reading t_intr_start,
	! reading %tick, and updating t_intr_start.
	!
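	! As an illustrative C-like sketch only (not compiled code;
	! rd_tick() is a hypothetical stand-in for the %tick read, casx()
	! a compare-and-swap returning the old value):
	!
	!	do {
	!		old = t->t_intr_start;
	!		now = rd_tick();
	!		interval = now - old;
	!		new = (os3 != NULL) ? now : 0;
	!	} while (casx(&t->t_intr_start, old, new) != old);
	!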
	movrz	%o3, %g0, %o1
	casx	[%o5], %o0, %o1
	cmp	%o0, %o1
	bne,pn	%xcc, 1b
	!
	! Check for Energy Star mode
	!
	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
	cmp	%o0, 1
	bg,a,pn	%xcc, 2f
	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
2:
	!
	! Update cpu_intrstat. If o3 is set then we will be processing another
	! interrupt. Above we have set t_intr_start to %tick, not 0. This
	! means a high-level interrupt can arrive and update the same stats
	! we're updating. Need to use casx.
	!
	sllx	%l1, 4, %o1			! delay - PIL as byte offset
	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5			! old counter in o5
	add	%o5, %l2, %o0			! new counter in o0
	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %o1
	sllx	%o1, 3, %o1
	add	%o1, CPU_INTRACCT, %o1
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5
	add	%o5, %l2, %o0
	casx	[%o1], %o5, %o0
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	!
	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
	! we've crossed the threshold and we should unpin the pinned threads
	! by preempt()ing ourselves, which will bubble up the t_intr chain
	! until hitting the non-interrupt thread, which will then in turn
	! preempt itself allowing the interrupt processing to resume. Finally,
	! the scheduler takes over and picks the next thread to run.
	!
	! If our CPU is quiesced, we cannot preempt because the idle thread
	! won't ever re-enter the scheduler, and the interrupt will be forever
	! blocked.
	!
	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
	! This ensures we enter the scheduler if a higher-priority thread
	! has become runnable.
	!
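	! Roughly, as an illustrative C-like sketch (not compiled code; it
	! glosses over the ==/>= INTRCNT_LIMIT distinction handled below):
	!
	!	if (!(cpu->cpu_flags & CPU_QUIESCED)) {
	!		if (t->t_intr == NULL) {
	!			if (cpu->cpu_kprunrun)
	!				preempt();
	!		} else if (++cpu->cpu_intrcnt >= INTRCNT_LIMIT) {
	!			cpu->cpu_kprunrun = 1;	/* also bump the */
	!			preempt();		/* intrunpin stat */
	!		}
	!	}
	!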
	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
	andcc	%o5, CPU_QUIESCED, %g0
	bnz,pn	%xcc, 1f

	ldn	[THREAD_REG + T_INTR], %o5	! pinning anything?
	brz,pn	%o5, 3f				! if not, don't inc intrcnt

	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
	inc	%o5
	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
	bg,pn	%xcc, 2f			! don't inc stats again
	!
	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
	!
	mov	1, %o4				! delay
	stub	%o4, [%o2 + CPU_KPRUNRUN]
	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
	ba	2f
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
3:
	! Code for t_intr == NULL
	ldub	[%o2 + CPU_KPRUNRUN], %o5
	brz,pt	%o5, 1f				! don't preempt unless kprunrun
2:
	! Time to call preempt
	mov	%o2, %l3			! delay - save %o2
	call	preempt
	mov	%o3, %l2			! delay - save %o3.
	mov	%l3, %o2			! restore %o2
	mov	%l2, %o3			! restore %o3
	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
1:
	!
	! Do we need to call serve_intr_next and do this again?
	!
	brz,a,pt %o3, 0f
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
	!
	! Restore %pil before calling serve_intr() again. We must check
	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
	!
	ld	[%o2 + CPU_BASE_SPL], %o4
	cmp	%o4, %l1
	movl	%xcc, %l1, %o4
	wrpr	%g0, %o4, %pil
	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	ba	0b				! compute new stats
	nop
0:
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%o4, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! If there is still an interrupted thread underneath this one,
	! then the interrupt was never blocked and the return is fairly
	! simple.  Otherwise jump to intr_thread_exit.
	!
	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
	brz,pn	%o4, intr_thread_exit		! branch if none
	nop
	!
	! link the thread back onto the interrupt thread pool
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3
	stn	%o3, [THREAD_REG + T_LINK]
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o5
	st	%o5, [THREAD_REG + T_STATE]
	!
	! Switch back to the interrupted thread and return
	!
	stn	%o4, [%o2 + CPU_THREAD]
	membar	#StoreLoad			! sync with mutex_exit()
	mov	%o4, THREAD_REG

	! If we pinned an interrupt thread, store its starting timestamp.
	lduh	[THREAD_REG + T_FLAGS], %o5
	andcc	%o5, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp

	add	THREAD_REG, T_INTR_START, %o3	! o3 = &curthread->t_intr_start
0:
	ldx	[%o3], %o4			! o4 = t_intr_start before
	RD_CLOCK_TICK(%o5,%l2,%l3,__LINE__)
	casx	[%o3], %o4, %o5			! put o5 in ts if o4 == ts after
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 0b
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
1:
	! If the thread being restarted isn't pinning anyone, and no interrupts
	! are pending, zero out cpu_intrcnt
	ldn	[THREAD_REG + T_INTR], %o4
	brnz,pn	%o4, 2f
	rd	SOFTINT, %o4			! delay
	set	SOFTINT_MASK, %o5
	andcc	%o4, %o5, %g0
	bz,a,pt	%xcc, 2f
	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
2:
	jmp	%l0 + 8
	nop
	SET_SIZE(intr_thread)
	/* Not Reached */

	!
	! An interrupt returned on what was once (and still might be)
	! an interrupt thread stack, but the interrupted process is no longer
	! there.  This means the interrupt must have blocked.
	!
	! There is no longer a thread under this one, so put this thread back
	! on the CPU's free list and resume the idle thread which will dispatch
	! the next thread to run.
	!
	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
	! is enabled.
	!
	ENTRY_NP(intr_thread_exit)
#ifdef TRAPTRACE
	rdpr	%pstate, %l2
	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
	wrpr	%g0, %o4, %pstate			! cpu to known state
	TRACE_PTR(%o4, %o5)
	GET_TRACE_TICK(%o5, %o0)
	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
	set	TT_INTR_EXIT, %o5
	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
	ld	[%o2 + CPU_BASE_SPL], %o5
	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%o4, %o5, %o0)
	wrpr	%g0, %l2, %pstate
#endif /* TRAPTRACE */
	! cpu_stats.sys.intrblk++
	ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
	!
	! Put thread back on the interrupt thread list.
	!

	!
	! Set the CPU's base SPL level.
	!
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
	and	%o5, %o4, %o4
	brz,pt	%o4, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
0:
#endif /* DEBUG */
	call	_intr_set_spl			! set CPU's base SPL level
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o4
	st	%o4, [THREAD_REG + T_STATE]
	!
	! Put thread on either the interrupt pool or the free pool and
	! call swtch() to resume another thread.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
	stn	%o5, [THREAD_REG + T_LINK]
	call	swtch				! switch to best thread
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
	ba,a,pt	%xcc, .				! swtch() shouldn't return
	SET_SIZE(intr_thread_exit)

	.global ftrace_intr_thread_format_str
ftrace_intr_thread_format_str:
	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
#ifdef DEBUG
intr_thread_actv_bit_set:
	.asciz	"intr_thread(): cpu_intr_actv bit already set for PIL"
intr_thread_actv_bit_not_set:
	.asciz	"intr_thread(): cpu_intr_actv bit not set for PIL"
intr_thread_exit_actv_bit_set:
	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
intr_thread_t_intr_start_zero:
	.asciz	"intr_thread(): t_intr_start zero upon handler return"
#endif /* DEBUG */

/*
 * Handle an interrupt in the current thread
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = PIL_MAX
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = old stack
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o3       = cpu
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
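/*
 * Before servicing at high PIL, the current thread's on_fault/on_trap
 * protection is stashed in per-PIL slots on the CPU. As an illustrative
 * C-like sketch only (not compiled code; ofd, lfd and otd are hypothetical
 * names for the slots behind the CPU_OFD, CPU_LFD and CPU_OTD offsets):
 *
 *	i = pil - (LOCK_LEVEL + 1);
 *	if (t->t_onfault != NULL) {
 *		ofd[i] = t->t_onfault;	t->t_onfault = NULL;
 *		lfd[i] = t->t_lofault;	t->t_lofault = 0;
 *	}
 *	if (t->t_ontrap != NULL) {
 *		otd[i] = t->t_ontrap;	t->t_ontrap = NULL;
 *	}
 */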
	ENTRY_NP(current_thread)

	mov	%o7, %l0
	ldn	[THREAD_REG + T_CPU], %o3

	ldn	[THREAD_REG + T_ONFAULT], %l2
	brz,pt	%l2, no_onfault		! branch if no onfault label set
	nop
	stn	%g0, [THREAD_REG + T_ONFAULT]	! clear onfault label
	ldn	[THREAD_REG + T_LOFAULT], %l3
	stn	%g0, [THREAD_REG + T_LOFAULT]	! clear lofault data

	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OFD, %o4	! %o4 has on_fault data offset
	stn	%l2, [%o3 + %o4]	! save onfault label for pil %o2
	add	%o5, CPU_LFD, %o4	! %o4 has lofault data offset
	stn	%l3, [%o3 + %o4]	! save lofault data for pil %o2

no_onfault:
	ldn	[THREAD_REG + T_ONTRAP], %l2
	brz,pt	%l2, 6f			! branch if no on_trap protection
	nop
	stn	%g0, [THREAD_REG + T_ONTRAP]	! clear on_trap protection
	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OTD, %o4	! %o4 has on_trap data offset
	stn	%l2, [%o3 + %o4]	! save on_trap label for pil %o2

	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
6:	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 = cpu_intr_actv before change
	mov	1, %o4
	sll	%o4, %o2, %o4			! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:
	!	o2 = PIL of this interrupt
	!	o5 = high PIL bits of INTR_ACTV (not including this PIL)
	!	l1 = bitmask used to find other active high-level PIL
	!	o4 = index of bit set in l1
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
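	! Illustrative C-like sketch (not compiled code) of the scan at 2:
	!
	!	idx = pil - (LOCK_LEVEL + 2);
	!	mask = 1 << idx;
	!	while ((actv_high & mask) == 0) {
	!		mask >>= 1;
	!		idx--;
	!	}
	!	/* idx now selects the cpu_pil_high_start[] entry */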
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
3:
	sll	%o4, 3, %o4		! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3	! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1			! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
1010*5d9d9091SRichard Lowe	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%l1, %l2)
1011*5d9d9091SRichard Lowe	sub	%l1, %l3, %l3			! interval in %l3
1012*5d9d9091SRichard Lowe	!
1013*5d9d9091SRichard Lowe	! Check for Energy Star mode
1014*5d9d9091SRichard Lowe	!
1015*5d9d9091SRichard Lowe	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
1016*5d9d9091SRichard Lowe	cmp	%l1, 1
1017*5d9d9091SRichard Lowe	bg,a,pn	%xcc, 2f
1018*5d9d9091SRichard Lowe	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
1019*5d9d9091SRichard Lowe2:
1020*5d9d9091SRichard Lowe	!
1021*5d9d9091SRichard Lowe	! We need to find the CPU offset of the cumulative counter. We start
1022*5d9d9091SRichard Lowe	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
1023*5d9d9091SRichard Lowe	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
1024*5d9d9091SRichard Lowe	! CPU_INTRSTAT_LOW_PIL_OFFSET.
1025*5d9d9091SRichard Lowe	!
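	! For instance (an illustrative sketch, assuming LOCK_LEVEL is 10):
	! for an interrupted PIL of 13, %o4 arrives as (13 - 11) * 8 = 16;
	! shifting left by 1 gives 32, and adding (10 + 1) * 16 = 176 yields
	! 208 = 13 * 16, the byte offset of intrstat[13][0].
	!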
1026*5d9d9091SRichard Lowe	sll	%o4, 1, %o4
1027*5d9d9091SRichard Lowe	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
1028*5d9d9091SRichard Lowe	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
1029*5d9d9091SRichard Lowe	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
1030*5d9d9091SRichard Lowe	ldx	[%o3 + %o4], %l1		! old counter in l1
1031*5d9d9091SRichard Lowe	add	%l1, %l3, %l1			! new counter in l1
1032*5d9d9091SRichard Lowe	stx	%l1, [%o3 + %o4]		! store new counter
1033*5d9d9091SRichard Lowe
1034*5d9d9091SRichard Lowe	! Also update intracct[]
1035*5d9d9091SRichard Lowe	lduh	[%o3 + CPU_MSTATE], %o4
1036*5d9d9091SRichard Lowe	sllx	%o4, 3, %o4
1037*5d9d9091SRichard Lowe	add	%o4, CPU_INTRACCT, %o4
1038*5d9d9091SRichard Lowe	ldx	[%o3 + %o4], %l1
1039*5d9d9091SRichard Lowe	add	%l1, %l3, %l1
1040*5d9d9091SRichard Lowe	! Another high-level interrupt is active below this one, so
1041*5d9d9091SRichard Lowe	! there is no need to check for an interrupt thread. That will be
1042*5d9d9091SRichard Lowe	! done by the lowest priority high-level interrupt active.
1043*5d9d9091SRichard Lowe	ba,pt	%xcc, 5f
1044*5d9d9091SRichard Lowe	stx	%l1, [%o3 + %o4]		! delay - store new counter
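	!
	! In rough C, the two counter updates above are (a sketch only):
	!	cpu->cpu_m.intrstat[pil][0] += interval;
	!	cpu->cpu_intracct[cpu->cpu_mstate] += interval;
	!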
1045*5d9d9091SRichard Lowe1:
1046*5d9d9091SRichard Lowe	! If we haven't interrupted another high-level interrupt, we may be
1047*5d9d9091SRichard Lowe	! interrupting a low-level interrupt thread. If so, compute its interval
1048*5d9d9091SRichard Lowe	! and update its cumulative counter.
1049*5d9d9091SRichard Lowe	lduh	[THREAD_REG + T_FLAGS], %o4
1050*5d9d9091SRichard Lowe	andcc	%o4, T_INTR_THREAD, %g0
1051*5d9d9091SRichard Lowe	bz,pt	%xcc, 4f
1052*5d9d9091SRichard Lowe	nop
1053*5d9d9091SRichard Lowe
1054*5d9d9091SRichard Lowe	! We have interrupted an interrupt thread. Take timestamp, compute
1055*5d9d9091SRichard Lowe	! interval, update cumulative counter.
1056*5d9d9091SRichard Lowe
1057*5d9d9091SRichard Lowe	! Check t_intr_start. If it is zero, either intr_thread() or
1058*5d9d9091SRichard Lowe	! current_thread() (at a lower PIL, of course) already did
1059*5d9d9091SRichard Lowe	! the accounting for the underlying interrupt thread.
1060*5d9d9091SRichard Lowe	ldx	[THREAD_REG + T_INTR_START], %o5
1061*5d9d9091SRichard Lowe	brz,pn	%o5, 4f
1062*5d9d9091SRichard Lowe	nop
1063*5d9d9091SRichard Lowe
1064*5d9d9091SRichard Lowe	stx	%g0, [THREAD_REG + T_INTR_START]
1065*5d9d9091SRichard Lowe	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
1066*5d9d9091SRichard Lowe	sub	%o4, %o5, %o5			! o5 has the interval
1067*5d9d9091SRichard Lowe
1068*5d9d9091SRichard Lowe	! Check for Energy Star mode
1069*5d9d9091SRichard Lowe	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
1070*5d9d9091SRichard Lowe	cmp	%o4, 1
1071*5d9d9091SRichard Lowe	bg,a,pn	%xcc, 2f
1072*5d9d9091SRichard Lowe	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
1073*5d9d9091SRichard Lowe2:
1074*5d9d9091SRichard Lowe	ldub	[THREAD_REG + T_PIL], %o4
1075*5d9d9091SRichard Lowe	sllx	%o4, 4, %o4			! PIL index to byte offset
1076*5d9d9091SRichard Lowe	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
1077*5d9d9091SRichard Lowe	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
1078*5d9d9091SRichard Lowe	ldx	[%o3 + %o4], %l2		! old counter in l2
1079*5d9d9091SRichard Lowe	add	%l2, %o5, %l2			! new counter in l2
1080*5d9d9091SRichard Lowe	stx	%l2, [%o3 + %o4]		! store new counter
1081*5d9d9091SRichard Lowe
1082*5d9d9091SRichard Lowe	! Also update intracct[]
1083*5d9d9091SRichard Lowe	lduh	[%o3 + CPU_MSTATE], %o4
1084*5d9d9091SRichard Lowe	sllx	%o4, 3, %o4
1085*5d9d9091SRichard Lowe	add	%o4, CPU_INTRACCT, %o4
1086*5d9d9091SRichard Lowe	ldx	[%o3 + %o4], %l2
1087*5d9d9091SRichard Lowe	add	%l2, %o5, %l2
1088*5d9d9091SRichard Lowe	stx	%l2, [%o3 + %o4]
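	!
	! The accounting above is, in rough C (an illustrative sketch):
	!	start = t->t_intr_start;
	!	t->t_intr_start = 0;
	!	delta = rd_tick() - start;
	!	if (cpu->cpu_divisor > 1)
	!		delta *= cpu->cpu_divisor;
	!	cpu->cpu_m.intrstat[t->t_pil][0] += delta;
	!	cpu->cpu_intracct[cpu->cpu_mstate] += delta;
	!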
1089*5d9d9091SRichard Lowe4:
1090*5d9d9091SRichard Lowe	!
1091*5d9d9091SRichard Lowe	! Handle high-level interrupts on separate interrupt stack.
1092*5d9d9091SRichard Lowe	! No other high-level interrupts are active, so switch to int stack.
1093*5d9d9091SRichard Lowe	!
1094*5d9d9091SRichard Lowe	mov	%sp, %l1
1095*5d9d9091SRichard Lowe	ldn	[%o3 + CPU_INTR_STACK], %l3
1096*5d9d9091SRichard Lowe	sub	%l3, STACK_BIAS, %sp
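	!
	! Sketch of the switch above: %l1 preserves the interrupted thread's
	! %sp, and %sp becomes cpu->cpu_intr_stack - STACK_BIAS. The saved
	! %sp is moved back into %sp in current_thread_complete once no
	! high-level interrupts remain active on this CPU.
	!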
1097*5d9d9091SRichard Lowe
1098*5d9d9091SRichard Lowe5:
1099*5d9d9091SRichard Lowe#ifdef DEBUG
1100*5d9d9091SRichard Lowe	!
1101*5d9d9091SRichard Lowe	! ASSERT(%o2 > LOCK_LEVEL)
1102*5d9d9091SRichard Lowe	!
1103*5d9d9091SRichard Lowe	cmp	%o2, LOCK_LEVEL
1104*5d9d9091SRichard Lowe	bg,pt	%xcc, 3f
1105*5d9d9091SRichard Lowe	nop
1106*5d9d9091SRichard Lowe	mov	CE_PANIC, %o0
1107*5d9d9091SRichard Lowe	sethi	%hi(current_thread_wrong_pil), %o1
1108*5d9d9091SRichard Lowe	call	cmn_err				! %o2 has the %pil already
1109*5d9d9091SRichard Lowe	or	%o1, %lo(current_thread_wrong_pil), %o1
1110*5d9d9091SRichard Lowe#endif
1111*5d9d9091SRichard Lowe3:
1112*5d9d9091SRichard Lowe	! Store starting timestamp for this PIL in CPU structure at
1113*5d9d9091SRichard Lowe	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
1114*5d9d9091SRichard Lowe	sub	%o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
1115*5d9d9091SRichard Lowe	sllx	%o4, 3, %o4			! index to byte offset
1116*5d9d9091SRichard Lowe	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
1117*5d9d9091SRichard Lowe	add	%o4, MCPU_PIL_HIGH_START, %o4
1118*5d9d9091SRichard Lowe	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o5, %l2)
1119*5d9d9091SRichard Lowe	stx	%o5, [%o3 + %o4]
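	! In rough C (a sketch):
	!	cpu->cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] = rd_tick();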
1120*5d9d9091SRichard Lowe
1121*5d9d9091SRichard Lowe	wrpr	%g0, %o2, %pil			! enable interrupts
1122*5d9d9091SRichard Lowe
1123*5d9d9091SRichard Lowe	!
1124*5d9d9091SRichard Lowe	! call the handler
1125*5d9d9091SRichard Lowe	!
1126*5d9d9091SRichard Lowe	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1127*5d9d9091SRichard Lowe1:
1128*5d9d9091SRichard Lowe	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1129*5d9d9091SRichard Lowe
1130*5d9d9091SRichard Lowe	brz,a,pt %o2, 0f			! branch if no more intrs pending
1131*5d9d9091SRichard Lowe	rdpr	%pil, %o2			! delay - annulled if not taken
1132*5d9d9091SRichard Lowe	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1133*5d9d9091SRichard Lowe	ba	1b
1134*5d9d9091SRichard Lowe	nop
1135*5d9d9091SRichard Lowe0:
1136*5d9d9091SRichard Lowe	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
1137*5d9d9091SRichard Lowe
1138*5d9d9091SRichard Lowe	cmp	%o2, PIL_15
1139*5d9d9091SRichard Lowe	bne,pt	%xcc, 3f
1140*5d9d9091SRichard Lowe	nop
1141*5d9d9091SRichard Lowe
1142*5d9d9091SRichard Lowe	sethi	%hi(cpc_level15_inum), %o1
1143*5d9d9091SRichard Lowe	ldx	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
1144*5d9d9091SRichard Lowe	brz	%o1, 3f
1145*5d9d9091SRichard Lowe	nop
1146*5d9d9091SRichard Lowe
1147*5d9d9091SRichard Lowe	rdpr 	%pstate, %g5
1148*5d9d9091SRichard Lowe	andn	%g5, PSTATE_IE, %g1
1149*5d9d9091SRichard Lowe	wrpr	%g0, %g1, %pstate		! Disable vec interrupts
1150*5d9d9091SRichard Lowe
1151*5d9d9091SRichard Lowe	call	intr_enqueue_req		! preserves %g5
1152*5d9d9091SRichard Lowe	mov	PIL_15, %o0
1153*5d9d9091SRichard Lowe
1154*5d9d9091SRichard Lowe	! clear perfcntr overflow
1155*5d9d9091SRichard Lowe	mov	1, %o0
1156*5d9d9091SRichard Lowe	sllx	%o0, PIL_15, %o0
1157*5d9d9091SRichard Lowe	wr	%o0, CLEAR_SOFTINT
1158*5d9d9091SRichard Lowe
1159*5d9d9091SRichard Lowe	wrpr	%g0, %g5, %pstate		! Enable vec interrupts
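	!
	! The PIL_15 path above is roughly (an illustrative sketch):
	!	if (cpc_level15_inum != 0) {
	!		/* with vector interrupts disabled */
	!		intr_enqueue_req(PIL_15, cpc_level15_inum);
	!		clear_softint(1 << PIL_15);
	!	}
	!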
1160*5d9d9091SRichard Lowe
1161*5d9d9091SRichard Lowe3:
1162*5d9d9091SRichard Lowe	cmp	%o2, PIL_14
1163*5d9d9091SRichard Lowe	be	tick_rtt			!  cpu-specific tick processing
1164*5d9d9091SRichard Lowe	nop
1165*5d9d9091SRichard Lowe	.global	current_thread_complete
1166*5d9d9091SRichard Lowecurrent_thread_complete:
1167*5d9d9091SRichard Lowe	!
1168*5d9d9091SRichard Lowe	! Register usage:
1169*5d9d9091SRichard Lowe	!
1170*5d9d9091SRichard Lowe	! %l1 = stack pointer
1171*5d9d9091SRichard Lowe	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
1172*5d9d9091SRichard Lowe	! %o2 = PIL
1173*5d9d9091SRichard Lowe	! %o3 = CPU pointer
1174*5d9d9091SRichard Lowe	! %o4, %o5, %l3, %l4, %l5 = scratch
1175*5d9d9091SRichard Lowe	!
1176*5d9d9091SRichard Lowe	ldn	[THREAD_REG + T_CPU], %o3
1177*5d9d9091SRichard Lowe	!
1178*5d9d9091SRichard Lowe	! Clear bit for this level in CPU's interrupt active bitmask.
1179*5d9d9091SRichard Lowe	!
1180*5d9d9091SRichard Lowe	ld	[%o3 + CPU_INTR_ACTV], %l2
1181*5d9d9091SRichard Lowe	mov	1, %o5
1182*5d9d9091SRichard Lowe	sll	%o5, %o2, %o5
1183*5d9d9091SRichard Lowe#ifdef DEBUG
1184*5d9d9091SRichard Lowe	!
1185*5d9d9091SRichard Lowe	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
1186*5d9d9091SRichard Lowe	!
1187*5d9d9091SRichard Lowe	andcc	%l2, %o5, %g0
1188*5d9d9091SRichard Lowe	bnz,pt	%xcc, 0f
1189*5d9d9091SRichard Lowe	nop
1190*5d9d9091SRichard Lowe	! Do not call panic if a panic is already in progress.
1191*5d9d9091SRichard Lowe	sethi	%hi(panic_quiesce), %l2
1192*5d9d9091SRichard Lowe	ld	[%l2 + %lo(panic_quiesce)], %l2
1193*5d9d9091SRichard Lowe	brnz,pn	%l2, 0f
1194*5d9d9091SRichard Lowe	nop
1195*5d9d9091SRichard Lowe	sethi	%hi(current_thread_actv_bit_not_set), %o0
1196*5d9d9091SRichard Lowe	call	panic
1197*5d9d9091SRichard Lowe	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
1198*5d9d9091SRichard Lowe0:
1199*5d9d9091SRichard Lowe#endif /* DEBUG */
1200*5d9d9091SRichard Lowe	andn	%l2, %o5, %l2
1201*5d9d9091SRichard Lowe	st	%l2, [%o3 + CPU_INTR_ACTV]
1202*5d9d9091SRichard Lowe
1203*5d9d9091SRichard Lowe	! Take timestamp, compute interval, update cumulative counter.
1204*5d9d9091SRichard Lowe	sub	%o2, LOCK_LEVEL + 1, %o4	! PIL to array index
1205*5d9d9091SRichard Lowe	sllx	%o4, 3, %o4			! index to byte offset
1206*5d9d9091SRichard Lowe	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
1207*5d9d9091SRichard Lowe	add	%o4, MCPU_PIL_HIGH_START, %o4
1208*5d9d9091SRichard Lowe	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o5, %o0)
1209*5d9d9091SRichard Lowe	ldx	[%o3 + %o4], %o0
1210*5d9d9091SRichard Lowe#ifdef DEBUG
1211*5d9d9091SRichard Lowe	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
1212*5d9d9091SRichard Lowe	brnz,pt	%o0, 9f
1213*5d9d9091SRichard Lowe	nop
1214*5d9d9091SRichard Lowe	! Don't panic if a panic is already in progress.
1215*5d9d9091SRichard Lowe	sethi	%hi(panic_quiesce), %l2
1216*5d9d9091SRichard Lowe	ld	[%l2 + %lo(panic_quiesce)], %l2
1217*5d9d9091SRichard Lowe	brnz,pn	%l2, 9f
1218*5d9d9091SRichard Lowe	nop
1219*5d9d9091SRichard Lowe	sethi	%hi(current_thread_timestamp_zero), %o0
1220*5d9d9091SRichard Lowe	call	panic
1221*5d9d9091SRichard Lowe	or	%o0, %lo(current_thread_timestamp_zero), %o0
1222*5d9d9091SRichard Lowe9:
1223*5d9d9091SRichard Lowe#endif /* DEBUG */
1224*5d9d9091SRichard Lowe	stx	%g0, [%o3 + %o4]
1225*5d9d9091SRichard Lowe	sub	%o5, %o0, %o5			! interval in o5
1226*5d9d9091SRichard Lowe
1227*5d9d9091SRichard Lowe	! Check for Energy Star mode
1228*5d9d9091SRichard Lowe	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
1229*5d9d9091SRichard Lowe	cmp	%o4, 1
1230*5d9d9091SRichard Lowe	bg,a,pn	%xcc, 2f
1231*5d9d9091SRichard Lowe	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
1232*5d9d9091SRichard Lowe2:
1233*5d9d9091SRichard Lowe	sllx	%o2, 4, %o4			! PIL index to byte offset
1234*5d9d9091SRichard Lowe	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
1235*5d9d9091SRichard Lowe	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
1236*5d9d9091SRichard Lowe	ldx	[%o3 + %o4], %o0		! old counter in o0
1237*5d9d9091SRichard Lowe	add	%o0, %o5, %o0			! new counter in o0
1238*5d9d9091SRichard Lowe	stx	%o0, [%o3 + %o4]		! store new counter
1239*5d9d9091SRichard Lowe
1240*5d9d9091SRichard Lowe	! Also update intracct[]
1241*5d9d9091SRichard Lowe	lduh	[%o3 + CPU_MSTATE], %o4
1242*5d9d9091SRichard Lowe	sllx	%o4, 3, %o4
1243*5d9d9091SRichard Lowe	add	%o4, CPU_INTRACCT, %o4
1244*5d9d9091SRichard Lowe	ldx	[%o3 + %o4], %o0
1245*5d9d9091SRichard Lowe	add	%o0, %o5, %o0
1246*5d9d9091SRichard Lowe	stx	%o0, [%o3 + %o4]
1247*5d9d9091SRichard Lowe
1248*5d9d9091SRichard Lowe	!
1249*5d9d9091SRichard Lowe	! get back on current thread's stack
1250*5d9d9091SRichard Lowe	!
1251*5d9d9091SRichard Lowe	srl	%l2, LOCK_LEVEL + 1, %l2
1252*5d9d9091SRichard Lowe	tst	%l2				! any more high-level ints?
1253*5d9d9091SRichard Lowe	movz	%xcc, %l1, %sp
1254*5d9d9091SRichard Lowe	!
1255*5d9d9091SRichard Lowe	! Current register usage:
1256*5d9d9091SRichard Lowe	! o2 = PIL
1257*5d9d9091SRichard Lowe	! o3 = CPU pointer
1258*5d9d9091SRichard Lowe	! l0 = return address
1259*5d9d9091SRichard Lowe	! l2 = intr_actv shifted right
1260*5d9d9091SRichard Lowe	!
1261*5d9d9091SRichard Lowe	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
1262*5d9d9091SRichard Lowe	nop
1263*5d9d9091SRichard Lowe	!
1264*5d9d9091SRichard Lowe	! We found another high-level interrupt active below the one that just
1265*5d9d9091SRichard Lowe	! returned. Store a starting timestamp for it in the CPU structure.
1266*5d9d9091SRichard Lowe	!
1267*5d9d9091SRichard Lowe	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
1268*5d9d9091SRichard Lowe	! interrupted high-level interrupt.
1269*5d9d9091SRichard Lowe	! Create mask for cpu_intr_actv. Begin by looking for bits set
1270*5d9d9091SRichard Lowe	! at one level below the current PIL. Since %l2 contains the active
1271*5d9d9091SRichard Lowe	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
1272*5d9d9091SRichard Lowe	! at bit (current_pil - (LOCK_LEVEL + 2)).
1273*5d9d9091SRichard Lowe	! %l1 = mask, %o5 = index of bit set in mask
1274*5d9d9091SRichard Lowe	!
1275*5d9d9091SRichard Lowe	mov	1, %l1
1276*5d9d9091SRichard Lowe	sub	%o2, LOCK_LEVEL + 2, %o5
1277*5d9d9091SRichard Lowe	sll	%l1, %o5, %l1			! l1 = mask for level
1278*5d9d9091SRichard Lowe1:
1279*5d9d9091SRichard Lowe#ifdef DEBUG
1280*5d9d9091SRichard Lowe	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
1281*5d9d9091SRichard Lowe	brnz,pt	%l1, 9f
1282*5d9d9091SRichard Lowe	nop
1283*5d9d9091SRichard Lowe	sethi	%hi(current_thread_nested_PIL_not_found), %o0
1284*5d9d9091SRichard Lowe	call	panic
1285*5d9d9091SRichard Lowe	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
1286*5d9d9091SRichard Lowe9:
1287*5d9d9091SRichard Lowe#endif /* DEBUG */
1288*5d9d9091SRichard Lowe	andcc	%l1, %l2, %g0		! test mask against high-level bits of
1289*5d9d9091SRichard Lowe	bnz	%xcc, 2f		! cpu_intr_actv
1290*5d9d9091SRichard Lowe	nop
1291*5d9d9091SRichard Lowe	srl	%l1, 1, %l1		! No match. Try next lower PIL.
1292*5d9d9091SRichard Lowe	ba,pt	%xcc, 1b
1293*5d9d9091SRichard Lowe	sub	%o5, 1, %o5		! delay - decrement PIL
1294*5d9d9091SRichard Lowe2:
1295*5d9d9091SRichard Lowe	sll	%o5, 3, %o5		! convert array index to byte offset
1296*5d9d9091SRichard Lowe	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
1297*5d9d9091SRichard Lowe	add	%o5, MCPU_PIL_HIGH_START, %o5
1298*5d9d9091SRichard Lowe	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
1299*5d9d9091SRichard Lowe	! Another high-level interrupt is active below this one, so
1300*5d9d9091SRichard Lowe	! there is no need to check for an interrupt thread. That will be
1301*5d9d9091SRichard Lowe	! done by the lowest priority high-level interrupt active.
1302*5d9d9091SRichard Lowe	ba,pt	%xcc, 7f
1303*5d9d9091SRichard Lowe	stx	%o4, [%o3 + %o5]	! delay - store timestamp
1304*5d9d9091SRichard Lowe3:
1305*5d9d9091SRichard Lowe	! If we haven't interrupted another high-level interrupt, we may have
1306*5d9d9091SRichard Lowe	! interrupted a low-level interrupt thread. If so, store a starting
1307*5d9d9091SRichard Lowe	! timestamp in its thread structure.
1308*5d9d9091SRichard Lowe	lduh	[THREAD_REG + T_FLAGS], %o4
1309*5d9d9091SRichard Lowe	andcc	%o4, T_INTR_THREAD, %g0
1310*5d9d9091SRichard Lowe	bz,pt	%xcc, 7f
1311*5d9d9091SRichard Lowe	nop
1312*5d9d9091SRichard Lowe
1313*5d9d9091SRichard Lowe	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
1314*5d9d9091SRichard Lowe	stx	%o4, [THREAD_REG + T_INTR_START]
1315*5d9d9091SRichard Lowe
1316*5d9d9091SRichard Lowe7:
1317*5d9d9091SRichard Lowe	sub	%o2, LOCK_LEVEL + 1, %o4
1318*5d9d9091SRichard Lowe	sll	%o4, CPTRSHIFT, %o5
1319*5d9d9091SRichard Lowe
1320*5d9d9091SRichard Lowe	! Check on_trap saved area and restore as needed
1321*5d9d9091SRichard Lowe	add	%o5, CPU_OTD, %o4
1322*5d9d9091SRichard Lowe	ldn	[%o3 + %o4], %l2
1323*5d9d9091SRichard Lowe	brz,pt %l2, no_ontrp_restore
1324*5d9d9091SRichard Lowe	nop
1325*5d9d9091SRichard Lowe	stn	%l2, [THREAD_REG + T_ONTRAP] ! restore
1326*5d9d9091SRichard Lowe	stn	%g0, [%o3 + %o4]	! clear
1327*5d9d9091SRichard Lowe
1328*5d9d9091SRichard Loweno_ontrp_restore:
1329*5d9d9091SRichard Lowe	! Check on_fault saved area and restore as needed
1330*5d9d9091SRichard Lowe	add	%o5, CPU_OFD, %o4
1331*5d9d9091SRichard Lowe	ldn	[%o3 + %o4], %l2
1332*5d9d9091SRichard Lowe	brz,pt %l2, 8f
1333*5d9d9091SRichard Lowe	nop
1334*5d9d9091SRichard Lowe	stn	%l2, [THREAD_REG + T_ONFAULT] ! restore
1335*5d9d9091SRichard Lowe	stn	%g0, [%o3 + %o4]	! clear
1336*5d9d9091SRichard Lowe	add	%o5, CPU_LFD, %o4
1337*5d9d9091SRichard Lowe	ldn	[%o3 + %o4], %l2
1338*5d9d9091SRichard Lowe	stn	%l2, [THREAD_REG + T_LOFAULT] ! restore
1339*5d9d9091SRichard Lowe	stn	%g0, [%o3 + %o4]	! clear
1340*5d9d9091SRichard Lowe
1341*5d9d9091SRichard Lowe
1342*5d9d9091SRichard Lowe8:
1343*5d9d9091SRichard Lowe	! Enable interrupts and return
1344*5d9d9091SRichard Lowe	jmp	%l0 + 8
1345*5d9d9091SRichard Lowe	wrpr	%g0, %o2, %pil			! enable interrupts
1346*5d9d9091SRichard Lowe	SET_SIZE(current_thread)
1347*5d9d9091SRichard Lowe
1348*5d9d9091SRichard Lowe
1349*5d9d9091SRichard Lowe#ifdef DEBUG
1350*5d9d9091SRichard Lowecurrent_thread_wrong_pil:
1351*5d9d9091SRichard Lowe	.asciz	"current_thread: unexpected pil level: %d"
1352*5d9d9091SRichard Lowecurrent_thread_actv_bit_set:
1353*5d9d9091SRichard Lowe	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
1354*5d9d9091SRichard Lowecurrent_thread_actv_bit_not_set:
1355*5d9d9091SRichard Lowe	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
1356*5d9d9091SRichard Lowecurrent_thread_nested_pil_zero:
1357*5d9d9091SRichard Lowe	.asciz	"current_thread(): timestamp zero for nested PIL %d"
1358*5d9d9091SRichard Lowecurrent_thread_timestamp_zero:
1359*5d9d9091SRichard Lowe	.asciz	"current_thread(): timestamp zero upon handler return"
1360*5d9d9091SRichard Lowecurrent_thread_nested_PIL_not_found:
1361*5d9d9091SRichard Lowe	.asciz	"current_thread: couldn't find nested high-level PIL"
1362*5d9d9091SRichard Lowe#endif /* DEBUG */
1363*5d9d9091SRichard Lowe
1364*5d9d9091SRichard Lowe/*
1365*5d9d9091SRichard Lowe * Return a thread's interrupt level.
1366*5d9d9091SRichard Lowe * The level is saved in t_pil on interrupt entry, so simply return
1367*5d9d9091SRichard Lowe * it from there.
1368*5d9d9091SRichard Lowe *
1369*5d9d9091SRichard Lowe * Caller 'swears' that this really is an interrupt thread.
1370*5d9d9091SRichard Lowe *
1371*5d9d9091SRichard Lowe * int
1372*5d9d9091SRichard Lowe * intr_level(t)
1373*5d9d9091SRichard Lowe *	kthread_id_t	t;
1374*5d9d9091SRichard Lowe */
1375*5d9d9091SRichard Lowe
1376*5d9d9091SRichard Lowe	ENTRY_NP(intr_level)
1377*5d9d9091SRichard Lowe	retl
1378*5d9d9091SRichard Lowe	ldub	[%o0 + T_PIL], %o0		! return saved pil
1379*5d9d9091SRichard Lowe	SET_SIZE(intr_level)
1380*5d9d9091SRichard Lowe
1381*5d9d9091SRichard Lowe	ENTRY_NP(disable_pil_intr)
1382*5d9d9091SRichard Lowe	rdpr	%pil, %o0
1383*5d9d9091SRichard Lowe	retl
1384*5d9d9091SRichard Lowe	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
1385*5d9d9091SRichard Lowe	SET_SIZE(disable_pil_intr)
1386*5d9d9091SRichard Lowe
1387*5d9d9091SRichard Lowe	ENTRY_NP(enable_pil_intr)
1388*5d9d9091SRichard Lowe	retl
1389*5d9d9091SRichard Lowe	wrpr	%o0, %pil
1390*5d9d9091SRichard Lowe	SET_SIZE(enable_pil_intr)
1391*5d9d9091SRichard Lowe
1392*5d9d9091SRichard Lowe	ENTRY_NP(disable_vec_intr)
1393*5d9d9091SRichard Lowe	rdpr	%pstate, %o0
1394*5d9d9091SRichard Lowe	andn	%o0, PSTATE_IE, %g1
1395*5d9d9091SRichard Lowe	retl
1396*5d9d9091SRichard Lowe	wrpr	%g0, %g1, %pstate		! disable interrupt
1397*5d9d9091SRichard Lowe	SET_SIZE(disable_vec_intr)
1398*5d9d9091SRichard Lowe
1399*5d9d9091SRichard Lowe	ENTRY_NP(enable_vec_intr)
1400*5d9d9091SRichard Lowe	retl
1401*5d9d9091SRichard Lowe	wrpr	%g0, %o0, %pstate
1402*5d9d9091SRichard Lowe	SET_SIZE(enable_vec_intr)
1403*5d9d9091SRichard Lowe
1404*5d9d9091SRichard Lowe	ENTRY_NP(cbe_level14)
1405*5d9d9091SRichard Lowe	save    %sp, -SA(MINFRAME), %sp ! get a new window
1406*5d9d9091SRichard Lowe	!
1407*5d9d9091SRichard Lowe	! Make sure that this is from TICK_COMPARE; if not just return
1408*5d9d9091SRichard Lowe	!
1409*5d9d9091SRichard Lowe	rd	SOFTINT, %l1
1410*5d9d9091SRichard Lowe	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
1411*5d9d9091SRichard Lowe	andcc	%l1, %o2, %g0
1412*5d9d9091SRichard Lowe	bz,pn	%icc, 2f
1413*5d9d9091SRichard Lowe	nop
1414*5d9d9091SRichard Lowe
1415*5d9d9091SRichard Lowe	CPU_ADDR(%o1, %o2)
1416*5d9d9091SRichard Lowe	call	cyclic_fire
1417*5d9d9091SRichard Lowe	mov	%o1, %o0
1418*5d9d9091SRichard Lowe2:
1419*5d9d9091SRichard Lowe	ret
1420*5d9d9091SRichard Lowe	restore	%g0, 1, %o0
1421*5d9d9091SRichard Lowe	SET_SIZE(cbe_level14)
1422*5d9d9091SRichard Lowe
1423*5d9d9091SRichard Lowe
1424*5d9d9091SRichard Lowe	ENTRY_NP(kdi_setsoftint)
1425*5d9d9091SRichard Lowe	save	%sp, -SA(MINFRAME), %sp	! get a new window
1426*5d9d9091SRichard Lowe	rdpr	%pstate, %l5
1427*5d9d9091SRichard Lowe	andn	%l5, PSTATE_IE, %l1
1428*5d9d9091SRichard Lowe	wrpr	%l1, %pstate		! disable interrupt
1429*5d9d9091SRichard Lowe	!
1430*5d9d9091SRichard Lowe	! We have a pointer to an interrupt vector data structure.
1431*5d9d9091SRichard Lowe	! Put the request on the cpu's softint priority list and
1432*5d9d9091SRichard Lowe	! set %set_softint.
1433*5d9d9091SRichard Lowe	!
1434*5d9d9091SRichard Lowe	! Register usage
1435*5d9d9091SRichard Lowe	! 	%i0 - pointer to intr_vec_t (iv)
1436*5d9d9091SRichard Lowe	!	%l2 - requested pil
1437*5d9d9091SRichard Lowe	!	%l4 - cpu
1438*5d9d9091SRichard Lowe	!	%l5 - pstate
1439*5d9d9091SRichard Lowe	!	%l1, %l3, %l6 - temps
1440*5d9d9091SRichard Lowe	!
1441*5d9d9091SRichard Lowe	! check if a softint is already pending for this intr_vec_t;
1442*5d9d9091SRichard Lowe	! if one is pending, don't bother queuing another.
1443*5d9d9091SRichard Lowe	!
1444*5d9d9091SRichard Lowe	lduh	[%i0 + IV_FLAGS], %l1	! %l1 = iv->iv_flags
1445*5d9d9091SRichard Lowe	and	%l1, IV_SOFTINT_PEND, %l6 ! %l6 = iv->iv_flags & IV_SOFTINT_PEND
1446*5d9d9091SRichard Lowe	brnz,pn	%l6, 4f			! branch if softint is already pending
1447*5d9d9091SRichard Lowe	or	%l1, IV_SOFTINT_PEND, %l2
1448*5d9d9091SRichard Lowe	sth	%l2, [%i0 + IV_FLAGS]	! Set IV_SOFTINT_PEND flag
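	!
	! Sketch of the check above:
	!	if (iv->iv_flags & IV_SOFTINT_PEND)
	!		goto out;
	!	iv->iv_flags |= IV_SOFTINT_PEND;
	!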
1449*5d9d9091SRichard Lowe
1450*5d9d9091SRichard Lowe	CPU_ADDR(%l4, %l2)		! %l4 = cpu
1451*5d9d9091SRichard Lowe	lduh	[%i0 + IV_PIL], %l2	! %l2 = iv->iv_pil
1452*5d9d9091SRichard Lowe
1453*5d9d9091SRichard Lowe	!
1454*5d9d9091SRichard Lowe	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1455*5d9d9091SRichard Lowe	!
1456*5d9d9091SRichard Lowe	sll	%l2, CPTRSHIFT, %l0	! %l0 = offset to pil entry
1457*5d9d9091SRichard Lowe	add	%l4, INTR_TAIL, %l6	! %l6 = &cpu->m_cpu.intr_tail
1458*5d9d9091SRichard Lowe	ldn	[%l6 + %l0], %l1	! %l1 = cpu->m_cpu.intr_tail[pil]
1459*5d9d9091SRichard Lowe					!       current tail (ct)
1460*5d9d9091SRichard Lowe	brz,pt	%l1, 2f			! branch if current tail is NULL
1461*5d9d9091SRichard Lowe	stn	%i0, [%l6 + %l0]	! make intr_vec_t (iv) as new tail
1462*5d9d9091SRichard Lowe	!
1463*5d9d9091SRichard Lowe	! there's pending intr_vec_t already
1464*5d9d9091SRichard Lowe	!
1465*5d9d9091SRichard Lowe	lduh	[%l1 + IV_FLAGS], %l6	! %l6 = ct->iv_flags
1466*5d9d9091SRichard Lowe	and	%l6, IV_SOFTINT_MT, %l6	! %l6 = ct->iv_flags & IV_SOFTINT_MT
1467*5d9d9091SRichard Lowe	brz,pt	%l6, 1f			! check for Multi target softint flag
1468*5d9d9091SRichard Lowe	add	%l1, IV_PIL_NEXT, %l3	! %l3 = &ct->iv_pil_next
1469*5d9d9091SRichard Lowe	ld	[%l4 + CPU_ID], %l6	! for multi target softint, use cpuid
1470*5d9d9091SRichard Lowe	sll	%l6, CPTRSHIFT, %l6	! calculate offset address from cpuid
1471*5d9d9091SRichard Lowe	add	%l3, %l6, %l3		! %l3 =  &ct->iv_xpil_next[cpuid]
1472*5d9d9091SRichard Lowe1:
1473*5d9d9091SRichard Lowe	!
1474*5d9d9091SRichard Lowe	! update old tail
1475*5d9d9091SRichard Lowe	!
1476*5d9d9091SRichard Lowe	ba,pt	%xcc, 3f
1477*5d9d9091SRichard Lowe	stn	%i0, [%l3]		! [%l3] = iv, set pil_next field
1478*5d9d9091SRichard Lowe2:
1479*5d9d9091SRichard Lowe	!
1480*5d9d9091SRichard Lowe	! no pending intr_vec_t; make intr_vec_t as new head
1481*5d9d9091SRichard Lowe	!
1482*5d9d9091SRichard Lowe	add	%l4, INTR_HEAD, %l6	! %l6 = &cpu->m_cpu.intr_head[pil]
1483*5d9d9091SRichard Lowe	stn	%i0, [%l6 + %l0]	! cpu->m_cpu.intr_head[pil] = iv
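	!
	! The list insertion above is, in rough C (an illustrative sketch):
	!	if ((ct = cpu->m_cpu.intr_tail[pil]) == NULL)
	!		cpu->m_cpu.intr_head[pil] = iv;
	!	else if (ct->iv_flags & IV_SOFTINT_MT)
	!		ct->iv_xpil_next[cpu->cpu_id] = iv;
	!	else
	!		ct->iv_pil_next = iv;
	!	cpu->m_cpu.intr_tail[pil] = iv;
	!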
1484*5d9d9091SRichard Lowe3:
1485*5d9d9091SRichard Lowe	!
1486*5d9d9091SRichard Lowe	! Write %set_softint with (1<<pil) to cause a "pil" level trap
1487*5d9d9091SRichard Lowe	!
1488*5d9d9091SRichard Lowe	mov	1, %l1			! %l1 = 1
1489*5d9d9091SRichard Lowe	sll	%l1, %l2, %l1		! %l1 = 1 << pil
1490*5d9d9091SRichard Lowe	wr	%l1, SET_SOFTINT	! trigger required pil softint
1491*5d9d9091SRichard Lowe4:
1492*5d9d9091SRichard Lowe	wrpr	%g0, %l5, %pstate	! %pstate = saved %pstate (in %l5)
1493*5d9d9091SRichard Lowe	ret
1494*5d9d9091SRichard Lowe	restore
1495*5d9d9091SRichard Lowe	SET_SIZE(kdi_setsoftint)
1496*5d9d9091SRichard Lowe
1497*5d9d9091SRichard Lowe	!
1498*5d9d9091SRichard Lowe	! Register usage
1499*5d9d9091SRichard Lowe	!	Arguments:
1500*5d9d9091SRichard Lowe	! 	%g1 - Pointer to intr_vec_t (iv)
1501*5d9d9091SRichard Lowe	!
1502*5d9d9091SRichard Lowe	!	Internal:
1503*5d9d9091SRichard Lowe	!	%g2 - pil
1504*5d9d9091SRichard Lowe	!	%g4 - cpu
1505*5d9d9091SRichard Lowe	!	%g3,%g5-g7 - temps
1506*5d9d9091SRichard Lowe	!
1507*5d9d9091SRichard Lowe	ENTRY_NP(setsoftint_tl1)
1508*5d9d9091SRichard Lowe	!
1509*5d9d9091SRichard Lowe	! We have a pointer to an interrupt vector data structure.
1510*5d9d9091SRichard Lowe	! Put the request on the cpu's softint priority list and
1511*5d9d9091SRichard Lowe	! set %set_softint.
1512*5d9d9091SRichard Lowe	!
1513*5d9d9091SRichard Lowe	CPU_ADDR(%g4, %g2)		! %g4 = cpu
1514*5d9d9091SRichard Lowe	lduh	[%g1 + IV_PIL], %g2	! %g2 = iv->iv_pil
1515*5d9d9091SRichard Lowe
1516*5d9d9091SRichard Lowe	!
1517*5d9d9091SRichard Lowe	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1518*5d9d9091SRichard Lowe	!
1519*5d9d9091SRichard Lowe	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
1520*5d9d9091SRichard Lowe	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1521*5d9d9091SRichard Lowe	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
1522*5d9d9091SRichard Lowe					!       current tail (ct)
1523*5d9d9091SRichard Lowe	brz,pt	%g5, 1f			! branch if current tail is NULL
1524*5d9d9091SRichard Lowe	stn	%g1, [%g6 + %g7]	! make intr_vec_t (iv) as new tail
1525*5d9d9091SRichard Lowe	!
1526*5d9d9091SRichard Lowe	! there's pending intr_vec_t already
1527*5d9d9091SRichard Lowe	!
1528*5d9d9091SRichard Lowe	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1529*5d9d9091SRichard Lowe	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1530*5d9d9091SRichard Lowe	brz,pt	%g6, 0f			! check for Multi target softint flag
1531*5d9d9091SRichard Lowe	add	%g5, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
1532*5d9d9091SRichard Lowe	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1533*5d9d9091SRichard Lowe	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1534*5d9d9091SRichard Lowe	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
1535*5d9d9091SRichard Lowe0:
1536*5d9d9091SRichard Lowe	!
1537*5d9d9091SRichard Lowe	! update old tail
1538*5d9d9091SRichard Lowe	!
1539*5d9d9091SRichard Lowe	ba,pt	%xcc, 2f
1540*5d9d9091SRichard Lowe	stn	%g1, [%g3]		! [%g3] = iv, set pil_next field
1541*5d9d9091SRichard Lowe1:
1542*5d9d9091SRichard Lowe	!
1543*5d9d9091SRichard Lowe	! no pending intr_vec_t; make intr_vec_t as new head
1544*5d9d9091SRichard Lowe	!
1545*5d9d9091SRichard Lowe	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
1546*5d9d9091SRichard Lowe	stn	%g1, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
1547*5d9d9091SRichard Lowe2:
1548*5d9d9091SRichard Lowe#ifdef TRAPTRACE
1549*5d9d9091SRichard Lowe	TRACE_PTR(%g5, %g6)
1550*5d9d9091SRichard Lowe	GET_TRACE_TICK(%g6, %g3)
1551*5d9d9091SRichard Lowe	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
1552*5d9d9091SRichard Lowe	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
1553*5d9d9091SRichard Lowe	rdpr	%tt, %g6
1554*5d9d9091SRichard Lowe	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
1555*5d9d9091SRichard Lowe	rdpr	%tpc, %g6
1556*5d9d9091SRichard Lowe	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
1557*5d9d9091SRichard Lowe	rdpr	%tstate, %g6
1558*5d9d9091SRichard Lowe	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
1559*5d9d9091SRichard Lowe	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
1560*5d9d9091SRichard Lowe	stna	%g1, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
1561*5d9d9091SRichard Lowe	ldn	[%g1 + IV_PIL_NEXT], %g6	!
1562*5d9d9091SRichard Lowe	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = iv->iv_pil_next
1563*5d9d9091SRichard Lowe	add	%g4, INTR_HEAD, %g6
1564*5d9d9091SRichard Lowe	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
1565*5d9d9091SRichard Lowe	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
1566*5d9d9091SRichard Lowe	add	%g4, INTR_TAIL, %g6
1567*5d9d9091SRichard Lowe	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
1568*5d9d9091SRichard Lowe	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
1569*5d9d9091SRichard Lowe	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
1570*5d9d9091SRichard Lowe	TRACE_NEXT(%g5, %g6, %g3)
1571*5d9d9091SRichard Lowe#endif /* TRAPTRACE */
1572*5d9d9091SRichard Lowe	!
1573*5d9d9091SRichard Lowe	! Write %set_softint with (1<<pil) to cause a "pil" level trap
1574*5d9d9091SRichard Lowe	!
1575*5d9d9091SRichard Lowe	mov	1, %g5			! %g5 = 1
1576*5d9d9091SRichard Lowe	sll	%g5, %g2, %g5		! %g5 = 1 << pil
1577*5d9d9091SRichard Lowe	wr	%g5, SET_SOFTINT	! trigger required pil softint
1578*5d9d9091SRichard Lowe	retry
1579*5d9d9091SRichard Lowe	SET_SIZE(setsoftint_tl1)
1580*5d9d9091SRichard Lowe
1581*5d9d9091SRichard Lowe	!
1582*5d9d9091SRichard Lowe	! Register usage
1583*5d9d9091SRichard Lowe	!	Arguments:
1584*5d9d9091SRichard Lowe	! 	%g1 - inumber
1585*5d9d9091SRichard Lowe	!
1586*5d9d9091SRichard Lowe	!	Internal:
1587*5d9d9091SRichard Lowe	! 	%g1 - softint pil mask
1588*5d9d9091SRichard Lowe	!	%g2 - pil of intr_vec_t
1589*5d9d9091SRichard Lowe	!	%g3 - pointer to current intr_vec_t (iv)
1590*5d9d9091SRichard Lowe	!	%g4 - cpu
1591*5d9d9091SRichard Lowe	!	%g5, %g6,%g7 - temps
1592*5d9d9091SRichard Lowe	!
1593*5d9d9091SRichard Lowe	ENTRY_NP(setvecint_tl1)
1594*5d9d9091SRichard Lowe	!
1595*5d9d9091SRichard Lowe	! Verify the inumber received (should be inum < MAXIVNUM).
1596*5d9d9091SRichard Lowe	!
1597*5d9d9091SRichard Lowe	set	MAXIVNUM, %g2
1598*5d9d9091SRichard Lowe	cmp	%g1, %g2
1599*5d9d9091SRichard Lowe	bgeu,pn	%xcc, .no_ivintr
1600*5d9d9091SRichard Lowe	clr	%g2			! expected in .no_ivintr
1601*5d9d9091SRichard Lowe
1602*5d9d9091SRichard Lowe	!
1603*5d9d9091SRichard Lowe	! Fetch data from intr_vec_table according to the inum.
1604*5d9d9091SRichard Lowe	!
1605*5d9d9091SRichard Lowe	! We have an interrupt number. Fetch the interrupt vector requests
1606*5d9d9091SRichard Lowe	! from the interrupt vector table for a given interrupt number and
1607*5d9d9091SRichard Lowe	! insert them into cpu's softint priority lists and set %set_softint.
1608*5d9d9091SRichard Lowe	!
1609*5d9d9091SRichard Lowe	set	intr_vec_table, %g5	! %g5 = intr_vec_table
1610*5d9d9091SRichard Lowe	sll	%g1, CPTRSHIFT, %g6	! %g6 = offset to inum entry in table
1611*5d9d9091SRichard Lowe	add	%g5, %g6, %g5		! %g5 = &intr_vec_table[inum]
1612*5d9d9091SRichard Lowe	ldn	[%g5], %g3		! %g3 = pointer to first entry of
1613*5d9d9091SRichard Lowe					!       intr_vec_t list
1614*5d9d9091SRichard Lowe
1615*5d9d9091SRichard Lowe	! Verify that the first intr_vec_t pointer for the given inum is not
1616*5d9d9091SRichard Lowe	! NULL. This check used to be guarded by DEBUG, but broken drivers can
1617*5d9d9091SRichard Lowe	! cause spurious tick interrupts when the softint register is programmed
1618*5d9d9091SRichard Lowe	! with 1 << 0 at the end of this routine, so we now always check for a
1619*5d9d9091SRichard Lowe	! valid intr_vec_t pointer.
1620*5d9d9091SRichard Lowe	brz,pn	%g3, .no_ivintr
1621*5d9d9091SRichard Lowe	nop
1622*5d9d9091SRichard Lowe
1623*5d9d9091SRichard Lowe	!
1624*5d9d9091SRichard Lowe	! Traverse the intr_vec_t link list, put each item on to corresponding
1625*5d9d9091SRichard Lowe	! CPU softint priority queue, and compose the final softint pil mask.
1626*5d9d9091SRichard Lowe	!
1627*5d9d9091SRichard Lowe	! At this point:
1628*5d9d9091SRichard Lowe	!	%g3 = intr_vec_table[inum]
1629*5d9d9091SRichard Lowe	!
1630*5d9d9091SRichard Lowe	CPU_ADDR(%g4, %g2)		! %g4 = cpu
1631*5d9d9091SRichard Lowe	mov	%g0, %g1		! %g1 = 0, initialize pil mask to 0
1632*5d9d9091SRichard Lowe0:
1633*5d9d9091SRichard Lowe	!
1634*5d9d9091SRichard Lowe	! Insert next intr_vec_t (iv) to appropriate cpu's softint priority list
1635*5d9d9091SRichard Lowe	!
1636*5d9d9091SRichard Lowe	! At this point:
1637*5d9d9091SRichard Lowe	!	%g1 = softint pil mask
1638*5d9d9091SRichard Lowe	!	%g3 = pointer to next intr_vec_t (iv)
1639*5d9d9091SRichard Lowe	!	%g4 = cpu
1640*5d9d9091SRichard Lowe	!
1641*5d9d9091SRichard Lowe	lduh	[%g3 + IV_PIL], %g2	! %g2 = iv->iv_pil
1642*5d9d9091SRichard Lowe	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
1643*5d9d9091SRichard Lowe	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1644*5d9d9091SRichard Lowe	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
1645*5d9d9091SRichard Lowe					! 	current tail (ct)
1646*5d9d9091SRichard Lowe	brz,pt	%g5, 2f			! branch if current tail is NULL
1647*5d9d9091SRichard Lowe	stn	%g3, [%g6 + %g7]	! make intr_vec_t (iv) as new tail
1648*5d9d9091SRichard Lowe					! cpu->m_cpu.intr_tail[pil] = iv
1649*5d9d9091SRichard Lowe	!
1650*5d9d9091SRichard Lowe	! there's pending intr_vec_t already
1651*5d9d9091SRichard Lowe	!
1652*5d9d9091SRichard Lowe	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1653*5d9d9091SRichard Lowe	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1654*5d9d9091SRichard Lowe	brz,pt	%g6, 1f			! check for Multi target softint flag
1655*5d9d9091SRichard Lowe	add	%g5, IV_PIL_NEXT, %g5	! %g5 = &ct->iv_pil_next
1656*5d9d9091SRichard Lowe	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1657*5d9d9091SRichard Lowe	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1658*5d9d9091SRichard Lowe	add	%g5, %g6, %g5		! %g5 = &ct->iv_xpil_next[cpuid]
1659*5d9d9091SRichard Lowe1:
1660*5d9d9091SRichard Lowe	!
1661*5d9d9091SRichard Lowe	! update old tail
1662*5d9d9091SRichard Lowe	!
1663*5d9d9091SRichard Lowe	ba,pt	%xcc, 3f
1664*5d9d9091SRichard Lowe	stn	%g3, [%g5]		! [%g5] = iv, set pil_next field
1665*5d9d9091SRichard Lowe2:
1666*5d9d9091SRichard Lowe	!
1667*5d9d9091SRichard Lowe	! no pending intr_vec_t; make intr_vec_t as new head
1668*5d9d9091SRichard Lowe	!
1669*5d9d9091SRichard Lowe	add	%g4, INTR_HEAD, %g6	!  %g6 = &cpu->m_cpu.intr_head[pil]
1670*5d9d9091SRichard Lowe	stn	%g3, [%g6 + %g7]	!  cpu->m_cpu.intr_head[pil] = iv
1671*5d9d9091SRichard Lowe3:
1672*5d9d9091SRichard Lowe#ifdef TRAPTRACE
1673*5d9d9091SRichard Lowe	TRACE_PTR(%g5, %g6)
1674*5d9d9091SRichard Lowe	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
1675*5d9d9091SRichard Lowe	rdpr	%tt, %g6
1676*5d9d9091SRichard Lowe	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
1677*5d9d9091SRichard Lowe	rdpr	%tpc, %g6
1678*5d9d9091SRichard Lowe	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
1679*5d9d9091SRichard Lowe	rdpr	%tstate, %g6
1680*5d9d9091SRichard Lowe	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
1681*5d9d9091SRichard Lowe	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
1682*5d9d9091SRichard Lowe	stna	%g3, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
1683*5d9d9091SRichard Lowe	stna	%g1, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = pil mask
1684*5d9d9091SRichard Lowe	add	%g4, INTR_HEAD, %g6
1685*5d9d9091SRichard Lowe	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
1686*5d9d9091SRichard Lowe	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
1687*5d9d9091SRichard Lowe	add	%g4, INTR_TAIL, %g6
1688*5d9d9091SRichard Lowe	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
1689*5d9d9091SRichard Lowe	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
1690*5d9d9091SRichard Lowe	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
1691*5d9d9091SRichard Lowe	GET_TRACE_TICK(%g6, %g7)
1692*5d9d9091SRichard Lowe	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
1693*5d9d9091SRichard Lowe	TRACE_NEXT(%g5, %g6, %g7)
1694*5d9d9091SRichard Lowe#endif /* TRAPTRACE */
1695*5d9d9091SRichard Lowe	mov	1, %g6			! %g6 = 1
1696*5d9d9091SRichard Lowe	sll	%g6, %g2, %g6		! %g6 = 1 << pil
1697*5d9d9091SRichard Lowe	or	%g1, %g6, %g1		! %g1 |= (1 << pil), pil mask
1698*5d9d9091SRichard Lowe	ldn	[%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
1699*5d9d9091SRichard Lowe	brnz,pn	%g3, 0b			! iv->iv_vec_next is non NULL, goto 0b
1700*5d9d9091SRichard Lowe	nop
1701*5d9d9091SRichard Lowe	wr	%g1, SET_SOFTINT	! triggered one or more pil softints
1702*5d9d9091SRichard Lowe	retry
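	!
	! The traversal above is, in rough C (an illustrative sketch):
	!	for (mask = 0, iv = intr_vec_table[inum]; iv != NULL;
	!	    iv = iv->iv_vec_next) {
	!		/* enqueue iv on this CPU's softint list for iv->iv_pil */
	!		mask |= 1 << iv->iv_pil;
	!	}
	!	set_softint(mask);
	!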
1703*5d9d9091SRichard Lowe
1704*5d9d9091SRichard Lowe.no_ivintr:
1705*5d9d9091SRichard Lowe	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1706*5d9d9091SRichard Lowe	mov	%g2, %g3
1707*5d9d9091SRichard Lowe	mov	%g1, %g2
1708*5d9d9091SRichard Lowe	set	no_ivintr, %g1
1709*5d9d9091SRichard Lowe	ba,pt	%xcc, sys_trap
1710*5d9d9091SRichard Lowe	mov	PIL_15, %g4
1711*5d9d9091SRichard Lowe	SET_SIZE(setvecint_tl1)
1712*5d9d9091SRichard Lowe
1713*5d9d9091SRichard Lowe	ENTRY_NP(wr_clr_softint)
1714*5d9d9091SRichard Lowe	retl
1715*5d9d9091SRichard Lowe	wr	%o0, CLEAR_SOFTINT
1716*5d9d9091SRichard Lowe	SET_SIZE(wr_clr_softint)
1717*5d9d9091SRichard Lowe
1718*5d9d9091SRichard Lowe/*
1719*5d9d9091SRichard Lowe * intr_enqueue_req
1720*5d9d9091SRichard Lowe *
1721*5d9d9091SRichard Lowe * %o0 - pil
1722*5d9d9091SRichard Lowe * %o1 - pointer to intr_vec_t (iv)
1723*5d9d9091SRichard Lowe * %o5 - preserved
1724*5d9d9091SRichard Lowe * %g5 - preserved
1725*5d9d9091SRichard Lowe */
1726*5d9d9091SRichard Lowe	ENTRY_NP(intr_enqueue_req)
1727*5d9d9091SRichard Lowe	!
1728*5d9d9091SRichard Lowe	CPU_ADDR(%g4, %g1)		! %g4 = cpu
1729*5d9d9091SRichard Lowe
1730*5d9d9091SRichard Lowe	!
1731*5d9d9091SRichard Lowe	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1732*5d9d9091SRichard Lowe	!
1733*5d9d9091SRichard Lowe	sll	%o0, CPTRSHIFT, %o0	! %o0 = offset to pil entry
1734*5d9d9091SRichard Lowe	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1735*5d9d9091SRichard Lowe	ldn	[%o0 + %g6], %g1	! %g1 = cpu->m_cpu.intr_tail[pil]
1736*5d9d9091SRichard Lowe					!       current tail (ct)
1737*5d9d9091SRichard Lowe	brz,pt	%g1, 2f			! branch if current tail is NULL
1738*5d9d9091SRichard Lowe	stn	%o1, [%g6 + %o0]	! make intr_vec_t (iv) as new tail
1739*5d9d9091SRichard Lowe
1740*5d9d9091SRichard Lowe	!
1741*5d9d9091SRichard Lowe	! there's pending intr_vec_t already
1742*5d9d9091SRichard Lowe	!
1743*5d9d9091SRichard Lowe	lduh	[%g1 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1744*5d9d9091SRichard Lowe	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1745*5d9d9091SRichard Lowe	brz,pt	%g6, 1f			! check for Multi target softint flag
1746*5d9d9091SRichard Lowe	add	%g1, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
1747*5d9d9091SRichard Lowe	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1748*5d9d9091SRichard Lowe	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1749*5d9d9091SRichard Lowe	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
1750*5d9d9091SRichard Lowe1:
1751*5d9d9091SRichard Lowe	!
1752*5d9d9091SRichard Lowe	! update old tail
1753*5d9d9091SRichard Lowe	!
1754*5d9d9091SRichard Lowe	ba,pt	%xcc, 3f
1755*5d9d9091SRichard Lowe	stn	%o1, [%g3]		! [%g3] = iv, set pil_next field
1756*5d9d9091SRichard Lowe2:
1757*5d9d9091SRichard Lowe	!
1758*5d9d9091SRichard Lowe	! no intr_vec_t's queued so make intr_vec_t as new head
1759*5d9d9091SRichard Lowe	!
1760*5d9d9091SRichard Lowe	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
1761*5d9d9091SRichard Lowe	stn	%o1, [%g6 + %o0]	! cpu->m_cpu.intr_head[pil] = iv
1762*5d9d9091SRichard Lowe3:
1763*5d9d9091SRichard Lowe	retl
1764*5d9d9091SRichard Lowe	nop
1765*5d9d9091SRichard Lowe	SET_SIZE(intr_enqueue_req)
1766*5d9d9091SRichard Lowe
1767*5d9d9091SRichard Lowe/*
1768*5d9d9091SRichard Lowe * Set CPU's base SPL level, based on which interrupt levels are active.
1769*5d9d9091SRichard Lowe * 	Called at spl7 or above.
1770*5d9d9091SRichard Lowe */
1771*5d9d9091SRichard Lowe
1772*5d9d9091SRichard Lowe	ENTRY_NP(set_base_spl)
1773*5d9d9091SRichard Lowe	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
1774*5d9d9091SRichard Lowe	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask
1775*5d9d9091SRichard Lowe
1776*5d9d9091SRichard Lowe/*
1777*5d9d9091SRichard Lowe * WARNING: non-standard calling sequence; do not call from C
1778*5d9d9091SRichard Lowe *	%o2 = pointer to CPU
1779*5d9d9091SRichard Lowe *	%o5 = updated CPU_INTR_ACTV
1780*5d9d9091SRichard Lowe */
1781*5d9d9091SRichard Lowe_intr_set_spl:					! intr_thread_exit enters here
1782*5d9d9091SRichard Lowe	!
1783*5d9d9091SRichard Lowe	! Determine the highest interrupt level active.  Several could be
1784*5d9d9091SRichard Lowe	! blocked at higher levels than this one, so we must convert the flags
1785*5d9d9091SRichard Lowe	! to a PIL.  Normally nothing will be blocked, so test for this first.
1786*5d9d9091SRichard Lowe	!
1787*5d9d9091SRichard Lowe	brz,pt	%o5, 1f				! nothing active
1788*5d9d9091SRichard Lowe	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
1789*5d9d9091SRichard Lowe	set	_intr_flag_table, %o1
1790*5d9d9091SRichard Lowe	tst	%o3				! see if any of the bits set
1791*5d9d9091SRichard Lowe	ldub	[%o1 + %o3], %o3		! load bit number
1792*5d9d9091SRichard Lowe	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
1793*5d9d9091SRichard Lowe	add	%o3, 11-1, %o3			! delay - add bit number - 1
1794*5d9d9091SRichard Lowe
1795*5d9d9091SRichard Lowe	sra	%o5, 6, %o3			! test bits 10-6
1796*5d9d9091SRichard Lowe	tst	%o3
1797*5d9d9091SRichard Lowe	ldub	[%o1 + %o3], %o3
1798*5d9d9091SRichard Lowe	bnz,a,pn %xcc, 1f
1799*5d9d9091SRichard Lowe	add	%o3, 6-1, %o3
1800*5d9d9091SRichard Lowe
1801*5d9d9091SRichard Lowe	sra	%o5, 1, %o3			! test bits 5-1
1802*5d9d9091SRichard Lowe	ldub	[%o1 + %o3], %o3
1803*5d9d9091SRichard Lowe
1804*5d9d9091SRichard Lowe	!
1805*5d9d9091SRichard Lowe	! highest interrupt level number active is in %o3
1806*5d9d9091SRichard Lowe	!
1807*5d9d9091SRichard Lowe1:
1808*5d9d9091SRichard Lowe	retl
1809*5d9d9091SRichard Lowe	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
1810*5d9d9091SRichard Lowe	SET_SIZE(set_base_spl)
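/*
 * In rough C, the priority encode above is (an illustrative sketch;
 * _intr_flag_table[i] holds the high-order bit number + 1 of i):
 *
 *	actv = cpu->cpu_intr_actv;
 *	if (actv == 0)
 *		spl = 0;
 *	else if (actv >> 11)
 *		spl = 10 + _intr_flag_table[actv >> 11];
 *	else if (actv >> 6)
 *		spl = 5 + _intr_flag_table[actv >> 6];
 *	else
 *		spl = _intr_flag_table[actv >> 1];
 *	cpu->cpu_base_spl = spl;
 */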
1811*5d9d9091SRichard Lowe
1812*5d9d9091SRichard Lowe/*
1813*5d9d9091SRichard Lowe * Table that finds the most significant bit set in a five-bit field.
1814*5d9d9091SRichard Lowe * Each entry is the high-order bit number + 1 of its index in the table.
1815*5d9d9091SRichard Lowe * This read-only data is in the text segment.
1816*5d9d9091SRichard Lowe */
1817*5d9d9091SRichard Lowe_intr_flag_table:
1818*5d9d9091SRichard Lowe	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
1819*5d9d9091SRichard Lowe	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
1820*5d9d9091SRichard Lowe	.align	4
1821*5d9d9091SRichard Lowe
1822*5d9d9091SRichard Lowe/*
1823*5d9d9091SRichard Lowe * int
1824*5d9d9091SRichard Lowe * intr_passivate(from, to)
1825*5d9d9091SRichard Lowe *	kthread_id_t	from;		interrupt thread
1826*5d9d9091SRichard Lowe *	kthread_id_t	to;		interrupted thread
1827*5d9d9091SRichard Lowe */
1828*5d9d9091SRichard Lowe
1829*5d9d9091SRichard Lowe	ENTRY_NP(intr_passivate)
1830*5d9d9091SRichard Lowe	save	%sp, -SA(MINFRAME), %sp	! get a new window
1831*5d9d9091SRichard Lowe
1832*5d9d9091SRichard Lowe	flushw				! force register windows to stack
1833*5d9d9091SRichard Lowe	!
1834*5d9d9091SRichard Lowe	! restore registers from the base of the stack of the interrupt thread.
1835*5d9d9091SRichard Lowe	!
1836*5d9d9091SRichard Lowe	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
1837*5d9d9091SRichard Lowe	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
1838*5d9d9091SRichard Lowe	ldn	[%i2 + (1*GREGSIZE)], %l1
1839*5d9d9091SRichard Lowe	ldn	[%i2 + (2*GREGSIZE)], %l2
1840*5d9d9091SRichard Lowe	ldn	[%i2 + (3*GREGSIZE)], %l3
1841*5d9d9091SRichard Lowe	ldn	[%i2 + (4*GREGSIZE)], %l4
1842*5d9d9091SRichard Lowe	ldn	[%i2 + (5*GREGSIZE)], %l5
1843*5d9d9091SRichard Lowe	ldn	[%i2 + (6*GREGSIZE)], %l6
1844*5d9d9091SRichard Lowe	ldn	[%i2 + (7*GREGSIZE)], %l7
1845*5d9d9091SRichard Lowe	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
1846*5d9d9091SRichard Lowe	ldn	[%i2 + (9*GREGSIZE)], %o1
1847*5d9d9091SRichard Lowe	ldn	[%i2 + (10*GREGSIZE)], %o2
1848*5d9d9091SRichard Lowe	ldn	[%i2 + (11*GREGSIZE)], %o3
1849*5d9d9091SRichard Lowe	ldn	[%i2 + (12*GREGSIZE)], %o4
1850*5d9d9091SRichard Lowe	ldn	[%i2 + (13*GREGSIZE)], %o5
1851*5d9d9091SRichard Lowe	ldn	[%i2 + (14*GREGSIZE)], %i4
1852*5d9d9091SRichard Lowe					! copy stack pointer without using %sp
1853*5d9d9091SRichard Lowe	ldn	[%i2 + (15*GREGSIZE)], %i5
1854*5d9d9091SRichard Lowe	!
1855*5d9d9091SRichard Lowe	! put registers into the save area at the top of the interrupted
1856*5d9d9091SRichard Lowe	! thread's stack, pointed to by %l7 in the save area just loaded.
1857*5d9d9091SRichard Lowe	!
1858*5d9d9091SRichard Lowe	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
1859*5d9d9091SRichard Lowe	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
1860*5d9d9091SRichard Lowe	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
1861*5d9d9091SRichard Lowe	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
1862*5d9d9091SRichard Lowe	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
1863*5d9d9091SRichard Lowe	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
1864*5d9d9091SRichard Lowe	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
1865*5d9d9091SRichard Lowe	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
1866*5d9d9091SRichard Lowe	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
1867*5d9d9091SRichard Lowe	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
1868*5d9d9091SRichard Lowe	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
1869*5d9d9091SRichard Lowe	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
1870*5d9d9091SRichard Lowe	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
1871*5d9d9091SRichard Lowe	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
1872*5d9d9091SRichard Lowe	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
1873*5d9d9091SRichard Lowe	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
1874*5d9d9091SRichard Lowe						! fp, %i7 copied using %i4
1875*5d9d9091SRichard Lowe	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
1876*5d9d9091SRichard Lowe	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
1877*5d9d9091SRichard Lowe						! clear fp in save area
1878*5d9d9091SRichard Lowe
1879*5d9d9091SRichard Lowe	! load saved pil for return
1880*5d9d9091SRichard Lowe	ldub	[%i0 + T_PIL], %i0
1881*5d9d9091SRichard Lowe	ret
1882*5d9d9091SRichard Lowe	restore
1883*5d9d9091SRichard Lowe	SET_SIZE(intr_passivate)
1884*5d9d9091SRichard Lowe
1885*5d9d9091SRichard Lowe/*
1886*5d9d9091SRichard Lowe * intr_get_time() is a resource for interrupt handlers to determine how
1887*5d9d9091SRichard Lowe * much time has been spent handling the current interrupt. Such a function
1888*5d9d9091SRichard Lowe * is needed because higher level interrupts can arrive during the
1889*5d9d9091SRichard Lowe * processing of an interrupt, thus making direct comparisons of %tick by
1890*5d9d9091SRichard Lowe * the handler inaccurate. intr_get_time() only returns time spent in the
1891*5d9d9091SRichard Lowe * current interrupt handler.
1892*5d9d9091SRichard Lowe *
1893*5d9d9091SRichard Lowe * The caller must be calling from an interrupt handler running at a pil
1894*5d9d9091SRichard Lowe * below or at lock level. Timings are not provided for high-level
1895*5d9d9091SRichard Lowe * interrupts.
1896*5d9d9091SRichard Lowe *
1897*5d9d9091SRichard Lowe * The first time intr_get_time() is called while handling an interrupt,
1898*5d9d9091SRichard Lowe * it returns the time since the interrupt handler was invoked. Subsequent
1899*5d9d9091SRichard Lowe * calls will return the time since the prior call to intr_get_time(). Time
1900*5d9d9091SRichard Lowe * is returned as ticks, adjusted for any clock divisor due to power
1901*5d9d9091SRichard Lowe * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
1902*5d9d9091SRichard Lowe * not be the same across CPUs.
1903*5d9d9091SRichard Lowe *
1904*5d9d9091SRichard Lowe * Theory Of Intrstat[][]:
1905*5d9d9091SRichard Lowe *
1906*5d9d9091SRichard Lowe * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
1907*5d9d9091SRichard Lowe * uint64_ts per pil.
1908*5d9d9091SRichard Lowe *
1909*5d9d9091SRichard Lowe * intrstat[pil][0] is a cumulative count of the number of ticks spent
1910*5d9d9091SRichard Lowe * handling all interrupts at the specified pil on this CPU. It is
1911*5d9d9091SRichard Lowe * exported via kstats to the user.
1912*5d9d9091SRichard Lowe *
1913*5d9d9091SRichard Lowe * intrstat[pil][1] is always a count of ticks less than or equal to the
1914*5d9d9091SRichard Lowe * value in [0]. The difference between [1] and [0] is the value returned
1915*5d9d9091SRichard Lowe * by a call to intr_get_time(). At the start of interrupt processing,
1916*5d9d9091SRichard Lowe * [0] and [1] will be equal (or nearly so). As the interrupt consumes
1917*5d9d9091SRichard Lowe * time, [0] will increase, but [1] will remain the same. A call to
1918*5d9d9091SRichard Lowe * intr_get_time() will return the difference, then update [1] to be the
1919*5d9d9091SRichard Lowe * same as [0]. Future calls will return the time since the last call.
1920*5d9d9091SRichard Lowe * Finally, when the interrupt completes, [1] is updated to the same as [0].
1921*5d9d9091SRichard Lowe *
1922*5d9d9091SRichard Lowe * Implementation:
1923*5d9d9091SRichard Lowe *
1924*5d9d9091SRichard Lowe * intr_get_time() works much like a higher level interrupt arriving. It
1925*5d9d9091SRichard Lowe * "checkpoints" the timing information by incrementing intrstat[pil][0]
1926*5d9d9091SRichard Lowe * to include elapsed running time, and by setting t_intr_start to %tick.
1927*5d9d9091SRichard Lowe * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
1928*5d9d9091SRichard Lowe * and updates intrstat[pil][1] to be the same as the new value of
1929*5d9d9091SRichard Lowe * intrstat[pil][0].
1930*5d9d9091SRichard Lowe *
1931*5d9d9091SRichard Lowe * In the normal handling of interrupts, after an interrupt handler returns
1932*5d9d9091SRichard Lowe * and the code in intr_thread() updates intrstat[pil][0], it then sets
1933*5d9d9091SRichard Lowe * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
1934*5d9d9091SRichard Lowe * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
1935*5d9d9091SRichard Lowe * is 0.
1936*5d9d9091SRichard Lowe *
1937*5d9d9091SRichard Lowe * Whenever interrupts arrive on a CPU which is handling a lower pil
1938*5d9d9091SRichard Lowe * interrupt, they update the lower pil's [0] to show time spent in the
1939*5d9d9091SRichard Lowe * handler that they've interrupted. This results in a growing discrepancy
1940*5d9d9091SRichard Lowe * between [0] and [1], which is returned the next time intr_get_time() is
1941*5d9d9091SRichard Lowe * called. Time spent in the higher-pil interrupt will not be returned in
1942*5d9d9091SRichard Lowe * the next intr_get_time() call from the original interrupt, because
1943*5d9d9091SRichard Lowe * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
1944*5d9d9091SRichard Lowe */
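/*
 * In rough C, the routine below does (an illustrative sketch; the real
 * code runs with %pil raised to PIL_MAX throughout):
 *
 *	uint64_t
 *	intr_get_time(void)
 *	{
 *		uint64_t now, delta, ret;
 *
 *		now = rd_tick();
 *		delta = now - t->t_intr_start;
 *		t->t_intr_start = now;
 *		if (cpu->cpu_divisor > 1)
 *			delta *= cpu->cpu_divisor;
 *		cpu->cpu_intracct[cpu->cpu_mstate] += delta;
 *		intrstat[t->t_pil][0] += delta;
 *		ret = intrstat[t->t_pil][0] - intrstat[t->t_pil][1];
 *		intrstat[t->t_pil][1] = intrstat[t->t_pil][0];
 *		return (ret);
 *	}
 */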
1945*5d9d9091SRichard Lowe	ENTRY_NP(intr_get_time)
1946*5d9d9091SRichard Lowe#ifdef DEBUG
1947*5d9d9091SRichard Lowe	!
1948*5d9d9091SRichard Lowe	! Lots of asserts, but just check panic_quiesce first.
1949*5d9d9091SRichard Lowe	! Don't bother with lots of tests if we're just ignoring them.
1950*5d9d9091SRichard Lowe	!
1951*5d9d9091SRichard Lowe	sethi	%hi(panic_quiesce), %o0
1952*5d9d9091SRichard Lowe	ld	[%o0 + %lo(panic_quiesce)], %o0
1953*5d9d9091SRichard Lowe	brnz,pn	%o0, 2f
1954*5d9d9091SRichard Lowe	nop
1955*5d9d9091SRichard Lowe	!
1956*5d9d9091SRichard Lowe	! ASSERT(%pil <= LOCK_LEVEL)
1957*5d9d9091SRichard Lowe	!
1958*5d9d9091SRichard Lowe	rdpr	%pil, %o1
1959*5d9d9091SRichard Lowe	cmp	%o1, LOCK_LEVEL
1960*5d9d9091SRichard Lowe	ble,pt	%xcc, 0f
1961*5d9d9091SRichard Lowe	sethi	%hi(intr_get_time_high_pil), %o0	! delay
1962*5d9d9091SRichard Lowe	call	panic
1963*5d9d9091SRichard Lowe	or	%o0, %lo(intr_get_time_high_pil), %o0
1964*5d9d9091SRichard Lowe0:
1965*5d9d9091SRichard Lowe	!
1966*5d9d9091SRichard Lowe	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
1967*5d9d9091SRichard Lowe	!
1968*5d9d9091SRichard Lowe	lduh	[THREAD_REG + T_FLAGS], %o2
1969*5d9d9091SRichard Lowe	andcc	%o2, T_INTR_THREAD, %g0
1970*5d9d9091SRichard Lowe	bz,pn	%xcc, 1f
1971*5d9d9091SRichard Lowe	ldub	[THREAD_REG + T_PIL], %o1		! delay
1972*5d9d9091SRichard Lowe	brnz,pt	%o1, 0f
1973*5d9d9091SRichard Lowe1:
1974*5d9d9091SRichard Lowe	sethi	%hi(intr_get_time_not_intr), %o0
1975*5d9d9091SRichard Lowe	call	panic
1976*5d9d9091SRichard Lowe	or	%o0, %lo(intr_get_time_not_intr), %o0
1977*5d9d9091SRichard Lowe0:
1978*5d9d9091SRichard Lowe	!
1979*5d9d9091SRichard Lowe	! ASSERT(t_intr_start != 0)
1980*5d9d9091SRichard Lowe	!
1981*5d9d9091SRichard Lowe	ldx	[THREAD_REG + T_INTR_START], %o1
1982*5d9d9091SRichard Lowe	brnz,pt	%o1, 2f
1983*5d9d9091SRichard Lowe	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
1984*5d9d9091SRichard Lowe	call	panic
1985*5d9d9091SRichard Lowe	or	%o0, %lo(intr_get_time_no_start_time), %o0
1986*5d9d9091SRichard Lowe2:
1987*5d9d9091SRichard Lowe#endif /* DEBUG */
1988*5d9d9091SRichard Lowe	!
1989*5d9d9091SRichard Lowe	! %o0 = elapsed time and return value
1990*5d9d9091SRichard Lowe	! %o1 = pil
1991*5d9d9091SRichard Lowe	! %o2 = scratch
1992*5d9d9091SRichard Lowe	! %o3 = scratch
1993*5d9d9091SRichard Lowe	! %o4 = scratch
1994*5d9d9091SRichard Lowe	! %o5 = cpu
1995*5d9d9091SRichard Lowe	!
1996*5d9d9091SRichard Lowe	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
1997*5d9d9091SRichard Lowe	ldn	[THREAD_REG + T_CPU], %o5
1998*5d9d9091SRichard Lowe	ldub	[THREAD_REG + T_PIL], %o1
1999*5d9d9091SRichard Lowe	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
2000*5d9d9091SRichard Lowe	!
2001*5d9d9091SRichard Lowe	! Calculate elapsed time since t_intr_start. Update t_intr_start,
2002*5d9d9091SRichard Lowe	! get delta, and multiply by cpu_divisor if necessary.
2003*5d9d9091SRichard Lowe	!
2004*5d9d9091SRichard Lowe	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o2, %o0)
2005*5d9d9091SRichard Lowe	stx	%o2, [THREAD_REG + T_INTR_START]
2006*5d9d9091SRichard Lowe	sub	%o2, %o3, %o0
2007*5d9d9091SRichard Lowe
2008*5d9d9091SRichard Lowe	lduh	[%o5 + CPU_DIVISOR], %o4
2009*5d9d9091SRichard Lowe	cmp	%o4, 1
2010*5d9d9091SRichard Lowe	bg,a,pn	%xcc, 1f
2011*5d9d9091SRichard Lowe	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
2012*5d9d9091SRichard Lowe1:
2013*5d9d9091SRichard Lowe	! Update intracct[]
2014*5d9d9091SRichard Lowe	lduh	[%o5 + CPU_MSTATE], %o4
2015*5d9d9091SRichard Lowe	sllx	%o4, 3, %o4
2016*5d9d9091SRichard Lowe	add	%o4, CPU_INTRACCT, %o4
2017*5d9d9091SRichard Lowe	ldx	[%o5 + %o4], %o2
2018*5d9d9091SRichard Lowe	add	%o2, %o0, %o2
2019*5d9d9091SRichard Lowe	stx	%o2, [%o5 + %o4]
2020*5d9d9091SRichard Lowe
2021*5d9d9091SRichard Lowe	!
2022*5d9d9091SRichard Lowe	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
2023*5d9d9091SRichard Lowe	! cpu_m.intrstat[pil][1], which is either when the interrupt was
2024*5d9d9091SRichard Lowe	! first entered, or the last time intr_get_time() was invoked. Then
2025*5d9d9091SRichard Lowe	! update cpu_m.intrstat[pil][1] to match [0].
2026*5d9d9091SRichard Lowe	!
2027*5d9d9091SRichard Lowe	sllx	%o1, 4, %o3
2028*5d9d9091SRichard Lowe	add	%o3, CPU_MCPU, %o3
2029*5d9d9091SRichard Lowe	add	%o3, MCPU_INTRSTAT, %o3
2030*5d9d9091SRichard Lowe	add	%o3, %o5, %o3		! %o3 = cpu_m.intrstat[pil][0]
2031*5d9d9091SRichard Lowe	ldx	[%o3], %o2
2032*5d9d9091SRichard Lowe	add	%o2, %o0, %o2		! %o2 = new value for intrstat
2033*5d9d9091SRichard Lowe	stx	%o2, [%o3]
2034*5d9d9091SRichard Lowe	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
2035*5d9d9091SRichard Lowe	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
2036*5d9d9091SRichard Lowe	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time
2037*5d9d9091SRichard Lowe
2038*5d9d9091SRichard Lowe	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
2039*5d9d9091SRichard Lowe	cmp	%o2, %o1			! of either our pil %o1 or
2040*5d9d9091SRichard Lowe	movl	%xcc, %o1, %o2			! cpu_base_spl.
2041*5d9d9091SRichard Lowe	retl
2042*5d9d9091SRichard Lowe	wrpr	%g0, %o2, %pil
2043*5d9d9091SRichard Lowe	SET_SIZE(intr_get_time)
2044*5d9d9091SRichard Lowe
2045*5d9d9091SRichard Lowe#ifdef DEBUG
2046*5d9d9091SRichard Loweintr_get_time_high_pil:
2047*5d9d9091SRichard Lowe	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
2048*5d9d9091SRichard Loweintr_get_time_not_intr:
2049*5d9d9091SRichard Lowe	.asciz	"intr_get_time(): not called from an interrupt thread"
2050*5d9d9091SRichard Loweintr_get_time_no_start_time:
2051*5d9d9091SRichard Lowe	.asciz	"intr_get_time(): t_intr_start == 0"
2052*5d9d9091SRichard Lowe#endif /* DEBUG */
2053