xref: /netbsd-src/sys/arch/i386/i386/vector.S (revision 29134a5a3a8cd07f3262c0f4344135db4f9e0f87)
1/*	$NetBSD: vector.S,v 1.90 2024/08/28 23:20:50 christos Exp $	*/
2
3/*
4 * Copyright 2002 (c) Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Frank van der Linden for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *      This product includes software developed for the NetBSD Project by
20 *      Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38/*-
39 * Copyright (c) 1998, 2007, 2009 The NetBSD Foundation, Inc.
40 * All rights reserved.
41 *
42 * This code is derived from software contributed to The NetBSD Foundation
43 * by Charles M. Hannum, and by Andrew Doran.
44 *
45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions
47 * are met:
48 * 1. Redistributions of source code must retain the above copyright
49 *    notice, this list of conditions and the following disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 *    notice, this list of conditions and the following disclaimer in the
52 *    documentation and/or other materials provided with the distribution.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
55 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE.
65 */
66
67#include <machine/asm.h>
68__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.90 2024/08/28 23:20:50 christos Exp $");
69
70#include "opt_ddb.h"
71#include "opt_multiprocessor.h"
72#include "opt_xen.h"
73#include "opt_dtrace.h"
74
75#include <machine/i8259.h>
76#include <machine/i82093reg.h>
77#include <machine/i82489reg.h>
78#include <machine/frameasm.h>
79#include <machine/segments.h>
80#include <machine/specialreg.h>
81#include <machine/trap.h>
82#ifdef XEN
83#include <xen/xen.h>
84#endif
85
86#include "ioapic.h"
87#include "lapic.h"
88#ifndef XENPV
89#include "hyperv.h"
90#endif
91
92#include "assym.h"
93
94/*
95 * Macros for interrupt entry, call to handler, and exit.
96 *
97 * XXX
98 * The interrupt frame is set up to look like a trap frame.  This may be a
99 * waste.  The only handler which needs a frame is the clock handler, and it
100 * only needs a few bits.  Xdoreti() needs a trap frame for handling ASTs, but
101 * it could easily convert the frame on demand.
102 *
103 * The direct costs of setting up a trap frame are two pushl's (error code and
104 * trap number), an addl to get rid of these, and pushing and popping the
105 * callee-saved registers %esi, %edi, %ebx, and %ebp twice.
106 *
107 * If the interrupt frame is made more flexible,  INTR can push %eax first and
108 * decide the ipending case with less overhead, e.g., by avoiding loading the
109 * segment registers.
110 */
111
112/*
113 * Store address of TSS in %eax, given a selector in %eax.
114 * Clobbers %eax, %ecx, %edx, but that's ok for its usage.
115 * This is a bit complicated, but it's done to make as few
116 * assumptions as possible about the validity of the environment.
117 * The GDT and the current and previous TSS are known to be OK,
118 * otherwise we would not be here. The only other thing that needs
119 * to be OK is the cpu_info structure for the current CPU.
120 */
#define GET_TSS \
	andl	$0xfff8,%eax		/* strip RPL/TI: selector -> GDT offset */;\
	addl	CPUVAR(GDT),%eax	/* %eax = address of the descriptor */	;\
	movl	2(%eax),%edx		/* descriptor bytes 2..5 */		;\
	andl	$0xffffff,%edx		/* keep base bits 0..23 */		;\
	movzbl	7(%eax),%eax		/* base bits 24..31 */			;\
	shl	$24,%eax				;\
	orl	%edx,%eax		/* %eax = 32-bit TSS base address */
129
#ifdef KDTRACE_HOOKS
	/* Hook variables read by the trap path when dtrace FBT is active. */
	.bss
	.globl	dtrace_invop_jump_addr
	.align	4
	.type	dtrace_invop_jump_addr, @object
	.size	dtrace_invop_jump_addr, 4
dtrace_invop_jump_addr:
	/* Function pointer installed by dtrace; 0 when no probe is armed. */
	.zero	4
	.globl	dtrace_invop_calltrap_addr
	.align	4
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 4
dtrace_invop_calltrap_addr:
	/* NOTE(review): 8 bytes reserved but .size declares 4 -- confirm. */
	.zero	8
	.text
#endif
146
147#ifndef XENPV
148#if NLAPIC > 0
149#ifdef MULTIPROCESSOR
150/*
151 * General purpose IPI handler.
152 */
IDTVEC(recurse_lapic_ipi)
	/* Software recursion entry: build a fake hardware interrupt
	 * frame and join the common IPI path at label 1. */
	INTR_RECURSE_HWFRAME
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	jmp	1f
IDTVEC_END(recurse_lapic_ipi)
IDTVEC(intr_x2apic_ipi)
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
	xorl	%eax,%eax
	xorl	%edx,%edx
	wrmsr				/* ack: x2APIC EOI via MSR write */
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_HIGH,%ebx
	jae	2f			/* already at IPL_HIGH: hold it */
	jmp	1f
IDTVEC_END(intr_x2apic_ipi)
IDTVEC(intr_lapic_ipi)
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	movl	_C_LABEL(local_apic_va),%ebx
	movl	$0,LAPIC_EOI(%ebx)	/* ack: MMIO EOI register */
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_HIGH,%ebx
	jae	2f			/* already at IPL_HIGH: hold it */
IDTVEC_END(intr_lapic_ipi)
IDTVEC(resume_lapic_ipi)
1:
	pushl	%ebx			/* previous ipl, consumed by Xdoreti */
	IDEPTH_INCR
	movb	$IPL_HIGH,CPUVAR(ILEVEL)
	sti
	call	_C_LABEL(x86_ipi_handler)
	cli
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */
2:
	/* Masked: record LIR_IPI pending (bit lives in the high word). */
	btsl	$(LIR_IPI - 32),CPUVAR(IPENDING)+4
	INTRFASTEXIT
IDTVEC_END(resume_lapic_ipi)
196
197/*
198 * TLB shootdown handler.
199 */
IDTVEC(intr_lapic_tlb)
	/* TLB shootdown IPI: run the pmap handler, then EOI and return.
	 * Never deferred -- no ipl check, no pending bookkeeping. */
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	call	_C_LABEL(pmap_tlb_intr)
	movl	_C_LABEL(local_apic_va),%eax
	movl	$0,LAPIC_EOI(%eax)	/* ack: MMIO EOI register */
	INTRFASTEXIT
IDTVEC_END(intr_lapic_tlb)
209
IDTVEC(intr_x2apic_tlb)
	/* TLB shootdown IPI, x2APIC flavour: same as intr_lapic_tlb but
	 * the EOI is delivered through the x2APIC MSR interface. */
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	call	_C_LABEL(pmap_tlb_intr)
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
	xorl	%eax,%eax
	xorl	%edx,%edx
	wrmsr				/* ack: x2APIC EOI via MSR write */
	INTRFASTEXIT
IDTVEC_END(intr_x2apic_tlb)
221
222#if defined(DDB)
223/*
224 * No need to use INTRENTRY, since we were brought here through a task-gate
225 * which triggered a hardware context switch and saved the GPRs in the TSS.
226 */
IDTVEC(intr_ddbipi)
1:
	str	%ax			/* %ax = our TSS selector */
	GET_TSS				/* %eax = our TSS base */
	movzwl	(%eax),%eax		/* word 0: previous task's selector */
	GET_TSS				/* %eax = interrupted task's TSS */
	pushl	%eax			/* argument for ddb_ipi_tss() */
	movl	_C_LABEL(local_apic_va),%ebx
	movl	$0xff,LAPIC_TPRI(%ebx)	/* raise TPR: block all interrupts */
	movl	_C_LABEL(local_apic_va),%ebx
	movl	$0,LAPIC_EOI(%ebx)	/* ack the IPI */
	sti
	call	_C_LABEL(ddb_ipi_tss)	/* hand saved state to ddb */
	addl	$4,%esp
	movl	_C_LABEL(local_apic_va),%ebx
	movl	$0,LAPIC_TPRI(%ebx)	/* drop TPR: allow interrupts again */
	iret				/* task-switch back to interrupted task */
	jmp	1b			/* re-entered through the task gate */
IDTVEC_END(intr_ddbipi)
246
IDTVEC(intr_x2apic_ddbipi)
	/*
	 * ddb IPI handler, x2APIC flavour.  Entered through a task gate
	 * (see the comment above intr_ddbipi): the GPRs are already saved
	 * in the TSS, so no INTRENTRY.  Recovers the interrupted task's
	 * TSS, blocks all interrupts via TPR, acks, and calls ddb.
	 */
1:
	str	%ax			/* %ax = our TSS selector */
	GET_TSS				/* %eax = our TSS base */
	movzwl	(%eax),%eax		/* word 0: previous task's selector */
	GET_TSS				/* %eax = interrupted task's TSS */
	pushl	%eax			/* argument for ddb_ipi_tss() */
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_TPRI),%ecx
	/*
	 * Fix: was "movl 0xff,%eax", which loads from absolute address
	 * 0xff.  The intent is the immediate 0xff (TPR = block all
	 * interrupts), matching the xAPIC variant's $0xff to LAPIC_TPRI.
	 */
	movl	$0xff,%eax
	xorl	%edx,%edx
	wrmsr				/* raise TPR via MSR */
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
	xorl	%eax,%eax
	xorl	%edx,%edx
	wrmsr				/* ack the IPI */
	sti
	call	_C_LABEL(ddb_ipi_tss)	/* hand saved state to ddb */
	addl	$4,%esp
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_TPRI),%ecx
	xorl	%eax,%eax
	xorl	%edx,%edx
	wrmsr				/* drop TPR: allow interrupts again */
	iret				/* task-switch back to interrupted task */
	jmp	1b			/* re-entered through the task gate */
IDTVEC_END(intr_x2apic_ddbipi)
272#endif /* DDB */
273#endif /* MULTIPROCESSOR */
274
275	/*
276	 * Interrupt from the local APIC timer.
277	 */
IDTVEC(recurse_lapic_ltimer)
	/* Software recursion entry: build a fake hardware interrupt
	 * frame and join the common timer path at label 1. */
	INTR_RECURSE_HWFRAME
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	jmp	1f
IDTVEC_END(recurse_lapic_ltimer)
IDTVEC(intr_x2apic_ltimer)
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
	xorl	%eax,%eax
	xorl	%edx,%edx
	wrmsr				/* ack: x2APIC EOI via MSR write */
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_CLOCK,%ebx
	jae	2f			/* at/above IPL_CLOCK: hold it */
	jmp	1f
IDTVEC_END(intr_x2apic_ltimer)
IDTVEC(intr_lapic_ltimer)
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	movl	_C_LABEL(local_apic_va),%ebx
	movl	$0,LAPIC_EOI(%ebx)	/* ack: MMIO EOI register */
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_CLOCK,%ebx
	jae	2f			/* at/above IPL_CLOCK: hold it */
IDTVEC_END(intr_lapic_ltimer)
IDTVEC(resume_lapic_ltimer)
1:
	pushl	%ebx			/* previous ipl, consumed by Xdoreti */
	IDEPTH_INCR
	movb	$IPL_CLOCK,CPUVAR(ILEVEL)
	sti
	pushl	$0			/* dummy argument for lapic_clockintr */
	call	_C_LABEL(lapic_clockintr)
	addl	$4,%esp			/* toss the arg */
	cli
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */
2:
	/* Masked: record LIR_TIMER pending (bit lives in the high word). */
	btsl	$(LIR_TIMER - 32),CPUVAR(IPENDING)+4
	INTRFASTEXIT
IDTVEC_END(resume_lapic_ltimer)
323
324#if NHYPERV > 0
325	/*
326	 * Hyper-V event channel upcall interrupt handler.
327	 * Only used when the hypervisor supports direct vector callbacks.
328	 */
IDTVEC(recurse_hyperv_hypercall)
	/* Software recursion entry: build a fake hardware interrupt
	 * frame and join the common path at label 1. */
	INTR_RECURSE_HWFRAME
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	jmp	1f
IDTVEC_END(recurse_hyperv_hypercall)
IDTVEC(intr_hyperv_hypercall)
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT		/* trap # for doing ASTs */
	INTRENTRY
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_NET,%ebx
	jae	2f			/* at/above IPL_NET: hold it */
	jmp	1f
IDTVEC_END(intr_hyperv_hypercall)
IDTVEC(resume_hyperv_hypercall)
1:
	pushl	%ebx			/* previous ipl, consumed by Xdoreti */
	IDEPTH_INCR
	movb	$IPL_NET,CPUVAR(ILEVEL)
	sti
	pushl	%esp			/* argument: pointer to frame on stack */
	call	_C_LABEL(hyperv_hypercall_intr)
	addl	$4,%esp			/* toss the arg */
	cli
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */
2:
	/*
	 * Masked: mark pending.  NOTE(review): this sets a bit in the low
	 * word of IPENDING, so LIR_HV must be < 32, unlike LIR_IPI and
	 * LIR_TIMER above which use the high word -- confirm.
	 */
	orl	$(1 << LIR_HV),CPUVAR(IPENDING)
	INTRFASTEXIT
IDTVEC_END(resume_hyperv_hypercall)
360#endif	/* NHYPERV > 0 */
361
362#endif /* NLAPIC > 0 */
363
364
365#define voidop(num)
366
367
368/*
369 * This macro defines the generic stub code. Its arguments modifiy it
370 * for specific PICs.
371 */
372
#define	INTRSTUB1(name, num, sub, off, early_ack, late_ack, mask, unmask, level_mask) \
IDTVEC(recurse_ ## name ## num)						;\
	INTR_RECURSE_HWFRAME						;\
	subl	$4,%esp			/* space for error code */	;\
	pushl	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTRENTRY							;\
IDTVEC_END(recurse_ ## name ## num)					;\
IDTVEC(resume_ ## name ## num)						\
	movl	$IREENT_MAGIC,TF_ERR(%esp)				;\
	movl	%ebx,%esi		/* %esi = previous ipl */	;\
	movl	CPUVAR(ISOURCES) + (num) * 4,%ebp			;\
	movl	IS_MAXLEVEL(%ebp),%ebx	/* %ebx = source max level */	;\
	jmp	1f							;\
IDTVEC_END(resume_ ## name ## num)					;\
IDTVEC(intr_ ## name ## num)						;\
	pushl	$0			/* dummy error code */		;\
	pushl	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTRENTRY							;\
	movl	CPUVAR(ISOURCES) + (num) * 4,%ebp			;\
	mask(num)			/* mask it in hardware */	;\
	early_ack(num)			/* and allow other intrs */	;\
	testl	%ebp,%ebp						;\
	jz	9f			/* stray */			;\
	movl	IS_MAXLEVEL(%ebp),%ebx					;\
	movzbl	CPUVAR(ILEVEL),%esi					;\
	cmpl	%ebx,%esi						;\
	jae	10f			/* currently masked; hold it */	;\
	addl	$1,CPUVAR(NINTR)	/* statistical info */		;\
	adcl	$0,CPUVAR(NINTR)+4					;\
	addl	$1,IS_EVCNTLO(%ebp)	/* inc event counter */		;\
	adcl	$0,IS_EVCNTHI(%ebp)					;\
1:									\
	pushl	%esi			/* if_ppi */			;\
	movb	%bl,CPUVAR(ILEVEL)	/* raise to source max level */	;\
	/* switch stack if necessary, and push a ptr to our intrframe */ \
	IDEPTH_INCR							;\
	sti								;\
	movl	IS_HANDLERS(%ebp),%ebx	/* head of handler chain */	;\
	cmpl	$0,IS_MASK_COUNT(%ebp)	/* source currently masked? */	;\
	jne	12f			/* yes, hold it */		;\
6:									\
	movl	IH_LEVEL(%ebx),%eax					;\
	cmpl	%esi,%eax						;\
	jle	7f			/* at/below old ipl: defer */	;\
	pushl	IH_ARG(%ebx)						;\
	movl	IH_FUN(%ebx),%edi					;\
	movb	%al,CPUVAR(ILEVEL)	/* run at handler's level */	;\
	movl	IH_NEXT(%ebx),%ebx	/* next handler in chain */	;\
	call	*%edi			/* call it */			;\
	addl	$4,%esp			/* toss the arg */		;\
	testl	%ebx,%ebx						;\
	jnz	6b							;\
	cmpl	$0,IS_MASK_COUNT(%ebp)	/* source now masked? */	;\
	jne	12f			/* yes, deal */			;\
	cli								;\
	unmask(num)			/* unmask it in hardware */	;\
	late_ack(num)							;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
7:									\
	cli				/* rest deferred: mark pending */;\
	btsl	$(num - sub),CPUVAR(IPENDING) + off			;\
8:	level_mask(num)							;\
	late_ack(num)							;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
12:									\
	cli				/* source masked: park it */	;\
	btsl	$(num - sub),CPUVAR(IMASKED) + off			;\
	btrl	$(num - sub),CPUVAR(IPENDING) + off			;\
	jmp	8b							;\
10:									\
	btsl	$(num - sub),CPUVAR(IPENDING) + off			;\
	level_mask(num)							;\
	late_ack(num)							;\
	INTRFASTEXIT							;\
9:									\
	pushl	%esp			/* for unmask */		;\
	unmask(num)							;\
	late_ack(num)							;\
	addl	$4,%esp							;\
	INTRFASTEXIT							;\
IDTVEC_END(intr_ ## name ## num)

/*
 * INTRSTUB:   sources  0..31 -- pending/masked bits in the low word.
 * INTRSTUB32: sources 32..55 -- bit (num - 32) in the high word (+4).
 */
#define	INTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \
    INTRSTUB1(name, num, 0, 0, early_ack, late_ack, mask, unmask, level_mask)
#define	INTRSTUB32(name, num, early_ack, late_ack, mask, unmask, level_mask) \
    INTRSTUB1(name, num, 32, 4, early_ack, late_ack, mask, unmask, level_mask)
459
/*
 * Legacy i8259 stubs: IRQs 0..7 on the master PIC (acked with
 * i8259_asm_ack1), IRQs 8..15 on the slave (i8259_asm_ack2).
 * ICUADDR selects the I/O base used by the ack/mask/unmask macros.
 */
#define ICUADDR IO_ICU1

INTRSTUB(legacy,0,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,1,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,2,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,3,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,4,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,5,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,6,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,7,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
#undef ICUADDR
#define ICUADDR IO_ICU2

INTRSTUB(legacy,8,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,9,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,10,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,11,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,12,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,13,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,14,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,15,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
497
#if NIOAPIC > 0

/*
 * Instantiate stubs for all 56 I/O APIC sources of one flavour:
 * INTRSTUB for 0..31 (low pending word), INTRSTUB32 for 32..55
 * (high pending word).
 */
#define INTRSTUB_56(name,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,0,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,1,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,2,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,3,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,4,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,5,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,6,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,7,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,8,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,9,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,10,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,11,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,12,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,13,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,14,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,15,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,16,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,17,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,18,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,19,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,20,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,21,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,22,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,23,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,24,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,25,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,26,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,27,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,28,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,29,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,30,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,31,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,32,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,33,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,34,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,35,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,36,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,37,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,38,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,39,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,40,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,41,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,42,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,43,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,44,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,45,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,46,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,47,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,48,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,49,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,50,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,51,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,52,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,53,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,54,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB32(name,55,early_ack,late_ack,mask,unmask,level_mask)

/* Edge-triggered: ack only.  Level-triggered: also mask/unmask the pin. */
INTRSTUB_56(ioapic_edge,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB_56(ioapic_level,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)

INTRSTUB_56(x2apic_edge,voidop,x2apic_asm_ack,voidop,voidop,voidop)
INTRSTUB_56(x2apic_level,voidop,x2apic_asm_ack,voidop,ioapic_unmask,ioapic_mask)

#endif
565
566/*
567 * Create a struct intrstub.
568 */
/* One struct intrstub: the {Xintr, Xrecurse, Xresume} entry points. */
#define INTRSTUB_ENTRY(name) \
	.long _C_LABEL(Xintr_ ## name ), _C_LABEL(Xrecurse_ ## name ) ; \
	.long _C_LABEL(Xresume_ ## name ) ;

/*
 * Create an array of structs intrstub (16 entries).
 */
#define INTRSTUB_ARRAY_16(name) 		; \
	.type _C_LABEL(name ## _stubs), @object	; \
LABEL(name ## _stubs)				; \
	INTRSTUB_ENTRY(name ## 0)		; \
	INTRSTUB_ENTRY(name ## 1)		; \
	INTRSTUB_ENTRY(name ## 2)		; \
	INTRSTUB_ENTRY(name ## 3)		; \
	INTRSTUB_ENTRY(name ## 4)		; \
	INTRSTUB_ENTRY(name ## 5)		; \
	INTRSTUB_ENTRY(name ## 6)		; \
	INTRSTUB_ENTRY(name ## 7)		; \
	INTRSTUB_ENTRY(name ## 8)		; \
	INTRSTUB_ENTRY(name ## 9)		; \
	INTRSTUB_ENTRY(name ## 10)		; \
	INTRSTUB_ENTRY(name ## 11)		; \
	INTRSTUB_ENTRY(name ## 12)		; \
	INTRSTUB_ENTRY(name ## 13)		; \
	INTRSTUB_ENTRY(name ## 14)		; \
	INTRSTUB_ENTRY(name ## 15)		; \
END(name ## _stubs)
596
597/*
598 * Create an array of structs intrstub (56 entries).
599 */
/* As INTRSTUB_ARRAY_16, but covering all 56 I/O APIC sources. */
#define INTRSTUB_ARRAY_56(name) 		; \
	.type _C_LABEL(name ## _stubs), @object	; \
LABEL(name ## _stubs)				; \
	INTRSTUB_ENTRY(name ## 0)		; \
	INTRSTUB_ENTRY(name ## 1)		; \
	INTRSTUB_ENTRY(name ## 2)		; \
	INTRSTUB_ENTRY(name ## 3)		; \
	INTRSTUB_ENTRY(name ## 4)		; \
	INTRSTUB_ENTRY(name ## 5)		; \
	INTRSTUB_ENTRY(name ## 6)		; \
	INTRSTUB_ENTRY(name ## 7)		; \
	INTRSTUB_ENTRY(name ## 8)		; \
	INTRSTUB_ENTRY(name ## 9)		; \
	INTRSTUB_ENTRY(name ## 10)		; \
	INTRSTUB_ENTRY(name ## 11)		; \
	INTRSTUB_ENTRY(name ## 12)		; \
	INTRSTUB_ENTRY(name ## 13)		; \
	INTRSTUB_ENTRY(name ## 14)		; \
	INTRSTUB_ENTRY(name ## 15)		; \
	INTRSTUB_ENTRY(name ## 16)		; \
	INTRSTUB_ENTRY(name ## 17)		; \
	INTRSTUB_ENTRY(name ## 18)		; \
	INTRSTUB_ENTRY(name ## 19)		; \
	INTRSTUB_ENTRY(name ## 20)		; \
	INTRSTUB_ENTRY(name ## 21)		; \
	INTRSTUB_ENTRY(name ## 22)		; \
	INTRSTUB_ENTRY(name ## 23)		; \
	INTRSTUB_ENTRY(name ## 24)		; \
	INTRSTUB_ENTRY(name ## 25)		; \
	INTRSTUB_ENTRY(name ## 26)		; \
	INTRSTUB_ENTRY(name ## 27)		; \
	INTRSTUB_ENTRY(name ## 28)		; \
	INTRSTUB_ENTRY(name ## 29)		; \
	INTRSTUB_ENTRY(name ## 30)		; \
	INTRSTUB_ENTRY(name ## 31)		; \
	INTRSTUB_ENTRY(name ## 32)		; \
	INTRSTUB_ENTRY(name ## 33)		; \
	INTRSTUB_ENTRY(name ## 34)		; \
	INTRSTUB_ENTRY(name ## 35)		; \
	INTRSTUB_ENTRY(name ## 36)		; \
	INTRSTUB_ENTRY(name ## 37)		; \
	INTRSTUB_ENTRY(name ## 38)		; \
	INTRSTUB_ENTRY(name ## 39)		; \
	INTRSTUB_ENTRY(name ## 40)		; \
	INTRSTUB_ENTRY(name ## 41)		; \
	INTRSTUB_ENTRY(name ## 42)		; \
	INTRSTUB_ENTRY(name ## 43)		; \
	INTRSTUB_ENTRY(name ## 44)		; \
	INTRSTUB_ENTRY(name ## 45)		; \
	INTRSTUB_ENTRY(name ## 46)		; \
	INTRSTUB_ENTRY(name ## 47)		; \
	INTRSTUB_ENTRY(name ## 48)		; \
	INTRSTUB_ENTRY(name ## 49)		; \
	INTRSTUB_ENTRY(name ## 50)		; \
	INTRSTUB_ENTRY(name ## 51)		; \
	INTRSTUB_ENTRY(name ## 52)		; \
	INTRSTUB_ENTRY(name ## 53)		; \
	INTRSTUB_ENTRY(name ## 54)		; \
	INTRSTUB_ENTRY(name ## 55)		; \
END(name ## _stubs)
660
661#endif /* XENPV */
662
663#if defined(XEN)
#define voidop(num)

/*
 * Xen software-interrupt stub.  Unlike the native INTRSTUB, there is no
 * hardware vector path: only the recurse/resume entries are real, and
 * handlers run only when their IH_PENDING flag was set by the event code.
 */
#define	XENINTRSTUB(name, sir, level, unmask) \
IDTVEC(recurse_ ## name ## sir)						;\
	INTR_RECURSE_HWFRAME						;\
	subl	$4,%esp			/* space for error code */	;\
	pushl	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTRENTRY							;\
IDTVEC(resume_ ## name ## sir)						\
	movl	$IREENT_MAGIC,TF_ERR(%esp)				;\
	pushl	%ebx			/* old ipl, for Xdoreti */	;\
	movl	CPUVAR(ISOURCES) + (sir) * 4,%ebp			;\
	movb	$level,CPUVAR(ILEVEL)					;\
	IDEPTH_INCR /* leaves old %esp on stack	*/			;\
	STI(%eax)							;\
	movl	IS_HANDLERS(%ebp),%ebx	/* head of handler chain */	;\
6:									\
	cmpl	$0, IH_PENDING(%ebx)	/* is handler pending ? */	;\
	je	7f			/* no */			;\
	movl	$0, IH_PENDING(%ebx)					;\
	pushl	IH_ARG(%ebx)						;\
	call	*IH_FUN(%ebx)		/* call it */			;\
	addl	$4,%esp			/* toss the arg */		;\
7:									\
	movl	IH_NEXT(%ebx),%ebx	/* next handler in chain */	;\
	testl	%ebx,%ebx						;\
	jnz	6b							;\
									\
	CLI(%eax)							;\
	unmask(sir)			/* unmask it in hardware */	;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\

/*
 * Just unmasking the event isn't enough, we also need to
 * reassert the event pending bit if needed. For now just call
 * the C function doing it, maybe rewrite in inline assembly ?
 */
#define hypervisor_asm_unmask(sir)			\
	pushl	$sir					;\
	call	_C_LABEL(hypervisor_enable_sir)		;\
	addl	$4,%esp

/* One stub per Xen soft-interrupt priority level. */
XENINTRSTUB(xenev,SIR_XENIPL_VM,IPL_VM,hypervisor_asm_unmask)
XENINTRSTUB(xenev,SIR_XENIPL_SCHED,IPL_SCHED,hypervisor_asm_unmask)
XENINTRSTUB(xenev,SIR_XENIPL_HIGH,IPL_HIGH,hypervisor_asm_unmask)
709
710/* On Xen, the xenev_stubs are purely for spl entry, since there is no
711 * vector based mechanism. We however provide the entrypoint to ensure
712 * that native and Xen struct intrstub ; definitions are uniform.
713 */
/*
 * Fix: .ascii does not NUL-terminate, but this string is handed to
 * panic() as a C format string; use .asciz so it is properly terminated.
 */
panicmsg:	.asciz	"vector Xen event entry path entered."
LABEL(entry_xenev)
	/* Should be unreachable: Xen has no vector-based entry path. */
	pushl $panicmsg
	call  _C_LABEL(panic)
END(entry_xenev)

/* struct intrstub layout: {entry, recurse, resume} per soft interrupt. */
#define XENINTRSTUB_ENTRY(name, sir) \
	.long entry_xenev , _C_LABEL(Xrecurse_ ## name ## sir); \
	.long _C_LABEL(Xresume_ ## name ## sir);

	.type	_C_LABEL(xenev_stubs), @object
LABEL(xenev_stubs)
	XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_VM) ;
	XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_SCHED) ;
	XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_HIGH) ;
END(xenev_stubs)
730
731#endif /* XEN */
732
733#include "i386_trap.S"
734
735#ifdef XEN
736
/*
 * Xen event-channel upcall entry.  Builds a trap frame, records the
 * current ipl for Xdoreti, and dispatches to do_hypervisor_callback.
 */
ENTRY(hypervisor_callback)
IDTVEC(hypervisor_pvhvm_callback)
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT
	INTRENTRY
	movzbl	CPUVAR(ILEVEL),%eax
	pushl	%eax			/* old ipl, consumed by Xdoreti */
	IDEPTH_INCR
	/* IDEPTH_INCR puts %esp on stack; we use it as argument to
	 * do_hypervisor_callback. But don't restore the stack after,
	 * Xdoreti needs it too.
	 */
	call	do_hypervisor_callback
#ifndef XENPV
	/* Per-cpu vector callback delivery also needs a lapic EOI. */
	movzbl	_C_LABEL(xenhvm_use_percpu_callback),%eax
	testl	%eax, %eax
	jz	1f
	movl	_C_LABEL(local_apic_va),%eax
	movl	$0, LAPIC_EOI(%eax)
1:
#endif
	jmp	_C_LABEL(Xdoreti)
IDTVEC_END(hypervisor_pvhvm_callback)
END(hypervisor_callback)
761
762#endif /* XEN */
763#ifdef XENPV
764/*
765 * Hypervisor uses this for application faults while it executes.
766 */
ENTRY(failsafe_callback)
	/* Pop the segment registers placed on the stack by the
	 * hypervisor, then let C report the fault. */
	pop	%ds
	pop	%es
	pop	%fs
	pop	%gs
	call	_C_LABEL(xen_failsafe_handler)
	iret
END(failsafe_callback)
775
776#else	/* XENPV */
777
	.section .rodata

	/* Tables of {Xintr, Xrecurse, Xresume} entry points per source. */
INTRSTUB_ARRAY_16(legacy)

#if NIOAPIC > 0
INTRSTUB_ARRAY_56(ioapic_edge)
INTRSTUB_ARRAY_56(ioapic_level)

INTRSTUB_ARRAY_56(x2apic_edge)
INTRSTUB_ARRAY_56(x2apic_level)
#endif
789
790#endif /* XENPV */
791