/*	$NetBSD: vector.S,v 1.80 2022/09/12 02:21:11 knakahara Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#include "opt_dtrace.h"

#define ALIGN_TEXT	.align 16,0x90

#include <machine/i8259.h>
#include <machine/i82093reg.h>
#include <machine/i82489reg.h>
#include <machine/frameasm.h>
#include <machine/segments.h>
#include <machine/trap.h>
#include <machine/specialreg.h>

#include "ioapic.h"
#include "lapic.h"
#include "assym.h"
#ifndef XENPV
#include "hyperv.h"
#endif

	.text

/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * XXX
 * The interrupt frame is set up to look like a trap frame.  This may be a
 * waste.  The only handler which needs a frame is the clock handler, and it
 * only needs a few bits.  Xdoreti() needs a trap frame for handling ASTs, but
 * it could easily convert the frame on demand.
 *
 * The direct costs of setting up a trap frame are two pushq's (error code and
 * trap number), an addl to get rid of these, and pushing and popping the
 * callee-saved registers %esi, %edi, %ebx, and %ebp twice.
 *
 * If the interrupt frame is made more flexible, INTR can push %eax first and
 * decide the ipending case with less overhead, e.g., by avoiding loading the
 * segment registers.
 */
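
/*
 * For orientation, the hardware entry points below all build that trap-like
 * frame the same way before jumping to their PIC-specific handler (sketch
 * only; the real stubs follow):
 *
 *	pushq	$0			dummy error code
 *	pushq	$T_ASTFLT		trap number, so Xdoreti can run ASTs
 *	INTRENTRY			save registers, enter kernel context
 *	jmp	_C_LABEL(Xhandle_...)
 *
 * The recurse/resume entry points are not installed in the IDT; they are
 * used by the spl code (via the intrstub arrays at the end of this file) to
 * replay interrupts that were deferred in CPUVAR(IPENDING), and
 * INTR_RECURSE_HWFRAME fakes the hardware part of the frame for them.
 */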

#ifndef XENPV
#if NLAPIC > 0
#ifdef MULTIPROCESSOR
IDTVEC(recurse_lapic_ipi)
	INTR_RECURSE_HWFRAME
	pushq	$0
	pushq	$T_ASTFLT
	INTR_RECURSE_ENTRY
	jmp	1f
IDTVEC_END(recurse_lapic_ipi)
IDTVEC(handle_x2apic_ipi)
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
	xorl	%eax,%eax
	xorl	%edx,%edx
	wrmsr
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_HIGH,%ebx
	jae	2f
	jmp	1f
IDTVEC_END(handle_x2apic_ipi)
IDTVEC(handle_lapic_ipi)
	movq	_C_LABEL(local_apic_va),%rbx
	movl	$0,LAPIC_EOI(%rbx)
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_HIGH,%ebx
	jae	2f
	jmp	1f
IDTVEC_END(handle_lapic_ipi)
IDTVEC(resume_lapic_ipi)
1:
	incl	CPUVAR(IDEPTH)
	movb	$IPL_HIGH,CPUVAR(ILEVEL)
	sti
	pushq	%rbx
	call	_C_LABEL(x86_ipi_handler)
	jmp	_C_LABEL(Xdoreti)
2:
	btsq	$LIR_IPI,CPUVAR(IPENDING)
	INTRFASTEXIT
IDTVEC_END(resume_lapic_ipi)

	TEXT_USER_BEGIN
IDTVEC(intr_x2apic_ipi)
	pushq	$0
	pushq	$T_ASTFLT
	INTRENTRY
	jmp	_C_LABEL(Xhandle_x2apic_ipi)
IDTVEC_END(intr_x2apic_ipi)
IDTVEC(intr_lapic_ipi)
	pushq	$0
	pushq	$T_ASTFLT
	INTRENTRY
	jmp	_C_LABEL(Xhandle_lapic_ipi)
IDTVEC_END(intr_lapic_ipi)
	TEXT_USER_END

#if defined(DDB)
IDTVEC(handle_ddbipi)
	movl	$0xf,%eax
	movq	%rax,%cr8
	movq	_C_LABEL(local_apic_va),%rbx
	movl	$0,LAPIC_EOI(%rbx)
	sti
	call	_C_LABEL(ddb_ipi)
	xorl	%eax,%eax
	movq	%rax,%cr8
	INTRFASTEXIT
IDTVEC_END(handle_ddbipi)
IDTVEC(handle_x2apic_ddbipi)
	movl	$0xf,%eax
	movq	%rax,%cr8
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
	xorl	%eax,%eax
	xorl	%edx,%edx
	wrmsr
	sti
	call	_C_LABEL(ddb_ipi)
	xorl	%eax,%eax
	movq	%rax,%cr8
	INTRFASTEXIT
IDTVEC_END(handle_x2apic_ddbipi)

	TEXT_USER_BEGIN
IDTVEC(intr_ddbipi)
	pushq	$0
	pushq	$T_BPTFLT
	INTRENTRY
	jmp	_C_LABEL(Xhandle_ddbipi)
IDTVEC_END(intr_ddbipi)
IDTVEC(intr_x2apic_ddbipi)
	pushq	$0
	pushq	$T_BPTFLT
	INTRENTRY
	jmp	_C_LABEL(Xhandle_x2apic_ddbipi)
IDTVEC_END(intr_x2apic_ddbipi)
	TEXT_USER_END

#endif /* DDB */
#endif /* MULTIPROCESSOR */

	/*
	 * Interrupt from the local APIC timer.
	 */
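	/*
	 * If the current IPL is already at or above IPL_CLOCK, the handler
	 * below just latches LIR_TIMER in CPUVAR(IPENDING) and returns;
	 * otherwise it raises the IPL to IPL_CLOCK, calls lapic_clockintr()
	 * with the interrupt frame, and lets Xdoreti lower the IPL again
	 * and process any pending ASTs.
	 */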
IDTVEC(recurse_lapic_ltimer)
	INTR_RECURSE_HWFRAME
	pushq	$0
	pushq	$T_ASTFLT
	INTR_RECURSE_ENTRY
	jmp	1f
IDTVEC_END(recurse_lapic_ltimer)
IDTVEC(handle_x2apic_ltimer)
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
	xorl	%eax,%eax
	xorl	%edx,%edx
	wrmsr
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_CLOCK,%ebx
	jae	2f
	jmp	1f
IDTVEC_END(handle_x2apic_ltimer)
IDTVEC(handle_lapic_ltimer)
	movq	_C_LABEL(local_apic_va),%rbx
	movl	$0,LAPIC_EOI(%rbx)
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_CLOCK,%ebx
	jae	2f
	jmp	1f
IDTVEC_END(handle_lapic_ltimer)
IDTVEC(resume_lapic_ltimer)
1:
	incl	CPUVAR(IDEPTH)
	movb	$IPL_CLOCK,CPUVAR(ILEVEL)
	sti
	pushq	%rbx
	movq	%rsp,%rsi
	xorq	%rdi,%rdi
	call	_C_LABEL(lapic_clockintr)
	jmp	_C_LABEL(Xdoreti)
2:
	btsq	$LIR_TIMER,CPUVAR(IPENDING)
	INTRFASTEXIT
IDTVEC_END(resume_lapic_ltimer)

	TEXT_USER_BEGIN
IDTVEC(intr_x2apic_ltimer)
	pushq	$0
	pushq	$T_ASTFLT
	INTRENTRY
	jmp	_C_LABEL(Xhandle_x2apic_ltimer)
IDTVEC_END(intr_x2apic_ltimer)
IDTVEC(intr_lapic_ltimer)
	pushq	$0
	pushq	$T_ASTFLT
	INTRENTRY
	jmp	_C_LABEL(Xhandle_lapic_ltimer)
IDTVEC_END(intr_lapic_ltimer)
	TEXT_USER_END

#if NHYPERV > 0
	/*
	 * Hyper-V event channel upcall interrupt handler.
	 * Only used when the hypervisor supports direct vector callbacks.
	 */
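	/*
	 * Same deferral scheme as the timer vector above, but against
	 * IPL_NET: when the current IPL is at or above IPL_NET the
	 * interrupt is latched as LIR_HV in CPUVAR(IPENDING); otherwise
	 * the IPL is raised to IPL_NET and hyperv_hypercall_intr() is
	 * called with the interrupt frame before dropping into Xdoreti.
	 */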
IDTVEC(recurse_hyperv_hypercall)
	INTR_RECURSE_HWFRAME
	pushq	$0
	pushq	$T_ASTFLT
	INTR_RECURSE_ENTRY
	jmp	1f
IDTVEC_END(recurse_hyperv_hypercall)
IDTVEC(handle_hyperv_hypercall)
	movzbl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_NET,%ebx
	jae	2f
	jmp	1f
IDTVEC_END(handle_hyperv_hypercall)
IDTVEC(resume_hyperv_hypercall)
1:
	incl	CPUVAR(IDEPTH)
	movb	$IPL_NET,CPUVAR(ILEVEL)
	sti
	pushq	%rbx
	movq	%rsp,%rsi
	call	_C_LABEL(hyperv_hypercall_intr)
	jmp	_C_LABEL(Xdoreti)
2:
	btsq	$LIR_HV,CPUVAR(IPENDING)
	INTRFASTEXIT
IDTVEC_END(resume_hyperv_hypercall)

	TEXT_USER_BEGIN
IDTVEC(intr_hyperv_hypercall)
	pushq	$0
	pushq	$T_ASTFLT
	INTRENTRY
	jmp	_C_LABEL(Xhandle_hyperv_hypercall)
IDTVEC_END(intr_hyperv_hypercall)
	TEXT_USER_END
#endif	/* NHYPERV > 0 */

#endif /* NLAPIC > 0 */

/*
 * TLB shootdown handler.
 */
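/*
 * Unlike the vectors above, TLB shootdowns are handled entirely at the
 * current IPL: pmap_tlb_intr() is called directly (with KCOV disabled
 * around the call), the local APIC is EOI'd, and the vector returns
 * through INTRFASTEXIT without going through Xdoreti.
 */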
IDTVEC(handle_lapic_tlb)
	KCOV_DISABLE
	callq	_C_LABEL(pmap_tlb_intr)
	KCOV_ENABLE
	movq	_C_LABEL(local_apic_va),%rax
	movl	$0,LAPIC_EOI(%rax)
	INTRFASTEXIT
IDTVEC_END(handle_lapic_tlb)
IDTVEC(handle_x2apic_tlb)
	KCOV_DISABLE
	callq	_C_LABEL(pmap_tlb_intr)
	KCOV_ENABLE
	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
	xorl	%eax,%eax
	xorl	%edx,%edx
	wrmsr
	INTRFASTEXIT
IDTVEC_END(handle_x2apic_tlb)

	TEXT_USER_BEGIN
IDTVEC(intr_lapic_tlb)
	pushq	$0
	pushq	$T_ASTFLT
	INTRENTRY
	jmp	_C_LABEL(Xhandle_lapic_tlb)
IDTVEC_END(intr_lapic_tlb)
IDTVEC(intr_x2apic_tlb)
	pushq	$0
	pushq	$T_ASTFLT
	INTRENTRY
	jmp	_C_LABEL(Xhandle_x2apic_tlb)
IDTVEC_END(intr_x2apic_tlb)
	TEXT_USER_END

#endif /* !XENPV */

#define voidop(num)

#ifndef XENPV

/*
 * This macro defines the generic stub code. Its arguments modify it
 * for specific PICs.
 */

#define	INTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \
IDTVEC(recurse_ ## name ## num)						;\
	INTR_RECURSE_HWFRAME						;\
	subq	$8,%rsp							;\
	pushq	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTR_RECURSE_ENTRY						;\
	jmp	1f							;\
IDTVEC_END(recurse_ ## name ## num)					;\
IDTVEC(resume_ ## name ## num)						\
1:	movq	$IREENT_MAGIC,TF_ERR(%rsp)				;\
	movl	%ebx,%r13d						;\
	movq	CPUVAR(ISOURCES) + (num) * 8,%r14			;\
	movl	IS_MAXLEVEL(%r14),%ebx					;\
	jmp	1f							;\
IDTVEC_END(resume_ ## name ## num)					;\
IDTVEC(handle_ ## name ## num)						;\
	movq	CPUVAR(ISOURCES) + (num) * 8,%r14			;\
	mask(num)			/* mask it in hardware */	;\
	early_ack(num)			/* and allow other intrs */	;\
	testq	%r14,%r14						;\
	jz	9f			/* stray */			;\
	movl	IS_MAXLEVEL(%r14),%ebx					;\
	movzbl	CPUVAR(ILEVEL),%r13d					;\
	cmpl	%ebx,%r13d						;\
	jae	10f			/* currently masked; hold it */	;\
	incq	CPUVAR(NINTR)		/* statistical info */		;\
	incq	IS_EVCNT(%r14)						;\
1:									\
	pushq	%r13			/* save for Xdoreti */		;\
	movb	%bl,CPUVAR(ILEVEL)					;\
	sti								;\
	incl	CPUVAR(IDEPTH)						;\
	movq	IS_HANDLERS(%r14),%rbx					;\
	cmpl	$0,IS_MASK_COUNT(%r14)	/* source currently masked? */	;\
	jne	12f			/* yes, hold it */		;\
6:									\
	movl	IH_LEVEL(%rbx),%r12d					;\
	cmpl	%r13d,%r12d						;\
	jle	7f							;\
	movq	%rsp,%rsi						;\
	movq	IH_ARG(%rbx),%rdi					;\
	movb	%r12b,CPUVAR(ILEVEL)					;\
	call	*IH_FUN(%rbx)		/* call it */			;\
	movq	IH_NEXT(%rbx),%rbx	/* next handler in chain */	;\
	testq	%rbx,%rbx						;\
	jnz	6b							;\
5:									\
	cmpl	$0,IS_MASK_COUNT(%r14)	/* source now masked? */	;\
	jne	12f			/* yes, deal */			;\
	cli								;\
	unmask(num)			/* unmask it in hardware */	;\
	late_ack(num)							;\
	sti								;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
7:									\
	cli								;\
	btsq	$num,CPUVAR(IPENDING)					;\
8:	level_mask(num)							;\
	late_ack(num)							;\
	sti								;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
12:									\
	cli								;\
	btsq	$num,CPUVAR(IMASKED)					;\
	btrq	$(num),CPUVAR(IPENDING)					;\
	jmp	8b							;\
10:									\
	cli								;\
	btsq	$num,CPUVAR(IPENDING)					;\
	level_mask(num)							;\
	late_ack(num)							;\
	INTRFASTEXIT							;\
9:									\
	unmask(num)							;\
	late_ack(num)							;\
	INTRFASTEXIT							;\
IDTVEC_END(handle_ ## name ## num)					;\
	TEXT_USER_BEGIN							;\
IDTVEC(intr_ ## name ## num)						;\
	pushq	$0			/* dummy error code */		;\
	pushq	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTRENTRY							;\
	jmp	_C_LABEL(Xhandle_ ## name ## num)			;\
IDTVEC_END(intr_ ## name ## num)					;\
	TEXT_USER_END

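/*
 * As a rough guide to how the macro above is used (a sketch, not
 * assembled here): the first instantiation below,
 *
 *	INTRSTUB(legacy,0,i8259_asm_ack1,voidop,i8259_asm_mask,
 *	    i8259_asm_unmask,voidop)
 *
 * generates the entry point Xintr_legacy0 plus the Xrecurse_legacy0 and
 * Xresume_legacy0 entries published via legacy_stubs at the end of this
 * file, all sharing the common body Xhandle_legacy0, with the i8259
 * mask/unmask and ack operations substituted in.
 */
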
#define ICUADDR IO_ICU1

INTRSTUB(legacy,0,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,1,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,2,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,3,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,4,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,5,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,6,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,7,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
#undef ICUADDR
#define ICUADDR IO_ICU2

INTRSTUB(legacy,8,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,9,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,10,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,11,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,12,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,13,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,14,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,15,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)

#if NIOAPIC > 0

#define INTRSTUB_56(name,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,0,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,1,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,2,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,3,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,4,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,5,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,6,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,7,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,8,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,9,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,10,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,11,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,12,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,13,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,14,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,15,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,16,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,17,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,18,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,19,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,20,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,21,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,22,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,23,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,24,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,25,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,26,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,27,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,28,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,29,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,30,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,31,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,32,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,33,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,34,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,35,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,36,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,37,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,38,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,39,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,40,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,41,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,42,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,43,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,44,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,45,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,46,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,47,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,48,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,49,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,50,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,51,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,52,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,53,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,54,early_ack,late_ack,mask,unmask,level_mask)	;\
	INTRSTUB(name,55,early_ack,late_ack,mask,unmask,level_mask)

INTRSTUB_56(ioapic_edge,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB_56(ioapic_level,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)

INTRSTUB_56(x2apic_edge,voidop,x2apic_asm_ack,voidop,voidop,voidop)
INTRSTUB_56(x2apic_level,voidop,x2apic_asm_ack,voidop,ioapic_unmask,ioapic_mask)

#endif

/*
 * Create a struct intrstub.
 */
#define INTRSTUB_ENTRY(name) \
	.quad _C_LABEL(Xintr_ ## name ), _C_LABEL(Xrecurse_ ## name ) ; \
	.quad _C_LABEL(Xresume_ ## name ) ;
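
/*
 * In C terms each entry is, roughly (a sketch; field names illustrative,
 * see the MD intr headers for the real struct intrstub definition):
 *
 *	struct intrstub {
 *		void	*ist_entry;	- entry point installed in the IDT
 *		void	*ist_recurse;	- used by the spl code to recurse
 *		void	*ist_resume;	- used by the spl code to resume
 *	};
 */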

/*
 * Create an array of structs intrstub (16 entries).
 */
#define INTRSTUB_ARRAY_16(name) 		; \
	.type _C_LABEL(name ## _stubs), @object	; \
	.align 8				; \
LABEL(name ## _stubs)				; \
	INTRSTUB_ENTRY(name ## 0)		; \
	INTRSTUB_ENTRY(name ## 1)		; \
	INTRSTUB_ENTRY(name ## 2)		; \
	INTRSTUB_ENTRY(name ## 3)		; \
	INTRSTUB_ENTRY(name ## 4)		; \
	INTRSTUB_ENTRY(name ## 5)		; \
	INTRSTUB_ENTRY(name ## 6)		; \
	INTRSTUB_ENTRY(name ## 7)		; \
	INTRSTUB_ENTRY(name ## 8)		; \
	INTRSTUB_ENTRY(name ## 9)		; \
	INTRSTUB_ENTRY(name ## 10)		; \
	INTRSTUB_ENTRY(name ## 11)		; \
	INTRSTUB_ENTRY(name ## 12)		; \
	INTRSTUB_ENTRY(name ## 13)		; \
	INTRSTUB_ENTRY(name ## 14)		; \
	INTRSTUB_ENTRY(name ## 15)		; \
END(name ## _stubs)

/*
 * Create an array of structs intrstub (56 entries).
 */
#define INTRSTUB_ARRAY_56(name) 		; \
	.type _C_LABEL(name ## _stubs), @object	; \
	.align 8				; \
LABEL(name ## _stubs)				; \
	INTRSTUB_ENTRY(name ## 0)		; \
	INTRSTUB_ENTRY(name ## 1)		; \
	INTRSTUB_ENTRY(name ## 2)		; \
	INTRSTUB_ENTRY(name ## 3)		; \
	INTRSTUB_ENTRY(name ## 4)		; \
	INTRSTUB_ENTRY(name ## 5)		; \
	INTRSTUB_ENTRY(name ## 6)		; \
	INTRSTUB_ENTRY(name ## 7)		; \
	INTRSTUB_ENTRY(name ## 8)		; \
	INTRSTUB_ENTRY(name ## 9)		; \
	INTRSTUB_ENTRY(name ## 10)		; \
	INTRSTUB_ENTRY(name ## 11)		; \
	INTRSTUB_ENTRY(name ## 12)		; \
	INTRSTUB_ENTRY(name ## 13)		; \
	INTRSTUB_ENTRY(name ## 14)		; \
	INTRSTUB_ENTRY(name ## 15)		; \
	INTRSTUB_ENTRY(name ## 16)		; \
	INTRSTUB_ENTRY(name ## 17)		; \
	INTRSTUB_ENTRY(name ## 18)		; \
	INTRSTUB_ENTRY(name ## 19)		; \
	INTRSTUB_ENTRY(name ## 20)		; \
	INTRSTUB_ENTRY(name ## 21)		; \
	INTRSTUB_ENTRY(name ## 22)		; \
	INTRSTUB_ENTRY(name ## 23)		; \
	INTRSTUB_ENTRY(name ## 24)		; \
	INTRSTUB_ENTRY(name ## 25)		; \
	INTRSTUB_ENTRY(name ## 26)		; \
	INTRSTUB_ENTRY(name ## 27)		; \
	INTRSTUB_ENTRY(name ## 28)		; \
	INTRSTUB_ENTRY(name ## 29)		; \
	INTRSTUB_ENTRY(name ## 30)		; \
	INTRSTUB_ENTRY(name ## 31)		; \
	INTRSTUB_ENTRY(name ## 32)		; \
	INTRSTUB_ENTRY(name ## 33)		; \
	INTRSTUB_ENTRY(name ## 34)		; \
	INTRSTUB_ENTRY(name ## 35)		; \
	INTRSTUB_ENTRY(name ## 36)		; \
	INTRSTUB_ENTRY(name ## 37)		; \
	INTRSTUB_ENTRY(name ## 38)		; \
	INTRSTUB_ENTRY(name ## 39)		; \
	INTRSTUB_ENTRY(name ## 40)		; \
	INTRSTUB_ENTRY(name ## 41)		; \
	INTRSTUB_ENTRY(name ## 42)		; \
	INTRSTUB_ENTRY(name ## 43)		; \
	INTRSTUB_ENTRY(name ## 44)		; \
	INTRSTUB_ENTRY(name ## 45)		; \
	INTRSTUB_ENTRY(name ## 46)		; \
	INTRSTUB_ENTRY(name ## 47)		; \
	INTRSTUB_ENTRY(name ## 48)		; \
	INTRSTUB_ENTRY(name ## 49)		; \
	INTRSTUB_ENTRY(name ## 50)		; \
	INTRSTUB_ENTRY(name ## 51)		; \
	INTRSTUB_ENTRY(name ## 52)		; \
	INTRSTUB_ENTRY(name ## 53)		; \
	INTRSTUB_ENTRY(name ## 54)		; \
	INTRSTUB_ENTRY(name ## 55)		; \
END(name ## _stubs)

#endif /* !XENPV */

#if defined(XEN)
/* Resume/recurse procedures for spl() */
#define	XENINTRSTUB(name, sir, level, unmask) \
IDTVEC(recurse_ ## name ## sir)						;\
	INTR_RECURSE_HWFRAME						;\
	subq	$8,%rsp							;\
	pushq	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTR_RECURSE_ENTRY						;\
IDTVEC(resume_ ## name ## sir)						\
	movq	$IREENT_MAGIC,TF_ERR(%rsp)				;\
	movl	%ebx,%r13d						;\
	movq	CPUVAR(ISOURCES) + (sir) * 8,%r14			;\
1:									\
	pushq	%r13							;\
	movb	$level,CPUVAR(ILEVEL)					;\
	STI(si)								;\
	incl	CPUVAR(IDEPTH)						;\
	movq	IS_HANDLERS(%r14),%rbx					;\
6:									\
	cmpl	$0, IH_PENDING(%rbx)	/* is handler pending ? */	;\
	je	7f			/* no */			;\
	movl	$0, IH_PENDING(%rbx)					;\
	movq	IH_ARG(%rbx),%rdi					;\
	movq	%rsp,%rsi						;\
	call	*IH_FUN(%rbx)		/* call it */			;\
7:									\
	movq	IH_NEXT(%rbx),%rbx	/* next handler in chain */	;\
	testq	%rbx,%rbx						;\
	jnz	6b							;\
5:									\
	CLI(si)								;\
	unmask(sir)			/* unmask it in hardware */	;\
	STI(si)								;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\

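/*
 * Unlike the native INTRSTUB above, there is no hardware vector here: the
 * recurse/resume entries are only reached from the spl code once a Xen
 * event has been marked pending.  The stub raises the IPL to the given
 * level, walks the handler chain running only the handlers whose
 * IH_PENDING flag is set, then unmasks the source again (via
 * hypervisor_enable_sir below) and falls into Xdoreti.
 */
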
/* The unmask func for Xen events */
#define hypervisor_asm_unmask(sir)			\
	movq	$sir,%rdi				;\
	call	_C_LABEL(hypervisor_enable_sir)

XENINTRSTUB(xenev,SIR_XENIPL_VM,IPL_VM,hypervisor_asm_unmask)
XENINTRSTUB(xenev,SIR_XENIPL_SCHED,IPL_SCHED,hypervisor_asm_unmask)
XENINTRSTUB(xenev,SIR_XENIPL_HIGH,IPL_HIGH,hypervisor_asm_unmask)

/*
 * On Xen, the xenev_stubs are purely for spl entry, since there is no
 * vector-based mechanism.  We nevertheless provide the entry point so
 * that the native and Xen struct intrstub definitions stay uniform.
 */
panicmsg:	.asciz "vector Xen event entry path entered."
LABEL(entry_xenev)
	movq $panicmsg, %rdi
	callq _C_LABEL(panic)
END(entry_xenev)

#define XENINTRSTUB_ENTRY(name, sir) \
	.quad entry_xenev , _C_LABEL(Xrecurse_ ## name ## sir); \
	.quad _C_LABEL(Xresume_ ## name ## sir);

	.align 8
LABEL(xenev_stubs)
	XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_VM) ;
	XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_SCHED) ;
	XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_HIGH) ;
END(xenev_stubs)

/*
 * Xen callbacks
 */

/* Hypervisor callback */
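/*
 * When entered directly from Xen (the PV-style callback), the hypervisor
 * has left %rcx and %r11 on the stack for us; restore them and strip them
 * off before building the usual interrupt frame.
 */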

ENTRY(hypervisor_callback)
	movq	(%rsp),%rcx
	movq	8(%rsp),%r11
	addq	$16,%rsp
	pushq	$0		/* Dummy error code */
	pushq	$T_ASTFLT
	INTRENTRY
IDTVEC(handle_hypervisor_callback)
	movzbl	CPUVAR(ILEVEL),%edi
	pushq   %rdi /* for Xdoreti */
	incl	CPUVAR(IDEPTH)
	movq	%rsp,%rdi
	call	do_hypervisor_callback
#ifndef XENPV
	movzbl	_C_LABEL(xenhvm_use_percpu_callback),%edi
	testl	%edi, %edi
	jz 1f
	movq	_C_LABEL(local_apic_va),%rdi
	movl	$0,LAPIC_EOI(%rdi)
1:
#endif
	jmp 	_C_LABEL(Xdoreti)
IDTVEC_END(handle_hypervisor_callback)
END(hypervisor_callback)

	TEXT_USER_BEGIN
IDTVEC(hypervisor_pvhvm_callback)
	pushq	$0		/* Dummy error code */
	pushq	$T_ASTFLT
	INTRENTRY
	jmp _C_LABEL(Xhandle_hypervisor_callback)
IDTVEC_END(hypervisor_pvhvm_callback)
	TEXT_USER_END
#endif /* XEN */

#ifdef XENPV
/* Panic? */
ENTRY(failsafe_callback)
	movq	(%rsp),%rcx
	movq	8(%rsp),%r11
	addq	$16,%rsp
	pushq	$0
	pushq	$T_ASTFLT
	INTRENTRY
	movq	%rsp,%rdi
	subq	$8,%rdi;	/* don't forget if_ppl */
	call	xen_failsafe_handler
	INTRFASTEXIT
/*	jmp	HYPERVISOR_iret */
END(failsafe_callback)

#else	/* XENPV */

	.section .rodata

INTRSTUB_ARRAY_16(legacy)

#if NIOAPIC > 0
INTRSTUB_ARRAY_56(ioapic_edge)
INTRSTUB_ARRAY_56(ioapic_level)

INTRSTUB_ARRAY_56(x2apic_edge)
INTRSTUB_ARRAY_56(x2apic_level)
#endif
#endif /* !XENPV */
