/*	$NetBSD: frameasm.h,v 1.8 2007/11/22 16:16:45 bouyer Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H
#include "opt_xen.h"

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XEN
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; it is done by the hypervisor */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#endif
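
/*
 * With the definitions above, a plain "iretq" in kernel assembly
 * expands under XEN to
 *
 *	pushq	$0
 *	jmp	hypercall_page + (__HYPERVISOR_iret * 32)
 *
 * i.e. a jump into the __HYPERVISOR_iret stub of the hypercall page
 * (each stub is 32 bytes), letting the hypervisor perform the actual
 * return to the interrupted context.
 */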

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	subq	$120,%rsp	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)	; \
	cld

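/*
 * The 120 bytes reserved/released around these register saves hold
 * the 15 64-bit GPRs (15 * 8 == 120); the TF_* constants are the
 * offsets of the corresponding struct trapframe members.
 */
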
#define	INTR_RESTORE_GPRS \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_RAX(%rsp),%rax	; \
	addq	$120,%rsp

#define	INTRENTRY \
	subq	$32,%rsp		; \
	testq	$SEL_UPL,56(%rsp)	; \
	je	98f			; \
	swapgs				; \
	movw	%gs,0(%rsp)		; \
	movw	%fs,8(%rsp)		; \
	movw	%es,16(%rsp)		; \
	movw	%ds,24(%rsp)		; \
98: 	INTR_SAVE_GPRS
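
/*
 * On entry the stack already holds the hardware frame (%ss, %rsp,
 * %rflags, %cs, %rip) plus the error code and trap number pushed by
 * the vector stub.  After the "subq $32,%rsp" that makes room for the
 * four saved segment registers, the saved %cs sits at 56(%rsp)
 * (32 + 8 trapno + 8 errcode + 8 %rip), so the SEL_UPL test
 * distinguishes entry from user mode, which needs swapgs and the
 * segment saves, from entry from kernel mode.
 *
 * Sketch of a caller (the real stubs live in vector.S; T_BPTFLT is
 * just an example trap number):
 *
 *	pushq	$0			(fake error code)
 *	pushq	$T_BPTFLT		(trap number)
 *	INTRENTRY
 *	... handle the trap ...
 *	INTRFASTEXIT
 */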

#ifndef XEN
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS 		; \
	testq	$SEL_UPL,56(%rsp)	; \
	je	99f			; \
	cli				; \
	swapgs				; \
	movw	0(%rsp),%gs		; \
	movw	8(%rsp),%fs		; \
	movw	16(%rsp),%es		; \
	movw	24(%rsp),%ds		; \
99:	addq	$48,%rsp		; \
	iretq

#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	pushq	%r13			;

#else	/* !XEN */
/*
 * Disabling events before going back to user mode sounds like a BAD
 * idea.  Do not restore %gs either; HYPERVISOR_iret will do a swapgs.
 */
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS 		; \
	testq	$SEL_UPL,56(%rsp)	; \
	je	99f			; \
	movw	8(%rsp),%fs		; \
	movw	16(%rsp),%es		; \
	movw	24(%rsp),%ds		; \
99:	addq	$48,%rsp		; \
	iretq

/* We must fix up CS, as even kernel mode runs at CPL 3 */
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	andb	$0xfc,(%rsp)		; \
	pushq	%r13			;

#endif	/* !XEN */
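
/*
 * Both INTR_RECURSE_HWFRAME variants synthesize the frame the CPU
 * would push for a hardware interrupt: %ss, the pre-interrupt %rsp,
 * %rflags, %cs and a return %rip, which the caller is assumed to have
 * loaded into %r13 beforehand.  The XEN variant additionally clears
 * the RPL bits of the pushed %cs ("andb $0xfc,(%rsp)") so that the
 * SEL_UPL tests above treat the frame as a kernel one.
 */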

#define	DO_DEFERRED_SWITCH \
	cmpq	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(do_pmap_load)			; \
	1:

#define	CHECK_DEFERRED_SWITCH \
	cmpq	$0, CPUVAR(WANT_PMAPLOAD)
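
/*
 * CHECK_DEFERRED_SWITCH only sets the condition codes; a hypothetical
 * caller acts on them itself, along the lines of:
 *
 *	CHECK_DEFERRED_SWITCH
 *	jnz	9f			(a pmap load is wanted)
 *	... fast path ...
 * 9:	call	_C_LABEL(do_pmap_load)
 */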

#define CHECK_ASTPENDING(reg)	cmpq	$0, reg				; \
				je	99f				; \
				cmpl	$0, L_MD_ASTPENDING(reg)	; \
				99:

#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
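
/*
 * CHECK_ASTPENDING leaves ZF set when reg is NULL or when no AST is
 * pending, so a consumer would look like the sketch below (reg is
 * assumed to hold the current lwp, whose astpending flag
 * L_MD_ASTPENDING names):
 *
 *	CHECK_ASTPENDING(%r14)
 *	je	6f			(nothing pending)
 *	CLEAR_ASTPENDING(%r14)
 *	... process the AST ...
 * 6:
 */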

#ifdef XEN
#define CLI(reg1,reg2) \
	movl CPUVAR(CPUID),%e/**/reg1 ;			\
	shlq $6,%r/**/reg1 ;					\
	movq _C_LABEL(HYPERVISOR_shared_info),%r/**/reg2 ;	\
	addq %r/**/reg1,%r/**/reg2 ;				\
	movb $1,EVTCHN_UPCALL_MASK(%r/**/reg2)
#define STI(reg1,reg2) \
	movl CPUVAR(CPUID),%e/**/reg1 ;			\
	shlq $6,%r/**/reg1 ;					\
	movq _C_LABEL(HYPERVISOR_shared_info),%r/**/reg2 ;	\
	addq %r/**/reg1,%r/**/reg2 ;				\
	movb $0,EVTCHN_UPCALL_MASK(%r/**/reg2)
#else /* XEN */
#define CLI(reg1,reg2) cli
#define STI(reg1,reg2) sti
#endif	/* XEN */
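
/*
 * Under XEN, CLI/STI mask and unmask event delivery for the current
 * CPU by writing evtchn_upcall_mask in this CPU's vcpu_info slot of
 * the shared info page; the "shlq $6" assumes each vcpu_info entry is
 * 64 bytes.  reg1/reg2 are register name suffixes spliced in with
 * comment concatenation, so e.g. CLI(cx,si) clobbers %rcx and %rsi.
 * On a native (non-XEN) kernel the arguments are ignored and the
 * plain cli/sti instructions are used.
 */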

#endif /* _AMD64_MACHINE_FRAMEASM_H */