xref: /netbsd-src/sys/arch/amd64/include/frameasm.h (revision 74b8eea55e36c8cefee4a29278aca4607e1a22dd)
1 /*	$NetBSD: frameasm.h,v 1.43 2019/05/14 16:59:25 maxv Exp $	*/
2 
3 #ifndef _AMD64_MACHINE_FRAMEASM_H
4 #define _AMD64_MACHINE_FRAMEASM_H
5 
6 #ifdef _KERNEL_OPT
7 #include "opt_xen.h"
8 #include "opt_svs.h"
9 #endif
10 
11 /*
12  * Macros to define pushing/popping frames for interrupts, traps
13  * and system calls. Currently all the same; will diverge later.
14  */
15 
16 #ifdef XENPV
/*
 * XENPV: paravirtualized kernel running under the Xen hypervisor.
 * Privileged instructions are replaced by hypercalls / virtual-event
 * operations.
 */
17 #define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
18 /* Xen does not need swapgs, it is done by the hypervisor */
19 #define swapgs
/* iretq becomes an iret hypercall; the pushq $0 slot is the hypercall flags argument */
20 #define iretq	pushq $0 ; jmp HYPERVISOR_iret
21 #define	XEN_ONLY2(x,y)	x,y
22 #define	NOT_XEN(x)
23 
/*
 * CLI/STI: on XENPV, interrupts are delivered as event-channel
 * upcalls, so disable/enable is done by setting/clearing the per-vCPU
 * upcall mask instead of executing cli/sti.  Clobbers the named
 * temporary register (e.g. CLI(ax) clobbers %rax).
 */
24 #define CLI(temp_reg) \
25  	movq CPUVAR(VCPU),%r ## temp_reg ;			\
26 	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);
27 
28 #define STI(temp_reg) \
29  	movq CPUVAR(VCPU),%r ## temp_reg ;			\
30 	movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);
31 
32 #else /* XENPV */
33 #define	XEN_ONLY2(x,y)
34 #define	NOT_XEN(x)	x
35 #define CLI(temp_reg) cli
36 #define STI(temp_reg) sti
37 #endif	/* XENPV */
38 
/*
 * Hotpatch identifiers: each names one kind of patchable code site.
 * The boot-time patching code looks these up to decide what bytes to
 * install at each registered site.
 */
39 #define HP_NAME_CLAC		1
40 #define HP_NAME_STAC		2
41 #define HP_NAME_NOLOCK		3
42 #define HP_NAME_RETFENCE	4
43 #define HP_NAME_SVS_ENTER	5
44 #define HP_NAME_SVS_LEAVE	6
45 #define HP_NAME_SVS_ENTER_ALT	7
46 #define HP_NAME_SVS_LEAVE_ALT	8
47 #define HP_NAME_IBRS_ENTER	9
48 #define HP_NAME_IBRS_LEAVE	10
49 #define HP_NAME_SVS_ENTER_NMI	11
50 #define HP_NAME_SVS_LEAVE_NMI	12
51 #define HP_NAME_MDS_LEAVE	13
52 
/*
 * HOTPATCH(name, size): register the 'size' bytes of code that follow
 * this macro as a patchable site.  Emits a (name, size, address)
 * record into the .rodata.hotpatch section; 'address' is the local
 * label 123: placed just before the patchable bytes.
 */
53 #define HOTPATCH(name, size) \
54 123:						; \
55 	.pushsection	.rodata.hotpatch, "a"	; \
56 	.byte		name			; \
57 	.byte		size			; \
58 	.quad		123b			; \
59 	.popsection
60 
/*
 * SMAP enable/disable: by default a 3-byte NOP (0F 1F 00); hotpatched
 * to clac/stac (also 3 bytes) on CPUs that support SMAP.
 */
61 #define SMAP_ENABLE \
62 	HOTPATCH(HP_NAME_CLAC, 3)		; \
63 	.byte 0x0F, 0x1F, 0x00			; \
64 
65 #define SMAP_DISABLE \
66 	HOTPATCH(HP_NAME_STAC, 3)		; \
67 	.byte 0x0F, 0x1F, 0x00			; \
68 
69 /*
70  * IBRS
71  */
72 
/*
 * Speculation-barrier sites (Spectre v2 entry/exit).  The default
 * (NO*) form is a 2-byte short jmp over *_BYTES-2 bytes of int3
 * (0xCC) filler; when the mitigation is enabled, the boot-time
 * hotpatcher overwrites the whole site with the real barrier code
 * (presumably MSR_SPEC_CTRL writes — see the patch code; TODO
 * confirm).  The *_BYTES constants must exactly match the size of the
 * patched-in sequences.
 */
73 #define IBRS_ENTER_BYTES	17
74 #define IBRS_ENTER \
75 	HOTPATCH(HP_NAME_IBRS_ENTER, IBRS_ENTER_BYTES)		; \
76 	NOIBRS_ENTER
77 #define NOIBRS_ENTER \
78 	.byte 0xEB, (IBRS_ENTER_BYTES-2)	/* jmp */	; \
79 	.fill	(IBRS_ENTER_BYTES-2),1,0xCC
80 
81 #define IBRS_LEAVE_BYTES	21
82 #define IBRS_LEAVE \
83 	HOTPATCH(HP_NAME_IBRS_LEAVE, IBRS_LEAVE_BYTES)		; \
84 	NOIBRS_LEAVE
85 #define NOIBRS_LEAVE \
86 	.byte 0xEB, (IBRS_LEAVE_BYTES-2)	/* jmp */	; \
87 	.fill	(IBRS_LEAVE_BYTES-2),1,0xCC
88 
89 /*
90  * MDS
91  */
92 
/* Same jmp-over-filler scheme as IBRS; patched with the MDS buffer
 * flush sequence when that mitigation is enabled. */
93 #define MDS_LEAVE_BYTES	20
94 #define MDS_LEAVE \
95 	HOTPATCH(HP_NAME_MDS_LEAVE, MDS_LEAVE_BYTES)		; \
96 	NOMDS_LEAVE
97 #define NOMDS_LEAVE \
98 	.byte 0xEB, (MDS_LEAVE_BYTES-2)	/* jmp */		; \
99 	.fill	(MDS_LEAVE_BYTES-2),1,0xCC
100 
/* swapgs, elided on XENPV where the hypervisor manages the GS base. */
101 #define	SWAPGS	NOT_XEN(swapgs)
102 
103 /*
104  * These are used on interrupt or trap entry or exit.
105  */
/*
 * Spill all general-purpose registers into the trapframe at %rsp.
 * Each store targets its own TF_* slot, so the ordering is arbitrary;
 * %rsp itself is not modified here (the frame was allocated by the
 * caller, e.g. INTRENTRY's subq $TF_REGSIZE,%rsp).
 */
#define INTR_SAVE_GPRS \
	movq	%rax,TF_RAX(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdi,TF_RDI(%rsp)
122 
/*
 * Reload all general-purpose registers from the trapframe at %rsp
 * (inverse of INTR_SAVE_GPRS).  Every load reads a distinct TF_* slot
 * into a distinct register, so the ordering is arbitrary; %rsp is
 * left untouched.
 */
#define	INTR_RESTORE_GPRS \
	movq	TF_RAX(%rsp),%rax	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDI(%rsp),%rdi
139 
/*
 * .text.user: code that must remain mapped in the user page tables
 * when SVS is active (trampoline entry/exit paths).
 */
140 #define TEXT_USER_BEGIN	.pushsection	.text.user, "ax"
141 #define TEXT_USER_END	.popsection
142 
143 #ifdef SVS
144 
/*
 * SVS (Separate Virtual Space — the Meltdown mitigation: separate
 * user/kernel page tables).  Each SVS_* site below defaults to a
 * 2-byte short jmp over int3 (0xCC) filler and is hotpatched with the
 * real page-table-switch code when SVS is enabled at boot; the
 * *_BYTES constants must exactly match the patched-in sequence sizes.
 */
145 /* XXX: put this somewhere else */
/*
 * UTLS: per-CPU storage that stays user-mapped, at a fixed VA, with
 * the byte offsets of its fields (kernel page dir PA, a scratch slot,
 * and the kernel rsp0).
 */
146 #define SVS_UTLS		0xffffff0000000000 /* PMAP_PCPU_BASE */
147 #define UTLS_KPDIRPA		0
148 #define UTLS_SCRATCH		8
149 #define UTLS_RSP0		16
150 
/* User -> kernel page table switch on normal entry. */
151 #define SVS_ENTER_BYTES	22
152 #define NOSVS_ENTER \
153 	.byte 0xEB, (SVS_ENTER_BYTES-2)	/* jmp */	; \
154 	.fill	(SVS_ENTER_BYTES-2),1,0xCC
155 #define SVS_ENTER \
156 	HOTPATCH(HP_NAME_SVS_ENTER, SVS_ENTER_BYTES)	; \
157 	NOSVS_ENTER
158 
/* Kernel -> user page table switch on normal exit. */
159 #define SVS_LEAVE_BYTES	31
160 #define NOSVS_LEAVE \
161 	.byte 0xEB, (SVS_LEAVE_BYTES-2)	/* jmp */	; \
162 	.fill	(SVS_LEAVE_BYTES-2),1,0xCC
163 #define SVS_LEAVE \
164 	HOTPATCH(HP_NAME_SVS_LEAVE, SVS_LEAVE_BYTES)	; \
165 	NOSVS_LEAVE
166 
/* Variant used on entries that run on an alternate stack. */
167 #define SVS_ENTER_ALT_BYTES	23
168 #define NOSVS_ENTER_ALTSTACK \
169 	.byte 0xEB, (SVS_ENTER_ALT_BYTES-2)	/* jmp */	; \
170 	.fill	(SVS_ENTER_ALT_BYTES-2),1,0xCC
171 #define SVS_ENTER_ALTSTACK \
172 	HOTPATCH(HP_NAME_SVS_ENTER_ALT, SVS_ENTER_ALT_BYTES)	; \
173 	NOSVS_ENTER_ALTSTACK
174 
175 #define SVS_LEAVE_ALT_BYTES	22
176 #define NOSVS_LEAVE_ALTSTACK \
177 	.byte 0xEB, (SVS_LEAVE_ALT_BYTES-2)	/* jmp */	; \
178 	.fill	(SVS_LEAVE_ALT_BYTES-2),1,0xCC
179 #define SVS_LEAVE_ALTSTACK \
180 	HOTPATCH(HP_NAME_SVS_LEAVE_ALT, SVS_LEAVE_ALT_BYTES)	; \
181 	NOSVS_LEAVE_ALTSTACK
182 
/* Variant used on the NMI path (may interrupt at any point). */
183 #define SVS_ENTER_NMI_BYTES	22
184 #define NOSVS_ENTER_NMI \
185 	.byte 0xEB, (SVS_ENTER_NMI_BYTES-2)	/* jmp */	; \
186 	.fill	(SVS_ENTER_NMI_BYTES-2),1,0xCC
187 #define SVS_ENTER_NMI \
188 	HOTPATCH(HP_NAME_SVS_ENTER_NMI, SVS_ENTER_NMI_BYTES)	; \
189 	NOSVS_ENTER_NMI
190 
191 #define SVS_LEAVE_NMI_BYTES	11
192 #define NOSVS_LEAVE_NMI \
193 	.byte 0xEB, (SVS_LEAVE_NMI_BYTES-2)	/* jmp */	; \
194 	.fill	(SVS_LEAVE_NMI_BYTES-2),1,0xCC
195 #define SVS_LEAVE_NMI \
196 	HOTPATCH(HP_NAME_SVS_LEAVE_NMI, SVS_LEAVE_NMI_BYTES)	; \
197 	NOSVS_LEAVE_NMI
198 
199 #else
/* Kernel built without SVS: all sites collapse to nothing. */
200 #define SVS_ENTER	/* nothing */
201 #define SVS_ENTER_NMI	/* nothing */
202 #define SVS_LEAVE	/* nothing */
203 #define SVS_LEAVE_NMI	/* nothing */
204 #define SVS_ENTER_ALTSTACK	/* nothing */
205 #define SVS_LEAVE_ALTSTACK	/* nothing */
206 #endif
207 
/*
 * INTRENTRY: common interrupt/trap entry.  Allocates the trapframe
 * and saves the GPRs; then, only if we trapped from user mode
 * (SEL_UPL bits set in the saved %cs), does the user->kernel
 * transition work: swapgs, the IBRS/SVS mitigation sites, and saving
 * the segment registers.  The je skips all of that when the trap came
 * from kernel mode.  SMAP_ENABLE comes before the TF_CS access only
 * as the patched site; TF_CS is a kernel-stack read, not a user one.
 */
208 #define	INTRENTRY \
209 	subq	$TF_REGSIZE,%rsp	; \
210 	INTR_SAVE_GPRS			; \
211 	cld				; \
212 	SMAP_ENABLE			; \
213 	testb	$SEL_UPL,TF_CS(%rsp)	; \
214 	je	98f			; \
215 	SWAPGS				; \
216 	IBRS_ENTER			; \
217 	SVS_ENTER			; \
218 	movw	%gs,TF_GS(%rsp)		; \
219 	movw	%fs,TF_FS(%rsp)		; \
220 	movw	%es,TF_ES(%rsp)		; \
221 	movw	%ds,TF_DS(%rsp)		; \
222 98:
223 
/* Common exit: tail-jump into the shared intrfastexit code. */
224 #define INTRFASTEXIT \
225 	jmp	intrfastexit
226 
/*
 * Build a fake hardware interrupt frame (%ss, %rsp, %rflags, %cs,
 * %rip) on the stack, for software-recursed interrupts.  The %rip
 * slot is filled from %r13 — NOTE(review): the caller is expected to
 * have preloaded the resume address into %r13; confirm at call sites.
 */
227 #define INTR_RECURSE_HWFRAME \
228 	movq	%rsp,%r10		; \
229 	movl	%ss,%r11d		; \
230 	pushq	%r11			; \
231 	pushq	%r10			; \
232 	pushfq				; \
233 	pushq	$GSEL(GCODE_SEL,SEL_KPL); \
234 /* XEN: We must fixup CS, as even kernel mode runs at CPL 3 */ \
235  	XEN_ONLY2(andb	$0xfc,(%rsp);)	  \
236 	pushq	%r13			;
237 
/*
 * Recursed-interrupt register save: like INTRENTRY but always from
 * kernel mode, so no swapgs/mitigations/segment saves are needed.
 */
238 #define INTR_RECURSE_ENTRY \
239 	subq	$TF_REGSIZE,%rsp	; \
240 	INTR_SAVE_GPRS			; \
241 	cld
242 
/* Sets ZF iff no deferred pmap load is pending on this CPU. */
243 #define	CHECK_DEFERRED_SWITCH \
244 	cmpl	$0, CPUVAR(WANT_PMAPLOAD)
245 
/* Test / clear the per-lwp AST-pending flag; 'reg' holds the lwp pointer. */
246 #define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
247 #define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
248 
249 #endif /* _AMD64_MACHINE_FRAMEASM_H */
250