/*	$NetBSD: frameasm.h,v 1.52 2020/07/19 07:35:08 maxv Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#include "opt_svs.h"
#include "opt_kcov.h"
#include "opt_kmsan.h"
#endif

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XENPV
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; the hypervisor handles it */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#define	XEN_ONLY2(x,y)	x,y
#define	NOT_XEN(x)

#define CLI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define STI(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define PUSHF(temp_reg) \
	movq CPUVAR(VCPU),%r ## temp_reg ;			\
	movzbl EVTCHN_UPCALL_MASK(%r ## temp_reg), %e ## temp_reg; \
	pushq %r ## temp_reg

#define POPF \
	popq %rdi; \
	call _C_LABEL(xen_write_psl)


#else /* XENPV */
#define	XEN_ONLY2(x,y)
#define	NOT_XEN(x)	x
#define CLI(temp_reg) cli
#define STI(temp_reg) sti
#define PUSHF(temp_reg) pushf
#define POPF popf
#endif	/* XENPV */
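
/*
 * Illustrative use of the macros above (a sketch, not a sequence taken
 * from the tree). The argument names a scratch register without its
 * "r"/"e" prefix, so CLI(si) expands to plain cli natively but clobbers
 * %rsi under XENPV; likewise, the XENPV POPF clobbers the caller-saved
 * registers through its xen_write_psl call:
 *
 *	PUSHF(si)			save the interrupt state
 *	CLI(si)				mask interrupts / Xen events
 *	... critical section ...
 *	POPF				restore the saved state
 */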

#define HP_NAME_CLAC		1
#define HP_NAME_STAC		2
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4
#define HP_NAME_SVS_ENTER	5
#define HP_NAME_SVS_LEAVE	6
#define HP_NAME_SVS_ENTER_ALT	7
#define HP_NAME_SVS_LEAVE_ALT	8
#define HP_NAME_IBRS_ENTER	9
#define HP_NAME_IBRS_LEAVE	10
#define HP_NAME_SVS_ENTER_NMI	11
#define HP_NAME_SVS_LEAVE_NMI	12
#define HP_NAME_MDS_LEAVE	13
#define HP_NAME_SSE2_LFENCE	14
#define HP_NAME_SSE2_MFENCE	15

#define HOTPATCH(name, size) \
123:						; \
	.pushsection	.rodata.hotpatch, "a"	; \
	.byte		name			; \
	.byte		size			; \
	.quad		123b			; \
	.popsection
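
/*
 * HOTPATCH records a (name, size, address) triple in .rodata.hotpatch;
 * at boot the kernel walks these records and rewrites the "size" bytes
 * that follow the 123: label. An illustrative C view of one record (a
 * sketch for documentation; the struct name here is hypothetical):
 *
 *	struct hotpatch_record {
 *		uint8_t  name;	one of the HP_NAME_* selectors above
 *		uint8_t  size;	number of patchable bytes at addr
 *		uint64_t addr;	address of the patch window (123b)
 *	} __packed;
 */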

#define SMAP_ENABLE \
	HOTPATCH(HP_NAME_CLAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00			; \

#define SMAP_DISABLE \
	HOTPATCH(HP_NAME_STAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00			; \

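/*
 * The three bytes in each patch window are a 3-byte NOP (0F 1F 00). On
 * CPUs with SMAP, hotpatching replaces them with clac (0F 01 CA) for
 * SMAP_ENABLE and stac (0F 01 CB) for SMAP_DISABLE, so that kernel
 * accesses to user memory fault unless explicitly allowed.
 */
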
/*
 * IBRS
 */

#define IBRS_ENTER_BYTES	12
#define IBRS_ENTER \
	HOTPATCH(HP_NAME_IBRS_ENTER, IBRS_ENTER_BYTES)		; \
	NOIBRS_ENTER
#define NOIBRS_ENTER \
	.byte 0xEB, (IBRS_ENTER_BYTES-2)	/* jmp */	; \
	.fill	(IBRS_ENTER_BYTES-2),1,0xCC

#define IBRS_LEAVE_BYTES	12
#define IBRS_LEAVE \
	HOTPATCH(HP_NAME_IBRS_LEAVE, IBRS_LEAVE_BYTES)		; \
	NOIBRS_LEAVE
#define NOIBRS_LEAVE \
	.byte 0xEB, (IBRS_LEAVE_BYTES-2)	/* jmp */	; \
	.fill	(IBRS_LEAVE_BYTES-2),1,0xCC

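/*
 * By default each 12-byte window above is a short jmp over int3 (0xCC)
 * padding, i.e. a no-op. When IBRS is enabled, the window is hotpatched
 * with live code, presumably a wrmsr to SPEC_CTRL setting (on enter) or
 * clearing (on leave) the IBRS bit; that is an assumption about the
 * patch body, which lives with the hotpatch sources, not in this header.
 */
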
/*
 * MDS
 */

#define MDS_LEAVE_BYTES	10
#define MDS_LEAVE \
	HOTPATCH(HP_NAME_MDS_LEAVE, MDS_LEAVE_BYTES)		; \
	NOMDS_LEAVE
#define NOMDS_LEAVE \
	.byte 0xEB, (MDS_LEAVE_BYTES-2)	/* jmp */		; \
	.fill	(MDS_LEAVE_BYTES-2),1,0xCC

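/*
 * As with IBRS, the 10-byte window defaults to a jmp over int3 padding
 * and is hotpatched when the MDS mitigation is enabled; the patched
 * code is expected to flush the CPU's microarchitectural buffers (a
 * verw-based sequence, as an assumption) before returning to userland.
 */
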
#define	SWAPGS	NOT_XEN(swapgs)

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)

#define	INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax

#define TEXT_USER_BEGIN	.pushsection	.text.user, "ax"
#define TEXT_USER_END	.popsection
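
/*
 * Code bracketed by TEXT_USER_BEGIN/TEXT_USER_END is collected into the
 * .text.user section: the entry/exit code that must remain mapped in
 * the user page tables when SVS unmaps the rest of the kernel (a
 * characterization inferred from how these macros pair with SVS_* in
 * the entry points).
 */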

#ifdef SVS

/* XXX: put this somewhere else */
#define SVS_UTLS		0xffffff0000000000 /* PMAP_PCPU_BASE */
#define UTLS_KPDIRPA		0
#define UTLS_SCRATCH		8
#define UTLS_RSP0		16
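
/*
 * Illustrative C view of the UTLS area these byte offsets index (a
 * sketch for documentation; only the offsets above are authoritative):
 *
 *	struct svs_utls {
 *		uint64_t kpdirpa;	UTLS_KPDIRPA: PA of the kernel
 *					page directory, for the patched
 *					SVS_ENTER to load into %cr3
 *		uint64_t scratch;	UTLS_SCRATCH: scratch slot
 *		uint64_t rsp0;		UTLS_RSP0: kernel stack pointer
 *	};
 */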

#define SVS_ENTER_BYTES	22
#define NOSVS_ENTER \
	.byte 0xEB, (SVS_ENTER_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_BYTES-2),1,0xCC
#define SVS_ENTER \
	HOTPATCH(HP_NAME_SVS_ENTER, SVS_ENTER_BYTES)	; \
	NOSVS_ENTER

#define SVS_LEAVE_BYTES	21
#define NOSVS_LEAVE \
	.byte 0xEB, (SVS_LEAVE_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_BYTES-2),1,0xCC
#define SVS_LEAVE \
	HOTPATCH(HP_NAME_SVS_LEAVE, SVS_LEAVE_BYTES)	; \
	NOSVS_LEAVE

#define SVS_ENTER_ALT_BYTES	23
#define NOSVS_ENTER_ALTSTACK \
	.byte 0xEB, (SVS_ENTER_ALT_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_ALT_BYTES-2),1,0xCC
#define SVS_ENTER_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_ENTER_ALT, SVS_ENTER_ALT_BYTES)	; \
	NOSVS_ENTER_ALTSTACK

#define SVS_LEAVE_ALT_BYTES	22
#define NOSVS_LEAVE_ALTSTACK \
	.byte 0xEB, (SVS_LEAVE_ALT_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_ALT_BYTES-2),1,0xCC
#define SVS_LEAVE_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_LEAVE_ALT, SVS_LEAVE_ALT_BYTES)	; \
	NOSVS_LEAVE_ALTSTACK

#define SVS_ENTER_NMI_BYTES	22
#define NOSVS_ENTER_NMI \
	.byte 0xEB, (SVS_ENTER_NMI_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_NMI_BYTES-2),1,0xCC
#define SVS_ENTER_NMI \
	HOTPATCH(HP_NAME_SVS_ENTER_NMI, SVS_ENTER_NMI_BYTES)	; \
	NOSVS_ENTER_NMI

#define SVS_LEAVE_NMI_BYTES	11
#define NOSVS_LEAVE_NMI \
	.byte 0xEB, (SVS_LEAVE_NMI_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_NMI_BYTES-2),1,0xCC
#define SVS_LEAVE_NMI \
	HOTPATCH(HP_NAME_SVS_LEAVE_NMI, SVS_LEAVE_NMI_BYTES)	; \
	NOSVS_LEAVE_NMI

#else
#define SVS_ENTER	/* nothing */
#define SVS_ENTER_NMI	/* nothing */
#define SVS_LEAVE	/* nothing */
#define SVS_LEAVE_NMI	/* nothing */
#define SVS_ENTER_ALTSTACK	/* nothing */
#define SVS_LEAVE_ALTSTACK	/* nothing */
#endif

#ifdef KMSAN
/* XXX this belongs somewhere else. */
#define KMSAN_ENTER	\
	movq	%rsp,%rdi		; \
	movq	$TF_REGSIZE+16+40,%rsi	; \
	xorq	%rdx,%rdx		; \
	callq	kmsan_mark		; \
	callq	kmsan_intr_enter
#define KMSAN_LEAVE	\
	pushq	%rbp			; \
	movq	%rsp,%rbp		; \
	callq	kmsan_intr_leave	; \
	popq	%rbp
#define KMSAN_INIT_ARG(sz)	\
	pushq	%rax			; \
	pushq	%rcx			; \
	pushq	%rdx			; \
	pushq	%rsi			; \
	pushq	%rdi			; \
	pushq	%r8			; \
	pushq	%r9			; \
	pushq	%r10			; \
	pushq	%r11			; \
	movq	$sz,%rdi		; \
	callq	_C_LABEL(kmsan_init_arg); \
	popq	%r11			; \
	popq	%r10			; \
	popq	%r9			; \
	popq	%r8			; \
	popq	%rdi			; \
	popq	%rsi			; \
	popq	%rdx			; \
	popq	%rcx			; \
	popq	%rax
#define KMSAN_INIT_RET(sz)	\
	pushq	%rax			; \
	pushq	%rcx			; \
	pushq	%rdx			; \
	pushq	%rsi			; \
	pushq	%rdi			; \
	pushq	%r8			; \
	pushq	%r9			; \
	pushq	%r10			; \
	pushq	%r11			; \
	movq	$sz,%rdi		; \
	callq	_C_LABEL(kmsan_init_ret); \
	popq	%r11			; \
	popq	%r10			; \
	popq	%r9			; \
	popq	%r8			; \
	popq	%rdi			; \
	popq	%rsi			; \
	popq	%rdx			; \
	popq	%rcx			; \
	popq	%rax
#else
#define KMSAN_ENTER		/* nothing */
#define KMSAN_LEAVE		/* nothing */
#define KMSAN_INIT_ARG(sz)	/* nothing */
#define KMSAN_INIT_RET(sz)	/* nothing */
#endif
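
/*
 * Illustrative use of the KMSAN argument/return helpers (a sketch;
 * somefunc is a placeholder, not a real symbol). They mark the shadow
 * state of the register arguments and of the return value as
 * initialized around a call from assembly into C, preserving all
 * caller-saved registers themselves:
 *
 *	KMSAN_INIT_ARG(8)		one 8-byte register argument
 *	call	_C_LABEL(somefunc)
 *	KMSAN_INIT_RET(8)
 */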

#ifdef KCOV
#define KCOV_DISABLE			\
	incl	CPUVAR(IDEPTH)
#define KCOV_ENABLE			\
	decl	CPUVAR(IDEPTH)
#else
#define KCOV_DISABLE		/* nothing */
#define KCOV_ENABLE		/* nothing */
#endif
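
/*
 * KCOV_DISABLE/KCOV_ENABLE raise and drop CPUVAR(IDEPTH) so that the
 * bracketed code looks like interrupt context to the kcov tracing hook
 * and is not recorded; they must be used in matched pairs.
 */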

#define	INTRENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	cld				; \
	SMAP_ENABLE			; \
	testb	$SEL_UPL,TF_CS(%rsp)	; \
	je	98f			; \
	SWAPGS				; \
	IBRS_ENTER			; \
	SVS_ENTER			; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)		; \
98:	KMSAN_ENTER

#define INTRFASTEXIT \
	jmp	intrfastexit
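
/*
 * Illustrative shape of a trap stub built on INTRENTRY (a sketch, not
 * a stub copied from vector.S; trap03/T_BPTFLT stand in for any vector
 * that pushes no hardware error code):
 *
 *	IDTVEC(trap03)
 *		pushq	$0		dummy error code
 *		pushq	$T_BPTFLT
 *		INTRENTRY
 *		... handle the trap ...
 *		INTRFASTEXIT
 *	IDTVEC_END(trap03)
 */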

#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	pushq	$GSEL(GCODE_SEL,SEL_KPL); \
/* XEN: We must fix up CS, as even kernel mode runs at CPL 3 */ \
	XEN_ONLY2(andb	$0xfc,(%rsp);)	  \
	pushq	%r13			;

#define INTR_RECURSE_ENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	cld				; \
	KMSAN_ENTER

#define	CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
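
/*
 * Illustrative exit-path pattern for the checks above (a sketch; the
 * label is arbitrary and %r14 holds curlwp, as in the FPU macro below):
 *
 *	CHECK_ASTPENDING(%r14)
 *	je	9f			no AST pending
 *	CLEAR_ASTPENDING(%r14)
 *	... deliver the AST ...
 * 9:	CHECK_DEFERRED_SWITCH
 *	jnz	...			nonzero: go call pmap_load()
 */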

/*
 * If the FPU state is not in the CPU, restore it. Executed with interrupts
 * disabled.
 *
 *     %r14 is curlwp, must not be modified
 *     %rbx must not be modified
 */
#define HANDLE_DEFERRED_FPU	\
	testl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%r14)	; \
	jnz	1f					; \
	call	_C_LABEL(fpu_handle_deferred)		; \
	orl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%r14)	; \
1:

#endif /* _AMD64_MACHINE_FRAMEASM_H */