/*	$OpenBSD: frameasm.h,v 1.27 2023/07/27 00:30:07 guenther Exp $	*/
/*	$NetBSD: frameasm.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls.  These started out identical, but have since
 * diverged per entry type.
 */

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	subq	$120,%rsp		; \
	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
	movq	%rcx,TF_RCX(%rsp)
#define INTR_SAVE_MOST_GPRS_NO_ADJ \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	leaq	TF_RBP(%rsp),%rbp	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)
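
/*
 * Note: the $120 adjustment in INTR_SAVE_GPRS covers the fifteen
 * 8-byte register slots stored above (15 * 8 == 120), and the leaq
 * leaves %rbp pointing at the saved-%rbp slot so that frame-pointer
 * based stack traces can be walked across the trap frame.
 */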

/*
 * We clear registers when coming from userspace to prevent
 * user-controlled values from being available for use in speculative
 * execution in the kernel.  %rsp and %rbp hold kernel values when
 * this is used, so there are only 14 registers to clear.  32-bit
 * operations clear the upper halves of the registers automatically.
 */
#define INTR_CLEAR_GPRS \
	xorl	%eax,%eax		; \
	xorl	%ebx,%ebx		; \
	xorl	%ecx,%ecx		; \
	xorl	%edx,%edx		; \
	xorl	%esi,%esi		; \
	xorl	%edi,%edi		; \
	xorl	%r8d,%r8d		; \
	xorl	%r9d,%r9d		; \
	xorl	%r10d,%r10d		; \
	xorl	%r11d,%r11d		; \
	xorl	%r12d,%r12d		; \
	xorl	%r13d,%r13d		; \
	xorl	%r14d,%r14d		; \
	xorl	%r15d,%r15d
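
/*
 * (xorl reg,reg is the usual zeroing idiom: it has a compact encoding
 * and the CPU recognizes it as breaking any dependency on the old
 * register value.)
 */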

/*
 * For real interrupt code paths, where we can come from userspace.
 * We only have an iretq_frame on entry.
 */
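/*
 * In the from-userspace path below, the %cr3 reload bracketed by
 * CODEPATCH_START/CODEPATCH_END switches to the kernel page tables
 * (the Meltdown mitigation); on CPUs that don't require it, that
 * span is expected to be patched to NOPs via CPTAG_MELTDOWN_NOP.
 */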
#define INTRENTRY_LABEL(label)	X##label##_untramp
#define	INTRENTRY(label) \
	endbr64				; \
	testb	$SEL_RPL,IRETQ_CS(%rsp)	; \
	je	INTRENTRY_LABEL(label)	; \
	swapgs				; \
	FENCE_SWAPGS_MIS_TAKEN		; \
	movq	%rax,CPUVAR(SCRATCH)	; \
	CODEPATCH_START			; \
	movq	CPUVAR(KERN_CR3),%rax	; \
	movq	%rax,%cr3		; \
	CODEPATCH_END(CPTAG_MELTDOWN_NOP);\
	jmp	98f			; \
END(X##label)				; \
_ENTRY(INTRENTRY_LABEL(label)) /* from kernel */ \
	FENCE_NO_SAFE_SMAP		; \
	subq	$TF_RIP,%rsp		; \
	movq	%rcx,TF_RCX(%rsp)	; \
	jmp	99f			; \
	_ALIGN_TRAPS			; \
98:	/* from userspace */		  \
	movq	CPUVAR(KERN_RSP),%rax	; \
	xchgq	%rax,%rsp		; \
	movq	%rcx,TF_RCX(%rsp)	; \
	RET_STACK_REFILL_WITH_RCX	; \
	/* copy iretq frame to the trap frame */ \
	movq	IRETQ_RIP(%rax),%rcx	; \
	movq	%rcx,TF_RIP(%rsp)	; \
	movq	IRETQ_CS(%rax),%rcx	; \
	movq	%rcx,TF_CS(%rsp)	; \
	movq	IRETQ_RFLAGS(%rax),%rcx	; \
	movq	%rcx,TF_RFLAGS(%rsp)	; \
	movq	IRETQ_RSP(%rax),%rcx	; \
	movq	%rcx,TF_RSP(%rsp)	; \
	movq	IRETQ_SS(%rax),%rcx	; \
	movq	%rcx,TF_SS(%rsp)	; \
	movq	CPUVAR(SCRATCH),%rax	; \
99:	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
	INTR_CLEAR_GPRS			; \
	movq	%rax,TF_ERR(%rsp)

#define INTRFASTEXIT \
	jmp	intr_fast_exit
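
/*
 * A hypothetical usage sketch, assuming a vector stub shaped like the
 * ones in vector.S (the name and handler call here are illustrative):
 *
 *	IDTVEC(intr_example)
 *		INTRENTRY(intr_example)
 *		...dispatch to the C handler...
 *		INTRFASTEXIT
 */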

/*
 * Entry for faking up an interrupt frame after spllower() unblocks
 * a previously received interrupt.  On entry, %r13 has the %rip
 * to return to.  %r10 and %r11 are scratch.
 */
#define	INTR_RECURSE \
	endbr64				; \
	/* fake the iretq_frame */	; \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	pushq	%r13			; \
	/* now do the rest of the intrframe */ \
	subq	$16,%rsp		; \
	INTR_SAVE_GPRS
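
/*
 * The five pushes above rebuild what the CPU would push for a
 * same-ring interrupt: %ss, %rsp, %rflags, %cs, and %rip (taken from
 * %r13).  The subq $16 then accounts for the two trapframe slots that
 * would otherwise hold the trap number and error code, so that
 * INTR_SAVE_GPRS stores the registers at their usual offsets.
 */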

/*
 * Entry for traps from kernel, where there's a trapno + err already
 * on the stack.  We have to move the err from its hardware location
 * to the location we want it.
 */
#define	TRAP_ENTRY_KERN \
	subq	$120,%rsp		; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	(TF_RIP - 8)(%rsp),%rcx	; \
	movq	%rcx,TF_ERR(%rsp)	; \
	INTR_SAVE_MOST_GPRS_NO_ADJ
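
/*
 * After the $120 adjustment the CPU-pushed error code sits just below
 * tf_rip, at (TF_RIP - 8)(%rsp); tf_err itself lives elsewhere in
 * struct trapframe, so the value is copied to where the rest of the
 * kernel expects to find it.
 */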

/*
 * Entry for traps from userland, where there's a trapno + err on
 * the iretq stack.
 * Assumes that %rax has been saved in CPUVAR(SCRATCH).
 */
#define	TRAP_ENTRY_USER \
	movq	CPUVAR(KERN_RSP),%rax		; \
	xchgq	%rax,%rsp			; \
	movq	%rcx,TF_RCX(%rsp)		; \
	RET_STACK_REFILL_WITH_RCX		; \
	/* copy trapno+err to the trap frame */ \
	movq	0(%rax),%rcx			; \
	movq	%rcx,TF_TRAPNO(%rsp)		; \
	movq	8(%rax),%rcx			; \
	movq	%rcx,TF_ERR(%rsp)		; \
	/* copy iretq frame to the trap frame */ \
	movq	(IRETQ_RIP+16)(%rax),%rcx	; \
	movq	%rcx,TF_RIP(%rsp)		; \
	movq	(IRETQ_CS+16)(%rax),%rcx	; \
	movq	%rcx,TF_CS(%rsp)		; \
	movq	(IRETQ_RFLAGS+16)(%rax),%rcx	; \
	movq	%rcx,TF_RFLAGS(%rsp)		; \
	movq	(IRETQ_RSP+16)(%rax),%rcx	; \
	movq	%rcx,TF_RSP(%rsp)		; \
	movq	(IRETQ_SS+16)(%rax),%rcx	; \
	movq	%rcx,TF_SS(%rsp)		; \
	movq	CPUVAR(SCRATCH),%rax		; \
	INTR_SAVE_MOST_GPRS_NO_ADJ		; \
	INTR_CLEAR_GPRS
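
/*
 * Here the xchgq leaves %rax pointing at the entry stack: the trap
 * number is at 0(%rax) and the error code at 8(%rax), with the
 * hardware iretq frame above them, hence the +16 on each of the
 * IRETQ_* offsets.
 */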

/*
 * Entry from syscall instruction, where RIP is in %rcx and RFLAGS is in %r11.
 * We stash the syscall # in tf_err for SPL check.
 * Assumes that %rax has been saved in CPUVAR(SCRATCH).
 */
#define	SYSCALL_ENTRY \
	movq	CPUVAR(KERN_RSP),%rax				; \
	xchgq	%rax,%rsp					; \
	movq	%rcx,TF_RCX(%rsp)				; \
	movq	%rcx,TF_RIP(%rsp)				; \
	RET_STACK_REFILL_WITH_RCX				; \
	movq	$(GSEL(GUDATA_SEL, SEL_UPL)),TF_SS(%rsp)	; \
	movq	%rax,TF_RSP(%rsp)				; \
	movq	CPUVAR(SCRATCH),%rax				; \
	INTR_SAVE_MOST_GPRS_NO_ADJ				; \
	movq	%r11,TF_RFLAGS(%rsp)				; \
	movq	$(GSEL(GUCODE_SEL, SEL_UPL)),TF_CS(%rsp)	; \
	movq	%rax,TF_ERR(%rsp)				; \
	INTR_CLEAR_GPRS
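
/*
 * The syscall instruction supplies the return %rip in %rcx and the
 * saved %rflags in %r11, but pushes no frame: the user %rsp is only
 * available as the old stack pointer recovered by the xchgq (still in
 * %rax when stored to tf_rsp), and the user %cs/%ss are reconstructed
 * from the known GUCODE_SEL/GUDATA_SEL selectors.
 */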

#define CHECK_ASTPENDING(reg)	movq	CPUVAR(CURPROC),reg		; \
				cmpq	$0, reg				; \
				je	99f				; \
				cmpl	$0, P_MD_ASTPENDING(reg)	; \
				99:

#define CLEAR_ASTPENDING(reg)	movl	$0, P_MD_ASTPENDING(reg)
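
/*
 * Hypothetical usage sketch (labels illustrative): both the
 * NULL-curproc case and the no-AST case leave the zero flag set,
 * so the caller can branch with je/jne:
 *
 *	CHECK_ASTPENDING(%r11)
 *	je	2f			// nothing pending
 *	CLEAR_ASTPENDING(%r11)
 *	...handle the AST...
 * 2:
 */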

#endif /* _AMD64_MACHINE_FRAMEASM_H */