xref: /openbsd-src/sys/arch/amd64/include/frameasm.h (revision 505ee9ea3b177e2387d907a91ca7da069f3f14d8)
1 /*	$OpenBSD: frameasm.h,v 1.22 2019/08/07 18:53:28 guenther Exp $	*/
2 /*	$NetBSD: frameasm.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $	*/
3 
4 #ifndef _AMD64_MACHINE_FRAMEASM_H
5 #define _AMD64_MACHINE_FRAMEASM_H
6 
7 /*
8  * Macros to define pushing/popping frames for interrupts, traps
9  * and system calls. Currently all the same; will diverge later.
10  */
11 
12 /*
13  * These are used on interrupt or trap entry or exit.
14  */
/*
 * Save all general-purpose registers into the trap frame.
 * Makes room for the register-save area below the hardware-pushed
 * portion (the $120 presumably matches the GPR part of struct
 * trapframe -- confirm against the TF_* offset definitions), then
 * stores every register, %rcx included, at its TF_* slot.
 */
15 #define INTR_SAVE_GPRS \
16 	subq	$120,%rsp		; \
17 	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
18 	movq	%rcx,TF_RCX(%rsp)
/*
 * Save every general-purpose register EXCEPT %rcx, without adjusting
 * %rsp: the caller has already positioned %rsp at the trap frame.
 * %rcx is excluded because the entry paths use it as scratch and
 * save it separately, earlier.
 */
19 #define INTR_SAVE_MOST_GPRS_NO_ADJ \
20 	movq	%r15,TF_R15(%rsp)	; \
21 	movq	%r14,TF_R14(%rsp)	; \
22 	movq	%r13,TF_R13(%rsp)	; \
23 	movq	%r12,TF_R12(%rsp)	; \
24 	movq	%r11,TF_R11(%rsp)	; \
25 	movq	%r10,TF_R10(%rsp)	; \
26 	movq	%r9,TF_R9(%rsp)		; \
27 	movq	%r8,TF_R8(%rsp)		; \
28 	movq	%rdi,TF_RDI(%rsp)	; \
29 	movq	%rsi,TF_RSI(%rsp)	; \
30 	movq	%rbp,TF_RBP(%rsp)	; \
	/* point %rbp at the saved-%rbp slot so the trap frame links */ \
	/* into the call-frame chain for stack traces / ddb */ \
31 	leaq	TF_RBP(%rsp),%rbp	; \
32 	movq	%rbx,TF_RBX(%rsp)	; \
33 	movq	%rdx,TF_RDX(%rsp)	; \
34 	movq	%rax,TF_RAX(%rsp)
35 
36 /*
37  * We clear registers when coming from userspace to prevent
38  * user-controlled values from being available for use in speculative
39  * execution in the kernel.  %rsp and %rbp are the kernel values when
40  * this is used, so there are only 14 to clear.  32bit operations clear
41  * the register upper-halves automatically.
42  */
/* zero the 14 non-%rsp/%rbp GPRs (see block comment above); each */
/* 32-bit xor clears the full 64-bit register */
43 #define INTR_CLEAR_GPRS \
44 	xorl	%eax,%eax		; \
45 	xorl	%ebx,%ebx		; \
46 	xorl	%ecx,%ecx		; \
47 	xorl	%edx,%edx		; \
48 	xorl	%esi,%esi		; \
49 	xorl	%edi,%edi		; \
50 	xorl	%r8d,%r8d		; \
51 	xorl	%r9d,%r9d		; \
52 	xorl	%r10d,%r10d		; \
53 	xorl	%r11d,%r11d		; \
54 	xorl	%r12d,%r12d		; \
55 	xorl	%r13d,%r13d		; \
56 	xorl	%r14d,%r14d		; \
57 	xorl	%r15d,%r15d
58 
59 
/*
 * For real interrupt/trap code paths, where we can come from userspace.
 *
 * INTRENTRY(label) is the common entry sequence:
 *  - test the RPL bits of the saved %cs (at 24(%rsp), i.e. below the
 *    hardware frame plus a pushed trapno+err pair -- confirm against
 *    the vector stubs) to decide user vs. kernel origin;
 *  - from userspace: swapgs to the kernel %gs base, stash %rax, and
 *    switch to the kernel CR3 (the CR3 switch is a codepatch region
 *    that is NOPed out when the Meltdown workaround is not needed);
 *  - from the kernel (the X<label>_untramp entry): none of that is
 *    required, just build the trap frame in place.
 * Both paths converge at 99: to save, then clear, the GPRs.
 */
60 /* For real interrupt code paths, where we can come from userspace */
61 #define INTRENTRY_LABEL(label)	X##label##_untramp
62 #define	INTRENTRY(label) \
63 	testb	$SEL_RPL,24(%rsp)	; \
64 	je	INTRENTRY_LABEL(label)	; \
	/* from userspace: kernel %gs, save %rax, switch to kernel CR3 */ \
65 	swapgs				; \
66 	FENCE_SWAPGS_MIS_TAKEN 		; \
67 	movq	%rax,CPUVAR(SCRATCH)	; \
68 	CODEPATCH_START			; \
69 	movq	CPUVAR(KERN_CR3),%rax	; \
70 	movq	%rax,%cr3		; \
71 	CODEPATCH_END(CPTAG_MELTDOWN_NOP);\
72 	jmp	98f			; \
73 	.text				; \
74 	_ALIGN_TRAPS			; \
75 	.global	INTRENTRY_LABEL(label)	; \
76 INTRENTRY_LABEL(label):	/* from kernel */ \
77 	FENCE_NO_SAFE_SMAP		; \
78 	INTR_ENTRY_KERN			; \
79 	jmp	99f			; \
80 	_ALIGN_TRAPS			; \
81 98:	/* from userspace */		  \
82 	INTR_ENTRY_USER			; \
83 99:	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
84 	INTR_CLEAR_GPRS
85 
/*
 * Trap-frame setup when the trap came from the kernel: no stack or
 * CR3 switch is needed.  Make room for the register-save area, save
 * %rcx (the remaining GPRs are saved by INTRENTRY's common tail),
 * and relocate the error code out of the hardware-written slot.
 */
86 #define	INTR_ENTRY_KERN \
87 	subq	$120,%rsp		; \
88 	movq	%rcx,TF_RCX(%rsp)	; \
89 	/* the hardware puts err next to %rip, we move it elsewhere and */ \
90 	/* later put %rbp in this slot to make it look like a call frame */ \
91 	movq	(TF_RIP - 8)(%rsp),%rcx	; \
92 	movq	%rcx,TF_ERR(%rsp)
93 
/*
 * Trap-frame setup when the trap came from userspace: switch from the
 * entry (trampoline) stack to this CPU's kernel stack (KERN_RSP),
 * keeping a pointer to the old stack in %rax, then copy trapno+err
 * and the hardware iretq frame across to the kernel-stack trap frame.
 * Finishes by recovering the %rax that INTRENTRY stashed in
 * CPUVAR(SCRATCH).  %rcx is used as the copy scratch register, which
 * is why it was saved into TF_RCX first.
 */
94 #define	INTR_ENTRY_USER \
95 	movq	CPUVAR(KERN_RSP),%rax	; \
96 	xchgq	%rax,%rsp		; \
97 	movq	%rcx,TF_RCX(%rsp)	; \
	/* NOTE(review): presumably refills the return-stack buffer */ \
	/* (Spectre mitigation) clobbering %rcx -- confirm definition */ \
98 	RET_STACK_REFILL_WITH_RCX	; \
99 	/* copy trapno+err to the trap frame */ \
100 	movq	0(%rax),%rcx		; \
101 	movq	%rcx,TF_TRAPNO(%rsp)	; \
102 	movq	8(%rax),%rcx		; \
103 	movq	%rcx,TF_ERR(%rsp)	; \
104 	addq	$16,%rax		; \
105 	/* copy iretq frame to the trap frame */ \
106 	movq	IRETQ_RIP(%rax),%rcx	; \
107 	movq	%rcx,TF_RIP(%rsp)	; \
108 	movq	IRETQ_CS(%rax),%rcx	; \
109 	movq	%rcx,TF_CS(%rsp)	; \
110 	movq	IRETQ_RFLAGS(%rax),%rcx	; \
111 	movq	%rcx,TF_RFLAGS(%rsp)	; \
112 	movq	IRETQ_RSP(%rax),%rcx	; \
113 	movq	%rcx,TF_RSP(%rsp)	; \
114 	movq	IRETQ_SS(%rax),%rcx	; \
115 	movq	%rcx,TF_SS(%rsp)	; \
	/* recover %rax saved by INTRENTRY before the CR3 switch */ \
116 	movq	CPUVAR(SCRATCH),%rax
117 
/* For faking up an interrupt frame when we're already in the kernel: */
/* no stack/CR3/gs switching, just allocate and fill the trap frame */
118 /* For faking up an interrupt frame when we're already in the kernel */
119 #define	INTR_REENTRY \
120 	INTR_SAVE_GPRS
121 
/* tail-jump to the shared exit path (intr_fast_exit, defined */
/* elsewhere) which restores registers and returns from the trap */
122 #define INTRFASTEXIT \
123 	jmp	intr_fast_exit
124 
/*
 * Push a fake hardware interrupt frame (ss, rsp, rflags, cs, rip)
 * so interrupt entry code can be re-entered from within the kernel.
 * NOTE(review): the %rip slot is filled from %r13, so the caller is
 * expected to have loaded the resume address into %r13 beforehand --
 * confirm against the call sites.
 */
125 #define INTR_RECURSE_HWFRAME \
126 	movq	%rsp,%r10		; \
127 	movl	%ss,%r11d		; \
128 	pushq	%r11			; \
129 	pushq	%r10			; \
130 	pushfq				; \
131 	movl	%cs,%r11d		; \
132 	pushq	%r11			; \
133 	pushq	%r13			;
134 
/*
 * Check whether the current process has an AST pending; leaves the
 * answer in the flags for the caller to branch on.  ZF ends up set
 * ("equal", i.e. no AST) either because curproc is NULL (the cmpq
 * against 0 sets ZF before the je to 99:) or because the process's
 * P_MD_ASTPENDING field compares equal to 0.  reg is clobbered with
 * the curproc pointer.
 */
135 #define CHECK_ASTPENDING(reg)	movq	CPUVAR(CURPROC),reg		; \
136 				cmpq	$0, reg				; \
137 				je	99f				; \
138 				cmpl	$0, P_MD_ASTPENDING(reg)	; \
139 				99:
140 
/* clear the pending-AST flag of the process whose pointer is in reg */
141 #define CLEAR_ASTPENDING(reg)	movl	$0, P_MD_ASTPENDING(reg)
142 
143 #endif /* _AMD64_MACHINE_FRAMEASM_H */
144