/*	$OpenBSD: frameasm.h,v 1.21 2019/05/12 21:27:47 guenther Exp $	*/
/*	$NetBSD: frameasm.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

/*
 * These are used on interrupt or trap entry or exit.
 *
 * INTR_SAVE_GPRS makes room for a full trapframe on the stack
 * ($120 = size of the GPR area below the hardware-pushed portion)
 * and stores all general-purpose registers into their TF_* slots.
 * INTR_SAVE_MOST_GPRS_NO_ADJ is the same minus the stack adjustment
 * and minus %rcx, for entry paths that have already done both.
 * Note it also points %rbp at the TF_RBP slot so the saved frame
 * looks like a normal call frame to stack tracers.
 */
#define INTR_SAVE_GPRS \
	subq	$120,%rsp		; \
	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
	movq	%rcx,TF_RCX(%rsp)
#define INTR_SAVE_MOST_GPRS_NO_ADJ \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	leaq	TF_RBP(%rsp),%rbp	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)

/*
 * We clear registers when coming from userspace to prevent
 * user-controlled values from being available for use in speculative
 * execution in the kernel.  %rsp and %rbp are the kernel values when
 * this is used, so there are only 14 to clear.  32bit operations clear
 * the register upper-halves automatically.
 */
#define INTR_CLEAR_GPRS \
	xorl	%eax,%eax		; \
	xorl	%ebx,%ebx		; \
	xorl	%ecx,%ecx		; \
	xorl	%edx,%edx		; \
	xorl	%esi,%esi		; \
	xorl	%edi,%edi		; \
	xorl	%r8d,%r8d		; \
	xorl	%r9d,%r9d		; \
	xorl	%r10d,%r10d		; \
	xorl	%r11d,%r11d		; \
	xorl	%r12d,%r12d		; \
	xorl	%r13d,%r13d		; \
	xorl	%r14d,%r14d		; \
	xorl	%r15d,%r15d


/*
 * For real interrupt code paths, where we can come from userspace.
 *
 * On entry the stack holds trapno, err, then the hardware iretq frame
 * (%rip, %cs, %rflags, %rsp, %ss), so 24(%rsp) is the saved %cs;
 * testing its SEL_RPL bits distinguishes a trap from userspace
 * (RPL != 0) from one taken in the kernel.  Coming from userspace we
 * swapgs to get the kernel GS base, stash %rax in the per-CPU scratch
 * slot, and switch to the kernel page tables (CPUVAR(KERN_CR3) ->
 * %cr3); that CR3 load sits in a CODEPATCH region tagged
 * CPTAG_MELTDOWN_NOP so it can be patched out on CPUs that do not
 * need the Meltdown page-table separation.  Both paths then converge
 * to save the remaining GPRs; the userspace path additionally clears
 * them (see INTR_CLEAR_GPRS above).
 */
#define INTRENTRY_LABEL(label)	X##label##_untramp
#define INTRENTRY(label) \
	testb	$SEL_RPL,24(%rsp)	; \
	je	INTRENTRY_LABEL(label)	; \
	swapgs				; \
	movq	%rax,CPUVAR(SCRATCH)	; \
	CODEPATCH_START			; \
	movq	CPUVAR(KERN_CR3),%rax	; \
	movq	%rax,%cr3		; \
	CODEPATCH_END(CPTAG_MELTDOWN_NOP);\
	jmp	98f			; \
	.text				; \
	_ALIGN_TRAPS			; \
	.global	INTRENTRY_LABEL(label)	; \
INTRENTRY_LABEL(label):	/* from kernel */ \
	INTR_ENTRY_KERN			; \
	jmp	99f			; \
	_ALIGN_TRAPS			; \
98:	/* from userspace */ \
	INTR_ENTRY_USER			; \
99:	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
	INTR_CLEAR_GPRS

/*
 * Kernel-mode entry: stay on the current stack, just make room for
 * the trapframe and free up %rcx as a scratch register.
 */
#define INTR_ENTRY_KERN \
	subq	$120,%rsp		; \
	movq	%rcx,TF_RCX(%rsp)	; \
	/* the hardware puts err next to %rip, we move it elsewhere and */ \
	/* later put %rbp in this slot to make it look like a call frame */ \
	movq	(TF_RIP - 8)(%rsp),%rcx	; \
	movq	%rcx,TF_ERR(%rsp)

/*
 * Userspace entry: switch from the trampoline stack to the per-CPU
 * kernel stack (CPUVAR(KERN_RSP)); after the xchgq, %rax points at
 * the old (trampoline) stack, from which trapno, err, and the
 * five-word hardware iretq frame are copied into the trapframe via
 * %rcx.  RET_STACK_REFILL_WITH_RCX is presumably a return-stack
 * (RSB) refill to blunt speculative-return attacks -- confirm against
 * its definition.  Finally %rax is restored from the scratch slot
 * saved in INTRENTRY.
 */
#define INTR_ENTRY_USER \
	movq	CPUVAR(KERN_RSP),%rax	; \
	xchgq	%rax,%rsp		; \
	movq	%rcx,TF_RCX(%rsp)	; \
	RET_STACK_REFILL_WITH_RCX	; \
	/* copy trapno+err to the trap frame */ \
	movq	0(%rax),%rcx		; \
	movq	%rcx,TF_TRAPNO(%rsp)	; \
	movq	8(%rax),%rcx		; \
	movq	%rcx,TF_ERR(%rsp)	; \
	addq	$16,%rax		; \
	/* copy iretq frame to the trap frame */ \
	movq	IRETQ_RIP(%rax),%rcx	; \
	movq	%rcx,TF_RIP(%rsp)	; \
	movq	IRETQ_CS(%rax),%rcx	; \
	movq	%rcx,TF_CS(%rsp)	; \
	movq	IRETQ_RFLAGS(%rax),%rcx	; \
	movq	%rcx,TF_RFLAGS(%rsp)	; \
	movq	IRETQ_RSP(%rax),%rcx	; \
	movq	%rcx,TF_RSP(%rsp)	; \
	movq	IRETQ_SS(%rax),%rcx	; \
	movq	%rcx,TF_SS(%rsp)	; \
	movq	CPUVAR(SCRATCH),%rax

/* For faking up an interrupt frame when we're already in the kernel */
#define INTR_REENTRY \
	INTR_SAVE_GPRS

#define INTRFASTEXIT \
	jmp	intr_fast_exit

/*
 * Push a fake hardware interrupt frame (%ss, %rsp, %rflags, %cs,
 * %rip) on the current stack, mimicking what the CPU pushes on a
 * same-privilege interrupt.  The %rip slot is filled from %r13 --
 * NOTE(review): the caller apparently pre-loads the resume address
 * into %r13; verify at the use sites.
 */
#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	pushq	%r13			;

/*
 * Load curproc into reg and leave ZF clear iff it exists and has an
 * AST pending: a NULL curproc short-circuits to 99: with ZF set (so
 * a following jnz/jne falls through), otherwise ZF reflects the
 * cmpl against P_MD_ASTPENDING.
 */
#define CHECK_ASTPENDING(reg)	movq	CPUVAR(CURPROC),reg		; \
				cmpq	$0, reg				; \
				je	99f				; \
				cmpl	$0, P_MD_ASTPENDING(reg)	; \
				99:

#define CLEAR_ASTPENDING(reg)	movl	$0, P_MD_ASTPENDING(reg)

#endif /* _AMD64_MACHINE_FRAMEASM_H */