/*	$NetBSD: frameasm.h,v 1.35 2018/02/22 08:56:51 maxv Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#include "opt_svs.h"
#endif

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XEN
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; it is done by the hypervisor. */
#define swapgs
#define iretq	pushq $0 ; jmp HYPERVISOR_iret
#define	XEN_ONLY2(x,y)	x,y
#define	NOT_XEN(x)

#define CLI(temp_reg) \
	movq	CPUVAR(VCPU),%r ## temp_reg			; \
	movb	$1,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#define STI(temp_reg) \
	movq	CPUVAR(VCPU),%r ## temp_reg			; \
	movb	$0,EVTCHN_UPCALL_MASK(%r ## temp_reg);

#else /* XEN */
#define	XEN_ONLY2(x,y)
#define	NOT_XEN(x)	x
#define CLI(temp_reg)	cli
#define STI(temp_reg)	sti
#endif	/* XEN */

/* Names identifying the hotpatchable code snippets below. */
#define HP_NAME_CLAC		1
#define HP_NAME_STAC		2
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4
#define HP_NAME_SVS_ENTER	5
#define HP_NAME_SVS_LEAVE	6
#define HP_NAME_SVS_ENTER_ALT	7
#define HP_NAME_SVS_LEAVE_ALT	8

/*
 * Record a hotpatch entry (name, size, and address of the code that
 * follows) in the .rodata.hotpatch section, so that the kernel can
 * patch that code at boot time.
 */
#define HOTPATCH(name, size) \
123:						; \
	.pushsection	.rodata.hotpatch, "a"	; \
	.byte		name			; \
	.byte		size			; \
	.quad		123b			; \
	.popsection

/*
 * Three-byte NOPs (nopl (%rax)), hotpatched to CLAC/STAC on CPUs
 * that support SMAP.
 */
#define SMAP_ENABLE \
	HOTPATCH(HP_NAME_CLAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00			; \

#define SMAP_DISABLE \
	HOTPATCH(HP_NAME_STAC, 3)		; \
	.byte 0x0F, 0x1F, 0x00			; \

#define	SWAPGS	NOT_XEN(swapgs)

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)

#define INTR_RESTORE_GPRS \
	movq	TF_RDI(%rsp),%rdi	; \
	movq	TF_RSI(%rsp),%rsi	; \
	movq	TF_RDX(%rsp),%rdx	; \
	movq	TF_RCX(%rsp),%rcx	; \
	movq	TF_R8(%rsp),%r8		; \
	movq	TF_R9(%rsp),%r9		; \
	movq	TF_R10(%rsp),%r10	; \
	movq	TF_R11(%rsp),%r11	; \
	movq	TF_R12(%rsp),%r12	; \
	movq	TF_R13(%rsp),%r13	; \
	movq	TF_R14(%rsp),%r14	; \
	movq	TF_R15(%rsp),%r15	; \
	movq	TF_RBP(%rsp),%rbp	; \
	movq	TF_RBX(%rsp),%rbx	; \
	movq	TF_RAX(%rsp),%rax

#define	TEXT_USER_BEGIN	.pushsection	.text.user, "ax"
#define	TEXT_USER_END	.popsection

#ifdef SVS

/* XXX: put this somewhere else */
#define SVS_UTLS		0xffffc00000000000 /* PMAP_PCPU_BASE */
#define UTLS_KPDIRPA		0
#define UTLS_SCRATCH		8
#define UTLS_RSP0		16

/*
 * SVS (Separate Virtual Space) stubs: by default a short jump over a
 * block of INT3 padding, hotpatched at boot with the page-table
 * switching code when SVS is enabled.
 */
#define SVS_ENTER_BYTES	22
#define SVS_ENTER \
	HOTPATCH(HP_NAME_SVS_ENTER, SVS_ENTER_BYTES)		; \
	.byte 0xEB, (SVS_ENTER_BYTES-2)	/* jmp */		; \
	.fill	(SVS_ENTER_BYTES-2),1,0xCC

#define SVS_LEAVE_BYTES	31
#define SVS_LEAVE \
	HOTPATCH(HP_NAME_SVS_LEAVE, SVS_LEAVE_BYTES)		; \
	.byte 0xEB, (SVS_LEAVE_BYTES-2)	/* jmp */		; \
	.fill	(SVS_LEAVE_BYTES-2),1,0xCC

#define SVS_ENTER_ALT_BYTES	23
#define SVS_ENTER_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_ENTER_ALT, SVS_ENTER_ALT_BYTES)	; \
	.byte 0xEB, (SVS_ENTER_ALT_BYTES-2)	/* jmp */	; \
	.fill	(SVS_ENTER_ALT_BYTES-2),1,0xCC

#define SVS_LEAVE_ALT_BYTES	22
#define SVS_LEAVE_ALTSTACK \
	HOTPATCH(HP_NAME_SVS_LEAVE_ALT, SVS_LEAVE_ALT_BYTES)	; \
	.byte 0xEB, (SVS_LEAVE_ALT_BYTES-2)	/* jmp */	; \
	.fill	(SVS_LEAVE_ALT_BYTES-2),1,0xCC

#else
#define SVS_ENTER		/* nothing */
#define SVS_LEAVE		/* nothing */
#define SVS_ENTER_ALTSTACK	/* nothing */
#define SVS_LEAVE_ALTSTACK	/* nothing */
#endif

#define	INTRENTRY_L(kernel_trap, usertrap) \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	cld				; \
	SMAP_ENABLE			; \
	testb	$SEL_UPL,TF_CS(%rsp)	; \
	je	kernel_trap		; \
	usertrap			; \
	SWAPGS				; \
	SVS_ENTER			; \
	movw	%gs,TF_GS(%rsp)		; \
	movw	%fs,TF_FS(%rsp)		; \
	movw	%es,TF_ES(%rsp)		; \
	movw	%ds,TF_DS(%rsp)

#define	INTRENTRY \
	INTRENTRY_L(98f,)		; \
98:

#define INTRFASTEXIT \
	jmp	intrfastexit

#define INTR_RECURSE_HWFRAME \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	pushq	$GSEL(GCODE_SEL,SEL_KPL); \
	/* XEN: We must fixup CS, as even kernel mode runs at CPL 3 */ \
	XEN_ONLY2(andb	$0xfc,(%rsp);)	\
	pushq	%r13			;

#define	INTR_RECURSE_ENTRY \
	subq	$TF_REGSIZE,%rsp	; \
	INTR_SAVE_GPRS			; \
	cld

#define	CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

#define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)

#endif /* _AMD64_MACHINE_FRAMEASM_H */
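
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * real trap and interrupt stubs live elsewhere, e.g. amd64_trap.S and
 * vector.S, and "hypothetical_trap", T_HYPOTHETICAL and
 * hypothetical_handler are made-up names). A vector whose exception
 * pushes no hardware error code would use these macros roughly like:
 *
 *	IDTVEC(hypothetical_trap)
 *		pushq	$0			fake error code slot
 *		pushq	$T_HYPOTHETICAL		trap number
 *		INTRENTRY			build the trapframe; CLAC,
 *						swapgs and SVS_ENTER when
 *						coming from user mode
 *		movq	%rsp,%rdi		pass the trapframe pointer
 *		call	_C_LABEL(hypothetical_handler)
 *		INTRFASTEXIT			jump to the common exit path
 */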