/*	$NetBSD: frameasm.h,v 1.33 2020/05/01 09:40:47 maxv Exp $	*/

#ifndef _I386_FRAMEASM_H_
#define _I386_FRAMEASM_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#endif

#ifdef XEN
/* XXX assym.h */
#define TRAP_INSTR		int $0x82
#define XEN_BLOCK_EVENTS(reg)	movb $1,EVTCHN_UPCALL_MASK(reg)
#define XEN_UNBLOCK_EVENTS(reg)	movb $0,EVTCHN_UPCALL_MASK(reg)
#define XEN_TEST_PENDING(reg)	testb $0xFF,EVTCHN_UPCALL_PENDING(reg)
#endif /* XEN */

#if defined(XENPV)
#define CLI(reg)	movl CPUVAR(VCPU),reg ;			\
			XEN_BLOCK_EVENTS(reg)
#define STI(reg)	movl CPUVAR(VCPU),reg ;			\
			XEN_UNBLOCK_EVENTS(reg)
#define STIC(reg)	movl CPUVAR(VCPU),reg ;			\
			XEN_UNBLOCK_EVENTS(reg) ;		\
			testb $0xff,EVTCHN_UPCALL_PENDING(reg)
#define PUSHF(reg)	movl CPUVAR(VCPU),reg ;			\
			movzbl EVTCHN_UPCALL_MASK(reg), reg;	\
			pushl reg
#define POPF(reg)	call _C_LABEL(xen_write_psl);		\
			addl $4,%esp
#else
#define CLI(reg)	cli
#define STI(reg)	sti
#define PUSHF(reg)	pushf
#define POPF(reg)	popf
#ifdef XENPVHVM
#define STIC(reg)	sti ;					\
			movl CPUVAR(VCPU),reg ;			\
			XEN_UNBLOCK_EVENTS(reg) ;		\
			testb $0xff,EVTCHN_UPCALL_PENDING(reg)
#endif /* XENPVHVM */

#endif /* XENPV */

#define HP_NAME_CLAC		1
#define HP_NAME_STAC		2
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4
#define HP_NAME_SSE2_LFENCE	5
#define HP_NAME_SSE2_MFENCE	6
#define HP_NAME_CAS_64		7
#define HP_NAME_SPLLOWER	8
#define HP_NAME_MUTEX_EXIT	9

#define HOTPATCH(name, size)				\
123:							; \
	.pushsection	.rodata.hotpatch, "a"		; \
	.byte		name				; \
	.byte		size				; \
	.long		123b				; \
	.popsection

#define SMAP_ENABLE					\
	HOTPATCH(HP_NAME_CLAC, 3)			; \
	.byte 0x90, 0x90, 0x90

#define SMAP_DISABLE					\
	HOTPATCH(HP_NAME_STAC, 3)			; \
	.byte 0x90, 0x90, 0x90
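/*
 * Illustration only (never assembled here): a sketch of what SMAP_ENABLE
 * expands to, given the HOTPATCH() definition above.  The three NOPs form
 * a 3-byte patch window; the record emitted into .rodata.hotpatch names
 * the patch (HP_NAME_CLAC), its size and the address of the window, so the
 * hotpatch machinery can presumably rewrite the NOPs in place later (CLAC
 * and STAC both happen to be 3-byte instructions).
 *
 *	123:					<- patch site label in .text
 *	.pushsection	.rodata.hotpatch, "a"
 *	.byte		HP_NAME_CLAC		<- which patch to apply
 *	.byte		3			<- size of the patch window
 *	.long		123b			<- address of the patch site
 *	.popsection
 *	.byte		0x90, 0x90, 0x90	<- NOPs until (if ever) patched
 */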
/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTRENTRY \
	SMAP_ENABLE			; \
	subl	$TF_PUSHSIZE,%esp	; \
	movw	%gs,TF_GS(%esp)		; \
	movw	%fs,TF_FS(%esp)		; \
	movl	%eax,TF_EAX(%esp)	; \
	movw	%es,TF_ES(%esp)		; \
	movw	%ds,TF_DS(%esp)		; \
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax	; \
	movl	%edi,TF_EDI(%esp)	; \
	movl	%esi,TF_ESI(%esp)	; \
	movw	%ax,%ds			; \
	movl	%ebp,TF_EBP(%esp)	; \
	movw	%ax,%es			; \
	movl	%ebx,TF_EBX(%esp)	; \
	movw	%ax,%gs			; \
	movl	%edx,TF_EDX(%esp)	; \
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax	; \
	movl	%ecx,TF_ECX(%esp)	; \
	movl	%eax,%fs		; \
	cld

#define INTRFASTEXIT \
	jmp	intrfastexit

#define INTR_RECURSE_HWFRAME \
	pushfl				; \
	pushl	%cs			; \
	pushl	%esi			;

#define CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

#define CHECK_ASTPENDING(reg)	movl	CPUVAR(CURLWP),reg	; \
				cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)

/*
 * If the FPU state is not in the CPU, restore it. Executed with interrupts
 * disabled.
 *
 * %ebx must not be modified.
 */
#define HANDLE_DEFERRED_FPU \
	movl	CPUVAR(CURLWP),%eax			; \
	testl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%eax)	; \
	jnz	1f					; \
	pushl	%eax					; \
	call	_C_LABEL(fpu_handle_deferred)		; \
	popl	%eax					; \
	orl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%eax)	; \
1:

/*
 * IDEPTH_INCR:
 * increment ci_idepth and switch to the interrupt stack if necessary.
 * Note that the initial value of ci_idepth is -1.
 *
 * => should be called with interrupts disabled.
 * => saves the old value of %esp in %eax.
 * => the stack switch happens only when the incremented ci_idepth is 0
 *    (outermost entry): movl does not modify the flags, so the jne still
 *    tests the result of the incl.
 */

#define IDEPTH_INCR \
	incl	CPUVAR(IDEPTH); \
	movl	%esp, %eax; \
	jne	999f; \
	movl	CPUVAR(INTRSTACK), %esp; \
999:	pushl	%eax; /* eax == pointer to intrframe */

/*
 * IDEPTH_DECR:
 * decrement ci_idepth and switch back to
 * the original stack saved by IDEPTH_INCR.
 *
 * => should be called with interrupts disabled.
 */

#define IDEPTH_DECR \
	popl	%esp; \
	decl	CPUVAR(IDEPTH)

#endif /* _I386_FRAMEASM_H_ */
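/*
 * Usage sketch (illustrative only; not copied from any actual vector stub):
 * IDEPTH_INCR and IDEPTH_DECR are meant to bracket interrupt dispatch, with
 * interrupts still disabled around the stack switch, so that only the
 * outermost entry moves onto the per-CPU interrupt stack:
 *
 *	INTRENTRY		<- save registers, build the trap frame
 *	IDEPTH_INCR		<- ci_idepth++; on the outermost entry switch
 *				   to CPUVAR(INTRSTACK) and push the old %esp
 *	...			<- dispatch the handler(s)
 *	IDEPTH_DECR		<- pop the saved %esp, ci_idepth--
 *	INTRFASTEXIT		<- jump to the common register-restore path
 */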