/*	$NetBSD: frameasm.h,v 1.34 2022/04/09 12:07:00 riastradh Exp $	*/

#ifndef _I386_FRAMEASM_H_
#define _I386_FRAMEASM_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#endif


#ifdef XEN
/* XXX assym.h */
/*
 * Xen primitives.  Under Xen, interrupt delivery is controlled by the
 * evtchn_upcall_mask byte of the per-vCPU vcpu_info structure rather
 * than the hardware IF flag; `reg' must already point at the vcpu_info.
 * XEN_TEST_PENDING leaves its result in ZF (ZF clear => upcall pending).
 */
#define TRAP_INSTR	int $0x82
#define XEN_BLOCK_EVENTS(reg)	movb $1,EVTCHN_UPCALL_MASK(reg)
#define XEN_UNBLOCK_EVENTS(reg)	movb $0,EVTCHN_UPCALL_MASK(reg)
#define XEN_TEST_PENDING(reg)	testb $0xFF,EVTCHN_UPCALL_PENDING(reg)
#endif /* XEN */

#if defined(XENPV)
/*
 * cli/sti replacements for XENPV: mask/unmask event delivery through
 * the vcpu_info located via CPUVAR(VCPU).  `reg' is a scratch register
 * and is clobbered.
 */
#define CLI(reg)	movl CPUVAR(VCPU),reg ;	\
			XEN_BLOCK_EVENTS(reg)
#define STI(reg)	movl CPUVAR(VCPU),reg ;	\
			XEN_UNBLOCK_EVENTS(reg)
/* STI plus pending-upcall test; leaves ZF clear if an upcall is pending. */
#define STIC(reg)	movl CPUVAR(VCPU),reg ;	\
			XEN_UNBLOCK_EVENTS(reg) ; \
			testb $0xff,EVTCHN_UPCALL_PENDING(reg)
/* Push the pseudo-PSL: the current upcall mask byte, zero-extended. */
#define PUSHF(reg)	movl CPUVAR(VCPU),reg ; \
			movzbl EVTCHN_UPCALL_MASK(reg), reg; \
			pushl reg
/* Pop the value pushed by PUSHF and hand it to xen_write_psl(). */
#define POPF(reg)	call _C_LABEL(xen_write_psl); \
			addl $4,%esp
#else
/* Native variants: `reg' is unused and left untouched. */
#define CLI(reg)	cli
#define STI(reg)	sti
#define PUSHF(reg)	pushf
#define POPF(reg)	popf
#ifdef XENPVHVM
/* Native sti, then unmask Xen events and test for a pending upcall (ZF). */
#define STIC(reg)	sti ; \
			movl CPUVAR(VCPU),reg ;	\
			XEN_UNBLOCK_EVENTS(reg) ; \
			testb $0xff,EVTCHN_UPCALL_PENDING(reg)
#endif /* XENPVHVM */

#endif /* XENPV */

/* Identifiers for hot-patchable code sequences recorded by HOTPATCH(). */
#define HP_NAME_CLAC		1
#define HP_NAME_STAC		2
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4
#define HP_NAME_SSE2_MFENCE	5
#define HP_NAME_CAS_64		6
#define HP_NAME_SPLLOWER	7
#define HP_NAME_MUTEX_EXIT	8

/*
 * HOTPATCH(name, size): tag the next `size' bytes of code as a
 * hot-patchable sequence.  Emits a (name, size, address) record into
 * the .rodata.hotpatch section; the tagged code itself is emitted
 * unchanged here and rewritten at run time by the hotpatch machinery.
 */
#define HOTPATCH(name, size) \
123: ; \
	.pushsection .rodata.hotpatch, "a" ; \
	.byte name ; \
	.byte size ; \
	.long 123b ; \
	.popsection

/*
 * SMAP_ENABLE/SMAP_DISABLE: 3-byte NOP sleds, hot-patched at run time
 * to the 3-byte CLAC/STAC instructions (per HP_NAME_CLAC/HP_NAME_STAC)
 * on CPUs with SMAP, to close/open supervisor access to user pages.
 */
#define SMAP_ENABLE \
	HOTPATCH(HP_NAME_CLAC, 3) ; \
	.byte 0x90, 0x90, 0x90

#define SMAP_DISABLE \
	HOTPATCH(HP_NAME_STAC, 3) ; \
	.byte 0x90, 0x90, 0x90

/*
 * These are used on interrupt or trap entry or exit.
 */

/*
 * INTRENTRY: complete the trapframe below the hardware-pushed portion.
 * Makes room with TF_PUSHSIZE, stores the segment and general-purpose
 * registers into their TF_* slots, then loads the kernel data selector
 * (GDATA_SEL) into %ds/%es/%gs and the per-CPU selector (GCPU_SEL) into
 * %fs.  Register stores and segment loads are interleaved; %eax is used
 * as scratch after its own slot is saved.  Ends with cld so kernel
 * string instructions run with DF clear.
 */
#define INTRENTRY \
	SMAP_ENABLE			; \
	subl	$TF_PUSHSIZE,%esp	; \
	movw	%gs,TF_GS(%esp)		; \
	movw	%fs,TF_FS(%esp)		; \
	movl	%eax,TF_EAX(%esp)	; \
	movw	%es,TF_ES(%esp)		; \
	movw	%ds,TF_DS(%esp)		; \
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax	; \
	movl	%edi,TF_EDI(%esp)	; \
	movl	%esi,TF_ESI(%esp)	; \
	movw	%ax,%ds			; \
	movl	%ebp,TF_EBP(%esp)	; \
	movw	%ax,%es			; \
	movl	%ebx,TF_EBX(%esp)	; \
	movw	%ax,%gs			; \
	movl	%edx,TF_EDX(%esp)	; \
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax	; \
	movl	%ecx,TF_ECX(%esp)	; \
	movl	%eax,%fs		; \
	cld

/* Tail-jump to the common interrupt/trap exit code. */
#define INTRFASTEXIT \
	jmp	intrfastexit

/*
 * Push a hardware-style interrupt frame (eflags, cs, eip) so a
 * software-recursed interrupt can return via the normal exit path.
 * The eip slot is filled from %esi — presumably preloaded by the
 * caller with the resume address; confirm against the call sites.
 */
#define INTR_RECURSE_HWFRAME \
	pushfl			; \
	pushl	%cs		; \
	pushl	%esi		;

/* Sets ZF iff no deferred pmap load is pending on this CPU. */
#define CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

/* Sets ZF iff curlwp has no AST pending; clobbers `reg'. */
#define CHECK_ASTPENDING(reg)	movl	CPUVAR(CURLWP),reg	; \
				cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)

/*
 * If the FPU state is not in the CPU, restore it. Executed with interrupts
 * disabled.
 *
 * Clobbers %eax and eflags.
 * %ebx must not be modified
 */
#define HANDLE_DEFERRED_FPU \
	movl	CPUVAR(CURLWP),%eax			; \
	testl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%eax)	; \
	jnz	1f					; \
	pushl	%eax					; \
	call	_C_LABEL(fpu_handle_deferred)		; \
	popl	%eax					; \
	orl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%eax)	; \
1:

/*
 * IDEPTH_INCR:
 * increase ci_idepth and switch to the interrupt stack if necessary.
 * note that the initial value of ci_idepth is -1.
 *
 * => should be called with interrupt disabled.
 * => save the old value of %esp in %eax.
 */

#define IDEPTH_INCR \
	incl	CPUVAR(IDEPTH); \
	movl	%esp, %eax; /* mov preserves flags; ZF below is from incl */ \
	jne	999f; /* nonzero depth: nested, already on the intr stack */ \
	movl	CPUVAR(INTRSTACK), %esp; \
999:	pushl	%eax; /* eax == pointer to intrframe */ \

/*
 * IDEPTH_DECR:
 * decrement ci_idepth and switch back to
 * the original stack saved by IDEPTH_INCR.
 *
 * => should be called with interrupt disabled.
 */

#define IDEPTH_DECR \
	popl	%esp; \
	decl	CPUVAR(IDEPTH)

#endif /* _I386_FRAMEASM_H_ */