/*	$NetBSD: frameasm.h,v 1.35 2022/07/30 14:11:00 riastradh Exp $	*/

#ifndef _I386_FRAMEASM_H_
#define _I386_FRAMEASM_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#endif


#ifdef XEN
/* XXX assym.h */
/*
 * Xen primitives.  TRAP_INSTR is the hypercall trap gate.  Event
 * delivery for a vcpu is masked/unmasked by writing the byte at
 * EVTCHN_UPCALL_MASK off the vcpu_info pointer held in reg, and
 * XEN_TEST_PENDING sets ZF iff no event is pending.
 */
#define TRAP_INSTR	int $0x82
#define XEN_BLOCK_EVENTS(reg)	movb $1,EVTCHN_UPCALL_MASK(reg)
#define XEN_UNBLOCK_EVENTS(reg)	movb $0,EVTCHN_UPCALL_MASK(reg)
#define XEN_TEST_PENDING(reg)	testb $0xFF,EVTCHN_UPCALL_PENDING(reg)
#endif /* XEN */

#if defined(XENPV)
/*
 * Interrupt enable/disable for XENPV: instead of native cli/sti the
 * vcpu's event-channel mask is updated.  Each macro clobbers reg,
 * which is loaded with the vcpu_info pointer from the per-CPU area.
 */
#define CLI(reg)	movl CPUVAR(VCPU),reg ;  \
			XEN_BLOCK_EVENTS(reg)
#define STI(reg)	movl CPUVAR(VCPU),reg ;  \
			XEN_UNBLOCK_EVENTS(reg)
/*
 * STIC: STI variant that additionally tests EVTCHN_UPCALL_PENDING,
 * leaving ZF clear iff an event became pending (caller branches on it).
 */
#define STIC(reg)	movl CPUVAR(VCPU),reg ;  \
			XEN_UNBLOCK_EVENTS(reg) ; \
			testb $0xff,EVTCHN_UPCALL_PENDING(reg)
/*
 * PUSHF/POPF: save/restore the virtual interrupt state.  PUSHF pushes
 * the zero-extended upcall mask byte; POPF hands it to xen_write_psl()
 * and pops the argument afterwards.
 */
#define PUSHF(reg)	movl CPUVAR(VCPU),reg ; \
			movzbl EVTCHN_UPCALL_MASK(reg), reg; \
			pushl reg
#define POPF(reg)	call _C_LABEL(xen_write_psl); \
			addl $4,%esp
#else
/* Native variants; the reg argument is unused here. */
#define CLI(reg)	cli
#define STI(reg)	sti
#define PUSHF(reg)	pushf
#define POPF(reg)	popf
#ifdef XENPVHVM
/*
 * PVHVM STIC: enable native interrupts *and* unmask Xen events, with
 * the same pending-event flag result as the XENPV version above.
 */
#define STIC(reg)	sti ; \
			movl CPUVAR(VCPU),reg ;  \
			XEN_UNBLOCK_EVENTS(reg) ; \
			testb $0xff,EVTCHN_UPCALL_PENDING(reg)
#endif /* XENPVHVM */

#endif /* XENPV */

/* Hotpatch names: identify which runtime patch applies at a site. */
#define HP_NAME_CLAC		1
#define HP_NAME_STAC		2
#define HP_NAME_NOLOCK		3
#define HP_NAME_RETFENCE	4
#define HP_NAME_CAS_64		5
#define HP_NAME_SPLLOWER	6
#define HP_NAME_MUTEX_EXIT	7

/*
 * HOTPATCH(name, size): record the address of the code that follows,
 * together with a patch name and its byte size, in the allocatable
 * .rodata.hotpatch section so the kernel can rewrite it at runtime.
 */
#define HOTPATCH(name, size) \
123:						; \
	.pushsection	.rodata.hotpatch, "a"	; \
	.byte		name			; \
	.byte		size			; \
	.long		123b			; \
	.popsection

/*
 * SMAP enable/disable sites: three nops by default, registered for
 * hotpatching (HP_NAME_CLAC/HP_NAME_STAC) — presumably patched to
 * clac/stac (3-byte instructions) on SMAP-capable CPUs.
 */
#define SMAP_ENABLE \
	HOTPATCH(HP_NAME_CLAC, 3)		; \
	.byte 0x90, 0x90, 0x90

#define SMAP_DISABLE \
	HOTPATCH(HP_NAME_STAC, 3)		; \
	.byte 0x90, 0x90, 0x90

/*
 * These are used on interrupt or trap entry or exit.
 */
/*
 * INTRENTRY: build the register portion of the trapframe.  Makes room
 * with one subl, then stores all segment and general-purpose registers
 * at their TF_* offsets, interleaving the stores with loading kernel
 * segment selectors (%ds/%es/%gs = GDATA_SEL, %fs = GCPU_SEL for
 * per-CPU data) — interleaved presumably for scheduling; the order of
 * each store/load pair is what matters.  Ends with cld so string
 * instructions run forward, as kernel C code expects.
 */
#define	INTRENTRY \
	SMAP_ENABLE			; \
	subl	$TF_PUSHSIZE,%esp	; \
	movw	%gs,TF_GS(%esp)		; \
	movw	%fs,TF_FS(%esp)		; \
	movl	%eax,TF_EAX(%esp)	; \
	movw	%es,TF_ES(%esp)		; \
	movw	%ds,TF_DS(%esp)		; \
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax	; \
	movl	%edi,TF_EDI(%esp)	; \
	movl	%esi,TF_ESI(%esp)	; \
	movw	%ax,%ds			; \
	movl	%ebp,TF_EBP(%esp)	; \
	movw	%ax,%es			; \
	movl	%ebx,TF_EBX(%esp)	; \
	movw	%ax,%gs			; \
	movl	%edx,TF_EDX(%esp)	; \
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax	; \
	movl	%ecx,TF_ECX(%esp)	; \
	movl	%eax,%fs		; \
	cld

/* Common exit path: unwinds what INTRENTRY built (code is elsewhere). */
#define INTRFASTEXIT \
	jmp	intrfastexit

/*
 * INTR_RECURSE_HWFRAME: fake the hardware interrupt frame (eflags, cs,
 * eip) for a software-recursed interrupt; the return address is taken
 * from %esi.
 */
#define	INTR_RECURSE_HWFRAME \
	pushfl				; \
	pushl	%cs			; \
	pushl	%esi			;

/* Sets ZF iff no deferred pmap load is wanted on this CPU. */
#define	CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

/*
 * CHECK_ASTPENDING: load curlwp into reg and set ZF iff no AST is
 * pending for it.  CLEAR_ASTPENDING consumes the flag.
 */
#define CHECK_ASTPENDING(reg)	movl	CPUVAR(CURLWP),reg	; \
				cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)

/*
 * If the FPU state is not in the CPU, restore it. Executed with interrupts
 * disabled.
 *
 * %ebx must not be modified
 */
#define HANDLE_DEFERRED_FPU \
	movl	CPUVAR(CURLWP),%eax			; \
	testl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%eax)	; \
	jnz	1f					; \
	pushl	%eax					; \
	call	_C_LABEL(fpu_handle_deferred)		; \
	popl	%eax					; \
	orl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%eax)	; \
1:

/*
 * IDEPTH_INCR:
 *	increase ci_idepth and switch to the interrupt stack if necessary.
 *	note that the initial value of ci_idepth is -1.
 *
 *	=> should be called with interrupt disabled.
 *	=> save the old value of %esp in %eax.
 */

/*
 * NB: the jne consumes ZF produced by incl (movl does not touch
 * eflags).  Since ci_idepth starts at -1, the first entry increments
 * it to 0, ZF is set, and we fall through to switch to the interrupt
 * stack; nested entries jump past the switch.
 */
#define	IDEPTH_INCR \
	incl	CPUVAR(IDEPTH); \
	movl	%esp, %eax; \
	jne	999f; \
	movl	CPUVAR(INTRSTACK), %esp; \
999:	pushl	%eax; /* eax == pointer to intrframe */ \

/*
 * IDEPTH_DECR:
 *	decrement ci_idepth and switch back to
 *	the original stack saved by IDEPTH_INCR.
 *
 *	=> should be called with interrupt disabled.
 */

/* Pop the saved %esp pushed by IDEPTH_INCR, then drop a nesting level. */
#define	IDEPTH_DECR \
	popl	%esp; \
	decl	CPUVAR(IDEPTH)

#endif /* _I386_FRAMEASM_H_ */