/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/x86_archext.h>
#include <sys/traptrace.h>
#include <sys/machparam.h>

/*
 * only one routine in this file is interesting to lint
 */

#if defined(__lint)

void
ndptrap_frstor(void)
{}

#else

#include "assym.h"

/*
 * push $0 on stack for traps that do not
 * generate an error code. This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 *
 * Note that for all exceptions for amd64
 * %r11 and %rcx are on the stack. Just pop
 * them back into their appropriate registers and let
 * it get saved as is running native.
 */

#if defined(__xpv) && defined(__amd64)

#define	NPTRAP_NOERR(trapno)	\
	pushq	$0;		\
	pushq	$trapno

#define	TRAP_NOERR(trapno)	\
	XPV_TRAP_POP;		\
	NPTRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	XPV_TRAP_POP;		\
	pushq	$trapno

#else	/* __xpv && __amd64 */

#define	TRAP_NOERR(trapno)	\
	push	$0;		\
	push	$trapno

#define	NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	push	$trapno

#endif	/* __xpv && __amd64 */


	/*
	 * #DE
	 */
	ENTRY_NP(div0trap)
	TRAP_NOERR(T_ZERODIV)	/* $0 */
	jmp	cmntrap
	SET_SIZE(div0trap)

	/*
	 * #DB
	 *
	 * Fetch %dr6 and clear it, handing off the value to the
	 * cmntrap code in %r15/%esi
	 */
	ENTRY_NP(dbgtrap)
	TRAP_NOERR(T_SGLSTP)	/* $1 */

#if defined(__amd64)
#if !defined(__xpv)		/* no sysenter support yet */
	/*
	 * If we get here as a result of single-stepping a sysenter
	 * instruction, we suddenly find ourselves taking a #db
	 * in kernel mode -before- we've swapgs'ed.  So before we can
	 * take the trap, we do the swapgs here, and fix the return
	 * %rip in trap() so that we return immediately after the
	 * swapgs in the sysenter handler to avoid doing the swapgs again.
	 *
	 * Nobody said that the design of sysenter was particularly
	 * elegant, did they?
	 */

	pushq	%r11

	/*
	 * At this point the stack looks like this:
	 *
	 * (high address)	r_ss
	 *			r_rsp
	 *			r_rfl
	 *			r_cs
	 *			r_rip		<-- %rsp + 24
	 *			r_err		<-- %rsp + 16
	 *			r_trapno	<-- %rsp + 8
	 * (low address)	%r11		<-- %rsp
	 */
	leaq	sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	je	1f
	leaq	brand_sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	jne	2f
1:	SWAPGS
2:	popq	%r11
#endif	/* !__xpv */

	INTR_PUSH
#if defined(__xpv)
	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, %r15		/* %db6 -> %r15 */
	movl	$6, %edi
	movl	$0, %esi
	call	kdi_dreg_set		/* 0 -> %db6 */
#else
	movq	%db6, %r15
	xorl	%eax, %eax
	movq	%rax, %db6
#endif

#elif defined(__i386)

	INTR_PUSH
#if defined(__xpv)
	pushl	$6
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, %esi		/* %dr6 -> %esi */
	pushl	$0
	pushl	$6
	call	kdi_dreg_set		/* 0 -> %dr6 */
	addl	$8, %esp
#else
	movl	%db6, %esi
	xorl	%eax, %eax
	movl	%eax, %db6
#endif
#endif	/* __i386 */

	jmp	cmntrap_pushed
	SET_SIZE(dbgtrap)
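
	/*
	 * A note on the handler above: the processor does not clear %dr6
	 * when it delivers #DB -- its status bits are sticky -- so dbgtrap
	 * samples the register and then zeroes it (via the kdi_dreg_get/set
	 * calls when running on the hypervisor) so that the status seen at
	 * the next #DB is unambiguous.  The sampled value is handed to the
	 * cmntrap code in %r15 (amd64) or %esi (i386), as the comment at
	 * the top of the routine says.
	 */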

#if defined(__amd64)
#if !defined(__xpv)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase else
 * set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs for all processors till we find a match for the gdt
 * of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 */
#define	SET_CPU_GSBASE							\
	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
	movq	%rax, REGOFF_RAX(%rsp);					\
	movq	%rbx, REGOFF_RBX(%rsp);					\
	movq	%rcx, REGOFF_RCX(%rsp);					\
	movq	%rdx, REGOFF_RDX(%rsp);					\
	movq	%rbp, REGOFF_RBP(%rsp);					\
	movq	%rsp, %rbp;						\
	subq	$16, %rsp;		/* space for gdt */		\
	sgdt	6(%rsp);						\
	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
	xorl	%ebx, %ebx;		/* loop index */		\
	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
1:									\
	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
	je	2f;			/* yes, continue */		\
	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
	je	3f;			/* yes, go set gsbase */	\
2:									\
	incl	%ebx;			/* i++ */			\
	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
	jb	1b;			/* yes, loop */			\
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
3:									\
	movl	$MSR_AMD_KGSBASE, %ecx;					\
	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
	jne	4f;			/* no, go set KGSBASE */	\
	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
	mfence;				/* OPTERON_ERRATUM_88 */	\
4:									\
	movq	%rax, %rdx;		/* write base register */	\
	shrq	$32, %rdx;						\
	wrmsr;								\
	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
	movq	REGOFF_RCX(%rbp), %rcx;					\
	movq	REGOFF_RBX(%rbp), %rbx;					\
	movq	REGOFF_RAX(%rbp), %rax;					\
	movq	%rbp, %rsp;						\
	movq	REGOFF_RBP(%rsp), %rbp;					\
	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */

#else	/* __xpv */

#define	SET_CPU_GSBASE	/* noop on the hypervisor */

#endif	/* __xpv */
#endif	/* __amd64 */

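
/*
 * For reference, SET_CPU_GSBASE above is roughly the assembly form of the
 * following sketch (illustrative only -- it must stay in assembly because at
 * this point %gs cannot be trusted and no C environment is available):
 *
 *	for (i = 0; i < NCPU; i++)
 *		if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt_of_trapping_cpu)
 *			break;
 *	wrmsr(trapped_from_kernel ? MSR_AMD_GSBASE : MSR_AMD_KGSBASE,
 *	    (uint64_t)cpu[i]);
 */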

#if defined(__amd64)

	/*
	 * #NMI
	 *
	 * XXPV: See 6532669.
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	SET_CPU_GSBASE

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi
	call	av_dispatch_nmivect

	INTR_POP
	IRET
	/*NOTREACHED*/
	SET_SIZE(nmiint)

#elif defined(__i386)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	pushl	%ebp
	call	av_dispatch_nmivect
	addl	$4, %esp

	INTR_POP_USER
	IRET
	SET_SIZE(nmiint)

#endif	/* __i386 */

	/*
	 * #BP
	 */
	ENTRY_NP(brktrap)

#if defined(__amd64)
	XPV_TRAP_POP
	cmpw	$KCS_SEL, 8(%rsp)
	jne	bp_user

	/*
	 * This is a breakpoint in the kernel -- it is very likely that this
	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
	 * we must decrement the trapping %rip to make it appear as a fault.
	 * We then push a non-zero error code to indicate that this is coming
	 * from #BP.
	 */
	decq	(%rsp)
	push	$1			/* error code -- non-zero for #BP */
	jmp	ud_kernel

bp_user:
#endif	/* __amd64 */

	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	dtrace_trap

	SET_SIZE(brktrap)

	/*
	 * #OF
	 */
	ENTRY_NP(ovflotrap)
	TRAP_NOERR(T_OVFLW)	/* $4 */
	jmp	cmntrap
	SET_SIZE(ovflotrap)

	/*
	 * #BR
	 */
	ENTRY_NP(boundstrap)
	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
	jmp	cmntrap
	SET_SIZE(boundstrap)

#if defined(__amd64)

	ENTRY_NP(invoptrap)

	XPV_TRAP_POP

	cmpw	$KCS_SEL, 8(%rsp)
	jne	ud_user

#if defined(__xpv)
	movb	$0, 12(%rsp)	/* clear saved upcall_mask from %cs */
#endif
	push	$0		/* error code -- zero for #UD */
ud_kernel:
	push	$0xdddd		/* a dummy trap number */
	INTR_PUSH
	movq	REGOFF_RIP(%rsp), %rdi
	movq	REGOFF_RSP(%rsp), %rsi
	movq	REGOFF_RAX(%rsp), %rdx
	pushq	(%rsi)
	movq	%rsp, %rsi
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addq	$8, %rsp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	ud_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	ud_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	ud_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	ud_ret
	jmp	ud_trap

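	/*
	 * In rough C terms (illustrative only), the dispatch above is:
	 *
	 *	switch (dtrace_invop(rip, (uintptr_t *)rsp, rax)) {
	 *	case DTRACE_INVOP_PUSHL_EBP:	emulate pushq %rbp;
	 *	case DTRACE_INVOP_LEAVE:	emulate leave;
	 *	case DTRACE_INVOP_NOP:		emulate nop;
	 *	case DTRACE_INVOP_RET:		emulate ret;
	 *	default:			deliver the #UD via trap();
	 *	}
	 *
	 * i.e. dtrace_invop() reports which instruction an FBT-style probe
	 * displaced, and each ud_* case below hand-emulates that instruction
	 * before resuming at the one that follows it.
	 */
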
ud_push:
	/*
	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
	INTR_POP
	subq	$16, %rsp	/* make room for %rbp */
	pushq	%rax		/* push temp */
	movq	24(%rsp), %rax	/* load calling RIP */
	addq	$1, %rax	/* increment over trapping instr */
	movq	%rax, 8(%rsp)	/* store calling RIP */
	movq	32(%rsp), %rax	/* load calling CS */
	movq	%rax, 16(%rsp)	/* store calling CS */
	movq	40(%rsp), %rax	/* load calling RFLAGS */
	movq	%rax, 24(%rsp)	/* store calling RFLAGS */
	movq	48(%rsp), %rax	/* load calling RSP */
	subq	$8, %rax	/* make room for %rbp */
	movq	%rax, 32(%rsp)	/* store calling RSP */
	movq	56(%rsp), %rax	/* load calling SS */
	movq	%rax, 40(%rsp)	/* store calling SS */
	movq	32(%rsp), %rax	/* reload calling RSP */
	movq	%rbp, (%rax)	/* store %rbp there */
	popq	%rax		/* pop off temp */
	IRET			/* return from interrupt */
	/*NOTREACHED*/

ud_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
	INTR_POP
	pushq	%rax		/* push temp */
	movq	8(%rsp), %rax	/* load calling RIP */
	addq	$1, %rax	/* increment over trapping instr */
	movq	%rax, 8(%rsp)	/* store calling RIP */
	movq	(%rbp), %rax	/* get new %rbp */
	addq	$8, %rbp	/* adjust new %rsp */
	movq	%rbp, 32(%rsp)	/* store new %rsp */
	movq	%rax, %rbp	/* set new %rbp */
	popq	%rax		/* pop off temp */
	IRET			/* return from interrupt */
	/*NOTREACHED*/

ud_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard: we need only
	 * advance the %rip by one.
	 */
	INTR_POP
	incq	(%rsp)
	IRET
	/*NOTREACHED*/

ud_ret:
	INTR_POP
	pushq	%rax		/* push temp */
	movq	32(%rsp), %rax	/* load %rsp */
	movq	(%rax), %rax	/* load calling RIP */
	movq	%rax, 8(%rsp)	/* store calling RIP */
	addq	$8, 32(%rsp)	/* adjust new %rsp */
	popq	%rax		/* pop off temp */
	IRET			/* return from interrupt */
	/*NOTREACHED*/

ud_trap:
	/*
	 * We're going to let the kernel handle this as a normal #UD.  If,
	 * however, we came through #BP and are spoofing #UD (in this case,
	 * the stored error value will be non-zero), we need to de-spoof
	 * the trap by incrementing %rip and pushing T_BPTFLT.
	 */
	cmpq	$0, REGOFF_ERR(%rsp)
	je	ud_ud
	incq	REGOFF_RIP(%rsp)
	addq	$REGOFF_RIP, %rsp
	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	cmntrap

ud_ud:
	addq	$REGOFF_RIP, %rsp
ud_user:
	NPTRAP_NOERR(T_ILLINST)
	jmp	cmntrap
	SET_SIZE(invoptrap)

#elif defined(__i386)

	/*
	 * #UD
	 */
	ENTRY_NP(invoptrap)
	/*
	 * If we are taking an invalid opcode trap while in the kernel, this
	 * is likely an FBT probe point.
	 */
	pushl	%gs
	cmpw	$KGS_SEL, (%esp)
	jne	8f

	addl	$4, %esp
#if defined(__xpv)
	movb	$0, 6(%esp)	/* clear saved upcall_mask from %cs */
#endif	/* __xpv */
	pusha
	pushl	%eax		/* push %eax -- may be return value */
	pushl	%esp		/* push stack pointer */
	addl	$48, (%esp)	/* adjust to incoming args */
	pushl	40(%esp)	/* push calling EIP */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	1f
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	2f
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	3f
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	4f
	jmp	7f
1:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popa
	subl	$4, %esp	/* make room for %ebp */
	pushl	%eax		/* push temp */
	movl	8(%esp), %eax	/* load calling EIP */
	incl	%eax		/* increment over LOCK prefix */
	movl	%eax, 4(%esp)	/* store calling EIP */
	movl	12(%esp), %eax	/* load calling CS */
	movl	%eax, 8(%esp)	/* store calling CS */
	movl	16(%esp), %eax	/* load calling EFLAGS */
	movl	%eax, 12(%esp)	/* store calling EFLAGS */
	movl	%ebp, 16(%esp)	/* push %ebp */
	popl	%eax		/* pop off temp */
	jmp	_emul_done
2:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above: we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popa
	pushl	%eax		/* push temp */
	movl	16(%esp), %ebp	/* pop %ebp */
	movl	12(%esp), %eax	/* load calling EFLAGS */
	movl	%eax, 16(%esp)	/* store calling EFLAGS */
	movl	8(%esp), %eax	/* load calling CS */
	movl	%eax, 12(%esp)	/* store calling CS */
	movl	4(%esp), %eax	/* load calling EIP */
	incl	%eax		/* increment over LOCK prefix */
	movl	%eax, 8(%esp)	/* store calling EIP */
	popl	%eax		/* pop off temp */
	addl	$4, %esp	/* adjust stack pointer */
	jmp	_emul_done
3:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax		/* push temp */
	pushl	%ebx		/* push temp */
	movl	%ebp, %ebx	/* set temp to old %ebp */
	movl	(%ebx), %ebp	/* pop %ebp */
	movl	16(%esp), %eax	/* load calling EFLAGS */
	movl	%eax, (%ebx)	/* store calling EFLAGS */
	movl	12(%esp), %eax	/* load calling CS */
	movl	%eax, -4(%ebx)	/* store calling CS */
	movl	8(%esp), %eax	/* load calling EIP */
	incl	%eax		/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)	/* store calling EIP */
	movl	%ebx, -4(%esp)	/* temporarily store new %esp */
	popl	%ebx		/* pop off temp */
	popl	%eax		/* pop off temp */
	movl	-12(%esp), %esp	/* set stack pointer */
	subl	$8, %esp	/* adjust for three pushes, one pop */
	jmp	_emul_done
4:
	/*
	 * We must emulate a "nop".  This is obviously not hard: we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
_emul_done:
	IRET			/* return from interrupt */
7:
	popa
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
8:
	addl	$4, %esp
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
	SET_SIZE(invoptrap)

#endif	/* __i386 */

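	/*
	 * The #NM handlers below are the lazy-FPU fast path: after a context
	 * switch CR0.TS is left set, so the first floating-point instruction
	 * an lwp issues traps here.  If the lwp already owns FPU state
	 * (FPU_EN set in its pcb), the handler clears TS and restores that
	 * state in place without ever entering C; anything else (no FPU, no
	 * lwp, first use of the FPU) is punted to trap() via
	 * .handle_in_trap.
	 */
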
#if defined(__amd64)

	/*
	 * #NM
	 */
#if defined(__xpv)

	ENTRY_NP(ndptrap)
	/*
	 * (On the hypervisor we must make a hypercall so we might as well
	 * save everything and handle as in a normal trap.)
	 */
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	INTR_PUSH

	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	LOADCPU(%rax)			/* swapgs handled in hypervisor */
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rax), %rbx	/* %rbx = curthread */
	movl	$FPU_EN, %eax
	movq	T_LWP(%rbx), %rbx	/* %rbx = lwp */
	testq	%rbx, %rbx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addq	$LWP_PCB_FPU, %rbx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%rbx)
	jz	.handle_in_trap		/* must be the first fault */
	CLTS
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rbx
#endif

	movl	FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax	/* for xrstor */
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx	/* for xrstor */

	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl	_patch_xrstorq_rbx
_patch_xrstorq_rbx:
	FXRSTORQ	((%rbx))
	cmpw	$KCS_SEL, REGOFF_CS(%rsp)
	je	.return_to_kernel

	ASSERT_UPCALL_MASK_IS_SET
	USER_POP
	IRET				/* return to user mode */
	/*NOTREACHED*/

.return_to_kernel:
	INTR_POP
	IRET
	/*NOTREACHED*/

.handle_in_trap:
	INTR_POP
	pushq	$0			/* can not use TRAP_NOERR */
	pushq	$T_NOEXTFLT
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#else	/* __xpv */

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	pushq	%rax
	pushq	%rbx
	cmpw	$KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
	jne	1f
	LOADCPU(%rax)			/* if yes, don't swapgs */
	jmp	2f
1:
	SWAPGS				/* if from user, need swapgs */
	LOADCPU(%rax)
	SWAPGS
2:
	/*
	 * xrstor needs %edx (together with %eax) as its requested-feature
	 * bitmap.
	 * NOTE: have to push rdx after "cmpw ...24(%rsp)", otherwise rsp+$24
	 * will not point to CS.
	 */
	pushq	%rdx
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rax), %rbx	/* %rbx = curthread */
	movl	$FPU_EN, %eax
	movq	T_LWP(%rbx), %rbx	/* %rbx = lwp */
	testq	%rbx, %rbx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addq	$LWP_PCB_FPU, %rbx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%rbx)
	jz	.handle_in_trap		/* must be the first fault */
	clts
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rbx
#endif

	movl	FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax	/* for xrstor */
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx	/* for xrstor */

	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl	_patch_xrstorq_rbx
_patch_xrstorq_rbx:
	FXRSTORQ	((%rbx))
	popq	%rdx
	popq	%rbx
	popq	%rax
	IRET
	/*NOTREACHED*/

.handle_in_trap:
	popq	%rdx
	popq	%rbx
	popq	%rax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __xpv */

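	/*
	 * A note on the _patch_* labels in these #NM handlers: the restore
	 * instruction at ndptrap_frstor is intended to be rewritten at
	 * startup to match the processor -- the i386 frstor below may be
	 * patched to fxrstor, and the fxrstorq above is presumably patched
	 * to xrstor on XSAVE-capable hardware, which is why the xsave mask
	 * is preloaded into %edx:%eax ("for xrstor") just before it.
	 */
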
#elif defined(__i386)

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in fpnoextflt
	 * below; for all other cases, we let the trap code handle it
	 */
	pushl	%eax
	pushl	%ebx
	pushl	%edx			/* for xrstor */
	pushl	%ds
	pushl	%gs
	movl	$KDS_SEL, %ebx
	movw	%bx, %ds
	movl	$KGS_SEL, %eax
	movw	%ax, %gs
	LOADCPU(%eax)
	cmpl	$0, fpu_exists
	je	.handle_in_trap		/* let trap handle no fp case */
	movl	CPU_THREAD(%eax), %ebx	/* %ebx = curthread */
	movl	$FPU_EN, %eax
	movl	T_LWP(%ebx), %ebx	/* %ebx = lwp */
	testl	%ebx, %ebx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addl	$LWP_PCB_FPU, %ebx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%ebx)
	jz	.handle_in_trap		/* must be the first fault */
	CLTS
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
#if FPU_CTX_FPU_REGS != 0
	addl	$FPU_CTX_FPU_REGS, %ebx
#endif

	movl	FPU_CTX_FPU_XSAVE_MASK(%ebx), %eax	/* for xrstor */
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%ebx), %edx	/* for xrstor */

	/*
	 * the label below is used in trap.c to detect FP faults in kernel
	 * due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl	_patch_fxrstor_ebx
_patch_fxrstor_ebx:
	.globl	_patch_xrstor_ebx
_patch_xrstor_ebx:
	frstor	(%ebx)		/* may be patched to fxrstor */
	nop			/* (including this byte) */
	popl	%gs
	popl	%ds
	popl	%edx
	popl	%ebx
	popl	%eax
	IRET

.handle_in_trap:
	popl	%gs
	popl	%ds
	popl	%edx
	popl	%ebx
	popl	%eax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __i386 */

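	/*
	 * The two #DF handlers below differ by ISA: long mode has no
	 * hardware task switching, so the amd64 handler builds its frame
	 * with DFTRAP_PUSH and calls trap() directly; the i386 handler
	 * arrives through the double fault task gate (dftss), so it must
	 * reconstruct a conventional trap frame by hand from the previous
	 * task's TSS image before it can join cmntrap.
	 */
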
#if !defined(__xpv)
#if defined(__amd64)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	pushq	$T_DBLFLT
	SET_CPU_GSBASE

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
	 * may have reached this point after encountering a #df in kmdb.  If
	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow.  We'll suppress that
	 * entry by setting nopanicdebug.
	 */
	pushq	%rax
	subq	$DESCTBR_SIZE, %rsp
	sidt	(%rsp)
	movq	%gs:CPU_IDT, %rax
	cmpq	%rax, DTR_BASE(%rsp)
	je	1f

	movq	%rax, DTR_BASE(%rsp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
	lidt	(%rsp)

	movl	$1, nopanicdebug

1:	addq	$DESCTBR_SIZE, %rsp
	popq	%rax

	DFTRAP_PUSH

	/*
	 * freeze trap trace.
	 */
#ifdef TRAPTRACE
	leaq	trap_trace_freeze(%rip), %r11
	incl	(%r11)
#endif

	ENABLE_INTR_FLAGS

	movq	%rsp, %rdi	/* &regs */
	xorl	%esi, %esi	/* clear address */
	xorl	%edx, %edx	/* cpuid = 0 */
	call	trap

	SET_SIZE(syserrtrap)

#elif defined(__i386)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	cli				/* disable interrupts */

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
	 * may have reached this point after encountering a #df in kmdb.  If
	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow.  We'll suppress that
	 * entry by setting nopanicdebug.
	 */

	subl	$DESCTBR_SIZE, %esp
	movl	%gs:CPU_IDT, %eax
	sidt	(%esp)
	cmpl	DTR_BASE(%esp), %eax
	je	1f

	movl	%eax, DTR_BASE(%esp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
	lidt	(%esp)

	movl	$1, nopanicdebug

1:	addl	$DESCTBR_SIZE, %esp

	/*
	 * Check the CPL in the TSS to see what mode
	 * (user or kernel) we took the fault in.  At this
	 * point we are running in the context of the double
	 * fault task (dftss) but the CPU's task points to
	 * the previous task (ktss) where the process context
	 * has been saved as the result of the task switch.
	 */
	movl	%gs:CPU_TSS, %eax	/* get the TSS */
	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
	jz	make_frame
	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
	movl	TSS_ESP0(%eax), %esp

	/*
	 * Clear the NT flag to avoid a task switch when the process
	 * finally pops the EFL off the stack via an iret.  Clear
	 * the TF flag since that is what the processor does for
	 * a normal exception.  Clear the IE flag so that interrupts
	 * remain disabled.
	 */
	movl	TSS_EFL(%eax), %ecx
	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
	pushl	%ecx
	popfl				/* restore the EFL */
	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
	lldt	%cx

	/*
	 * Restore process segment selectors.
	 */
	movw	TSS_DS(%eax), %ds
	movw	TSS_ES(%eax), %es
	movw	TSS_FS(%eax), %fs
	movw	TSS_GS(%eax), %gs

	/*
	 * Restore task segment selectors.
	 */
	movl	$KDS_SEL, TSS_DS(%eax)
	movl	$KDS_SEL, TSS_ES(%eax)
	movl	$KDS_SEL, TSS_SS(%eax)
	movl	$KFS_SEL, TSS_FS(%eax)
	movl	$KGS_SEL, TSS_GS(%eax)

	/*
	 * Clear the TS bit, the busy bits in both task
	 * descriptors, and switch tasks.
	 */
	clts
	leal	gdt0, %ecx
	movl	DFTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, DFTSS_SEL+4(%ecx)
	movl	KTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, KTSS_SEL+4(%ecx)
	movw	$KTSS_SEL, %cx
	ltr	%cx

	/*
	 * Restore part of the process registers.
	 */
	movl	TSS_EBP(%eax), %ebp
	movl	TSS_ECX(%eax), %ecx
	movl	TSS_ESI(%eax), %esi
	movl	TSS_EDI(%eax), %edi

make_frame:
	/*
	 * Make a trap frame.  Leave the error code (0) on
	 * the stack since the first word on a trap stack is
	 * unused anyway.
	 */
	pushl	%ebx			/ fault SS
	pushl	%edx			/ fault ESP
	pushl	TSS_EFL(%eax)		/ fault EFL
	pushl	TSS_CS(%eax)		/ fault CS
	pushl	TSS_EIP(%eax)		/ fault EIP
	pushl	$0			/ error code
	pushl	$T_DBLFLT		/ trap number 8
	movl	TSS_EBX(%eax), %ebx	/ restore EBX
	movl	TSS_EDX(%eax), %edx	/ restore EDX
	movl	TSS_EAX(%eax), %eax	/ restore EAX
	sti				/ enable interrupts
	jmp	cmntrap
	SET_SIZE(syserrtrap)

#endif	/* __i386 */
#endif	/* !__xpv */

	ENTRY_NP(overrun)
	push	$0
	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
	jmp	cmninttrap
	SET_SIZE(overrun)

	/*
	 * #TS
	 */
	ENTRY_NP(invtsstrap)
	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(invtsstrap)

	/*
	 * #NP
	 */
	ENTRY_NP(segnptrap)
	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(segnptrap)

	/*
	 * #SS
	 */
	ENTRY_NP(stktrap)
	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(stktrap)

	/*
	 * #GP
	 */
	ENTRY_NP(gptrap)
	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(gptrap)

	/*
	 * #PF
	 */
	ENTRY_NP(pftrap)
	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
	INTR_PUSH
#if defined(__xpv)

#if defined(__amd64)
	movq	%gs:CPU_VCPU_INFO, %r15
	movq	VCPU_INFO_ARCH_CR2(%r15), %r15	/* vcpu[].arch.cr2 */
#elif defined(__i386)
	movl	%gs:CPU_VCPU_INFO, %esi
	movl	VCPU_INFO_ARCH_CR2(%esi), %esi	/* vcpu[].arch.cr2 */
#endif	/* __i386 */

#else	/* __xpv */

#if defined(__amd64)
	movq	%cr2, %r15
#elif defined(__i386)
	movl	%cr2, %esi
#endif	/* __i386 */

#endif	/* __xpv */
	jmp	cmntrap_pushed
	SET_SIZE(pftrap)
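
	/*
	 * As with dbgtrap, pftrap hands the faulting address to
	 * cmntrap_pushed in %r15 (amd64) or %esi (i386).  Bare metal reads
	 * it straight from %cr2; under the hypervisor the handler instead
	 * reads the copy left in vcpu_info.arch.cr2, since a paravirtualized
	 * guest cannot read the privileged %cr2 register directly.
	 */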

#if !defined(__amd64)

	.globl	idt0_default_r

	/*
	 * #PF pentium bug workaround
	 */
	ENTRY_NP(pentium_pftrap)
	pushl	%eax
	movl	%cr2, %eax
	andl	$MMU_STD_PAGEMASK, %eax

	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */

	je	check_for_user_address
user_mode:
	popl	%eax
	pushl	$T_PGFLT	/* $14 */
	jmp	cmntrap
check_for_user_address:
	/*
	 * Before we assume that we have an unmapped trap on our hands,
	 * check to see if this is a fault from user mode.  If it is,
	 * we'll kick back into the page fault handler.
	 */
	movl	4(%esp), %eax	/* error code */
	andl	$PF_ERR_USER, %eax
	jnz	user_mode

	/*
	 * We now know that this is the invalid opcode trap.
	 */
	popl	%eax
	addl	$4, %esp	/* pop error code */
	jmp	invoptrap
	SET_SIZE(pentium_pftrap)

#endif	/* !__amd64 */

	ENTRY_NP(resvtrap)
	TRAP_NOERR(15)		/* (reserved)  */
	jmp	cmntrap
	SET_SIZE(resvtrap)

	/*
	 * #MF
	 */
	ENTRY_NP(ndperr)
	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
	jmp	cmninttrap
	SET_SIZE(ndperr)

	/*
	 * #AC
	 */
	ENTRY_NP(achktrap)
	TRAP_ERR(T_ALIGNMENT)	/* $17 */
	jmp	cmntrap
	SET_SIZE(achktrap)

	/*
	 * #MC
	 */
	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */

#if defined(__amd64)

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	SET_CPU_GSBASE

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
	TRACE_STAMP(%rdi)

	movq	%rsp, %rbp

	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp); */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#else

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	movl	%esp, %ecx
	pushl	%ecx		/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
	addl	$4, %esp	/* pop arg0 */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#endif

	/*
	 * #XF
	 */
	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmninttrap
	SET_SIZE(xmtrap)

	ENTRY_NP(invaltrap)
	TRAP_NOERR(30)		/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)

	ENTRY_NP(invalint)
	TRAP_NOERR(31)		/* even more so */
	jmp	cmnint
	SET_SIZE(invalint)

	.globl	fasttable

#if defined(__amd64)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	jmp	*(%r11)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	XPV_TRAP_POP

	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)

#if defined(__xpv)
	pushq	%r11
	pushq	%rcx
#endif
	jmp	gptrap
	SET_SIZE(fasttrap)

#elif defined(__i386)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %eip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subl	$2, (%esp)	/* XXX int insn 2-bytes */
	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */

	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)

#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	XPV_TRAP_POP
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	IRET
	/*NOTREACHED*/
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	IRET
	SET_SIZE(fast_null)

#endif	/* __i386 */

	/*
	 * Interrupts start at 32
	 */
#define	MKIVCT(n)			\
	ENTRY_NP(ivct/**/n)		\
	push	$0;			\
	push	$n - 0x20;		\
	jmp	cmnint;			\
	SET_SIZE(ivct/**/n)

	MKIVCT(32)
	MKIVCT(33)
	MKIVCT(34)
	MKIVCT(35)
	MKIVCT(36)
	MKIVCT(37)
	MKIVCT(38)
	MKIVCT(39)
	MKIVCT(40)
	MKIVCT(41)
	MKIVCT(42)
	MKIVCT(43)
	MKIVCT(44)
	MKIVCT(45)
	MKIVCT(46)
	MKIVCT(47)
	MKIVCT(48)
	MKIVCT(49)
	MKIVCT(50)
	MKIVCT(51)
	MKIVCT(52)
	MKIVCT(53)
	MKIVCT(54)
	MKIVCT(55)
	MKIVCT(56)
	MKIVCT(57)
	MKIVCT(58)
	MKIVCT(59)
	MKIVCT(60)
	MKIVCT(61)
	MKIVCT(62)
	MKIVCT(63)
	MKIVCT(64)
	MKIVCT(65)
	MKIVCT(66)
	MKIVCT(67)
	MKIVCT(68)
	MKIVCT(69)
	MKIVCT(70)
	MKIVCT(71)
	MKIVCT(72)
	MKIVCT(73)
	MKIVCT(74)
	MKIVCT(75)
	MKIVCT(76)
	MKIVCT(77)
	MKIVCT(78)
	MKIVCT(79)
	MKIVCT(80)
	MKIVCT(81)
	MKIVCT(82)
	MKIVCT(83)
	MKIVCT(84)
	MKIVCT(85)
	MKIVCT(86)
	MKIVCT(87)
	MKIVCT(88)
	MKIVCT(89)
	MKIVCT(90)
	MKIVCT(91)
	MKIVCT(92)
	MKIVCT(93)
	MKIVCT(94)
	MKIVCT(95)
	MKIVCT(96)
	MKIVCT(97)
	MKIVCT(98)
	MKIVCT(99)
	MKIVCT(100)
	MKIVCT(101)
	MKIVCT(102)
	MKIVCT(103)
	MKIVCT(104)
	MKIVCT(105)
	MKIVCT(106)
	MKIVCT(107)
	MKIVCT(108)
	MKIVCT(109)
	MKIVCT(110)
	MKIVCT(111)
	MKIVCT(112)
	MKIVCT(113)
	MKIVCT(114)
	MKIVCT(115)
	MKIVCT(116)
	MKIVCT(117)
	MKIVCT(118)
	MKIVCT(119)
	MKIVCT(120)
	MKIVCT(121)
	MKIVCT(122)
	MKIVCT(123)
	MKIVCT(124)
	MKIVCT(125)
	MKIVCT(126)
	MKIVCT(127)
	MKIVCT(128)
	MKIVCT(129)
	MKIVCT(130)
	MKIVCT(131)
	MKIVCT(132)
	MKIVCT(133)
	MKIVCT(134)
	MKIVCT(135)
	MKIVCT(136)
	MKIVCT(137)
	MKIVCT(138)
	MKIVCT(139)
	MKIVCT(140)
	MKIVCT(141)
	MKIVCT(142)
	MKIVCT(143)
	MKIVCT(144)
	MKIVCT(145)
	MKIVCT(146)
	MKIVCT(147)
	MKIVCT(148)
	MKIVCT(149)
	MKIVCT(150)
	MKIVCT(151)
	MKIVCT(152)
	MKIVCT(153)
	MKIVCT(154)
	MKIVCT(155)
	MKIVCT(156)
	MKIVCT(157)
	MKIVCT(158)
	MKIVCT(159)
	MKIVCT(160)
	MKIVCT(161)
	MKIVCT(162)
	MKIVCT(163)
	MKIVCT(164)
	MKIVCT(165)
	MKIVCT(166)
	MKIVCT(167)
	MKIVCT(168)
	MKIVCT(169)
	MKIVCT(170)
	MKIVCT(171)
	MKIVCT(172)
	MKIVCT(173)
	MKIVCT(174)
	MKIVCT(175)
	MKIVCT(176)
	MKIVCT(177)
	MKIVCT(178)
	MKIVCT(179)
	MKIVCT(180)
	MKIVCT(181)
	MKIVCT(182)
	MKIVCT(183)
	MKIVCT(184)
	MKIVCT(185)
	MKIVCT(186)
	MKIVCT(187)
	MKIVCT(188)
	MKIVCT(189)
	MKIVCT(190)
	MKIVCT(191)
	MKIVCT(192)
	MKIVCT(193)
	MKIVCT(194)
	MKIVCT(195)
	MKIVCT(196)
	MKIVCT(197)
	MKIVCT(198)
	MKIVCT(199)
	MKIVCT(200)
	MKIVCT(201)
	MKIVCT(202)
	MKIVCT(203)
	MKIVCT(204)
	MKIVCT(205)
	MKIVCT(206)
	MKIVCT(207)
	MKIVCT(208)
	MKIVCT(209)
	MKIVCT(210)
	MKIVCT(211)
	MKIVCT(212)
	MKIVCT(213)
	MKIVCT(214)
	MKIVCT(215)
	MKIVCT(216)
	MKIVCT(217)
	MKIVCT(218)
	MKIVCT(219)
	MKIVCT(220)
	MKIVCT(221)
	MKIVCT(222)
	MKIVCT(223)
	MKIVCT(224)
	MKIVCT(225)
	MKIVCT(226)
	MKIVCT(227)
	MKIVCT(228)
	MKIVCT(229)
	MKIVCT(230)
	MKIVCT(231)
	MKIVCT(232)
	MKIVCT(233)
	MKIVCT(234)
	MKIVCT(235)
	MKIVCT(236)
	MKIVCT(237)
	MKIVCT(238)
	MKIVCT(239)
	MKIVCT(240)
	MKIVCT(241)
	MKIVCT(242)
	MKIVCT(243)
	MKIVCT(244)
	MKIVCT(245)
	MKIVCT(246)
	MKIVCT(247)
	MKIVCT(248)
	MKIVCT(249)
	MKIVCT(250)
	MKIVCT(251)
	MKIVCT(252)
	MKIVCT(253)
	MKIVCT(254)
	MKIVCT(255)

#endif	/* __lint */
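
/*
 * For reference, one entry from the MKIVCT() table above expands roughly
 * as follows, taking vector 40 as an arbitrary example (this is only an
 * illustration of the macro; being a comment, it is not assembled):
 *
 *	ENTRY_NP(ivct40)
 *	push	$0
 *	push	$40 - 0x20
 *	jmp	cmnint
 *	SET_SIZE(ivct40)
 *
 * Each autovectored interrupt thus enters cmnint with a zero error code
 * and its vector number relative to 0x20 (8, in this example) on the
 * stack.
 */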