/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/traptrace.h>
#include <sys/machparam.h>

/*
 * only one routine in this file is interesting to lint
 */

#if defined(__lint)

void
ndptrap_frstor(void)
{}

#else

#include "assym.h"

/*
 * push $0 on stack for traps that do not
 * generate an error code. This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 */
#define	TRAP_NOERR(trapno)	\
	push	$0;		\
	push	$trapno

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	push	$trapno
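
/*
 * With either macro the stack ends up with the trap number on top of
 * the error code, which is the layout the common handlers (cmntrap,
 * cmninttrap) expect.
 */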

	/*
	 * #DE
	 */
	ENTRY_NP(div0trap)
	TRAP_NOERR(T_ZERODIV)	/* $0 */
	jmp	cmntrap
	SET_SIZE(div0trap)

#if defined(__amd64)
	/*
	 * #DB
	 *
	 * If we get here as a result of single-stepping a sysenter
	 * instruction, we suddenly find ourselves taking a #db
	 * in kernel mode -before- we've swapgs'ed.  So before we can
	 * take the trap, we do the swapgs here, and fix the return
	 * %rip in trap() so that we return immediately after the
	 * swapgs in the sysenter handler to avoid doing the swapgs again.
	 *
	 * Nobody said that the design of sysenter was particularly
	 * elegant, did they?
	 */
	ENTRY_NP(dbgtrap)
	pushq	%r11
	leaq	sys_sysenter(%rip), %r11
	cmpq	%r11, 8(%rsp)
	jne	1f
	swapgs
1:	popq	%r11
	TRAP_NOERR(T_SGLSTP)	/* $1 */
	jmp	cmntrap
	SET_SIZE(dbgtrap)

#elif defined(__i386)
	/*
	 * #DB
	 */
	ENTRY_NP(dbgtrap)
	TRAP_NOERR(T_SGLSTP)	/* $1 */
	jmp	cmntrap
	SET_SIZE(dbgtrap)
#endif

#if defined(__amd64)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase else clear
 * gs and set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs for all processors till we find a match for the gdt
 * of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 * Note that sgdt stores a 2-byte limit followed by the base address, so
 * writing the descriptor at 6(%rsp) leaves the 8-byte base at 8(%rsp).
 */
#define	SET_CPU_GSBASE							\
	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
	movq	%rax, REGOFF_RAX(%rsp);					\
	movq	%rbx, REGOFF_RBX(%rsp);					\
	movq	%rcx, REGOFF_RCX(%rsp);					\
	movq	%rdx, REGOFF_RDX(%rsp);					\
	movq	%rbp, REGOFF_RBP(%rsp);					\
	movq	%rsp, %rbp;						\
	subq	$16, %rsp;		/* space for gdt */		\
	sgdt	6(%rsp);						\
	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
	xorl	%ebx, %ebx;		/* loop index */		\
	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
1:									\
	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
	je	2f;			/* yes, continue */		\
	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
	je	3f;			/* yes, go set gsbase */	\
2:									\
	incl	%ebx;			/* i++ */			\
	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
	jb	1b;			/* yes, loop */			\
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
3:									\
	movl	$MSR_AMD_KGSBASE, %ecx;					\
	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
	jne	4f;			/* no, go set KGSBASE */	\
	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
	mfence;				/* OPTERON_ERRATUM_88 */	\
4:									\
	movq	%rax, %rdx;		/* write base register */	\
	shrq	$32, %rdx;						\
	wrmsr;								\
	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
	movq	REGOFF_RCX(%rbp), %rcx;					\
	movq	REGOFF_RBX(%rbp), %rbx;					\
	movq	REGOFF_RAX(%rbp), %rax;					\
	movq	%rbp, %rsp;						\
	movq	REGOFF_RBP(%rsp), %rbp;					\
	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */
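
/*
 * SET_CPU_GSBASE is used below by nmiint, syserrtrap, segnptrap, gptrap
 * and mcetrap -- traps that may be taken in kernel mode before the
 * kernel gsbase has been established, and which therefore cannot rely
 * on %gs-relative accesses until the macro has run.
 */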
#endif	/* __amd64 */

	.globl	nmivect
	.globl	idt0_default_r

#if defined(__amd64)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	SET_CPU_GSBASE

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH

	DISABLE_INTR_FLAGS		/* and set the kernel flags */

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)

	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi
	call	av_dispatch_nmivect

	INTR_POP
	iretq
	SET_SIZE(nmiint)

#elif defined(__i386)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH

	/*
	 * setup pointer to reg struct as 2nd argument.
	 */
	movl	%esp, %ebp
	pushl	%ebp

	DISABLE_INTR_FLAGS

	movl	nmivect, %esi		/* get autovect structure */
loop1:
	cmpl	$0, %esi		/* if pointer is null  */
	je	.intr_ret		/*	we're done */
	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
	pushl	AV_INTARG1(%esi)	/* get argument to interrupt routine */
	call	*%edx			/* call interrupt routine with arg */
	addl	$4, %esp
	movl	AV_LINK(%esi), %esi	/* get next routine on list */
	jmp	loop1			/* keep looping until end of list */

.intr_ret:
	addl	$4, %esp		/* 'pop' %ebp */
	INTR_POP_USER
	iret
	SET_SIZE(nmiint)

#endif	/* __i386 */

	/*
	 * #BP
	 */
	ENTRY_NP(brktrap)
#if defined(__amd64)
	cmpw	$KCS_SEL, 8(%rsp)
	je	bp_jmpud
#endif

	TRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	dtrace_trap

#if defined(__amd64)
bp_jmpud:
	/*
	 * This is a breakpoint in the kernel -- it is very likely that this
	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
	 * we must decrement the trapping %rip to make it appear as a fault.
	 * We then push a non-zero error code to indicate that this is coming
	 * from #BP.
	 */
	decq	(%rsp)
	push	$1			/* error code -- non-zero for #BP */
	jmp	ud_kernel
#endif

	SET_SIZE(brktrap)

	/*
	 * #OF
	 */
	ENTRY_NP(ovflotrap)
	TRAP_NOERR(T_OVFLW)	/* $4 */
	jmp	cmntrap
	SET_SIZE(ovflotrap)

	/*
	 * #BR
	 */
	ENTRY_NP(boundstrap)
	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
	jmp	cmntrap
	SET_SIZE(boundstrap)

#if defined(__amd64)

	/*
	 * #UD
	 */
	ENTRY_NP(invoptrap)
	cmpw	$KCS_SEL, 8(%rsp)
	jne	ud_user

	push	$0			/* error code -- zero for #UD */
ud_kernel:
	push	$0xdddd			/* a dummy trap number */
	TRAP_PUSH
	movq	REGOFF_RIP(%rsp), %rdi
	movq	REGOFF_RSP(%rsp), %rsi
	movq	REGOFF_RAX(%rsp), %rdx
	pushq	(%rsi)
	movq	%rsp, %rsi
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addq	$8, %rsp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	ud_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	ud_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	ud_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	ud_ret
	jmp	ud_trap

ud_push:
	/*
	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
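	/*
	 * For reference, after the INTR_POP, subq and pushq %rax below, the
	 * stack relative to %rsp is:
	 *	 0	saved %rax (temp)
	 *	 8	scratch (the iret frame is copied down to here)
	 *	16	scratch
	 *	24	calling %rip
	 *	32	calling %cs
	 *	40	calling %rflags
	 *	48	calling %rsp
	 *	56	calling %ss
	 */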
	INTR_POP
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

ud_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
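	/*
	 * For reference, after the INTR_POP and pushq %rax below, the stack
	 * holds the saved %rax at 0(%rsp) followed by the iret frame:
	 * %rip at 8, %cs at 16, %rflags at 24, %rsp at 32 and %ss at 40.
	 */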
	INTR_POP
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

ud_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %rip by one.
	 */
	INTR_POP
	incq	(%rsp)
	iretq

ud_ret:
	INTR_POP
	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

ud_trap:
	/*
	 * We're going to let the kernel handle this as a normal #UD.  If,
	 * however, we came through #BP and are spoofing #UD (in this case,
	 * the stored error value will be non-zero), we need to de-spoof
	 * the trap by incrementing %rip and pushing T_BPTFLT.
	 */
	cmpq	$0, REGOFF_ERR(%rsp)
	je	ud_ud
	incq	REGOFF_RIP(%rsp)
	addq	$REGOFF_RIP, %rsp
	TRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	cmntrap

ud_ud:
	addq	$REGOFF_RIP, %rsp
ud_user:
	TRAP_NOERR(T_ILLINST)
	jmp	cmntrap
	SET_SIZE(invoptrap)

#elif defined(__i386)

	/*
	 * #UD
	 */
	ENTRY_NP(invoptrap)
	/*
	 * If we are taking an invalid opcode trap while in the kernel, this
	 * is likely an FBT probe point.
	 */
	pushl	%gs
	cmpw	$KGS_SEL, (%esp)
	jne	8f
	addl	$4, %esp
	pusha
	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	1f
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	2f
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	3f
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	4f
	jmp	7f

1:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popa
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	iret				/* return from interrupt */

2:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above:  we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popa
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	iret				/* return from interrupt */

3:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	-12(%esp), %esp		/* set stack pointer */
	subl	$8, %esp		/* adjust for three pushes, one pop */
	iret				/* return from interrupt */

4:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
	iret

7:
	popa
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
8:
	addl	$4, %esp
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
	SET_SIZE(invoptrap)

#endif	/* __i386 */

#if defined(__amd64)

	/*
	 * #NM
	 */
	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	pushq	%rax
	pushq	%rbx
	cmpw	$KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
	jne	1f
	LOADCPU(%rbx)			/* if yes, don't swapgs */
	jmp	2f
1:
	swapgs				/* if from user, need swapgs */
	LOADCPU(%rbx)
	swapgs
2:
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rbx), %rax	/* %rax = curthread */
	movl	$FPU_EN, %ebx
	movq	T_LWP(%rax), %rax	/* %rax = lwp */
	testq	%rax, %rax
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU	!= 0
	addq	$LWP_PCB_FPU, %rax	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%ebx, PCB_FPU_FLAGS(%rax)
	jz	.handle_in_trap		/* must be the first fault */
	clts
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rax)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rax
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	fxrstor	(%rax)
	popq	%rbx
	popq	%rax
	iretq

.handle_in_trap:
	popq	%rbx
	popq	%rax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#elif defined(__i386)

	/*
	 * #NM
	 */
	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	pushl	%eax
	pushl	%ebx
	pushl	%ds
	pushl	%gs
	movl	$KDS_SEL, %ebx
	movw	%bx, %ds
	movl	$KGS_SEL, %eax
	movw	%ax, %gs
	LOADCPU(%ebx)
	cmpl	$0, fpu_exists
	je	.handle_in_trap		/* let trap handle no fp case */
	movl	CPU_THREAD(%ebx), %eax	/* %eax = curthread */
	movl	$FPU_EN, %ebx
	movl	T_LWP(%eax), %eax	/* %eax = lwp */
	testl	%eax, %eax
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addl	$LWP_PCB_FPU, %eax	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%ebx, PCB_FPU_FLAGS(%eax)
	jz	.handle_in_trap		/* must be the first fault */
	clts
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%eax)
#if FPU_CTX_FPU_REGS != 0
	addl	$FPU_CTX_FPU_REGS, %eax
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in kernel
	 * due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl	_patch_fxrstor_eax
_patch_fxrstor_eax:
	frstor	(%eax)		/* may be patched to fxrstor */
	nop			/* (including this byte) */
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	iret

.handle_in_trap:
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	pushl	$0
	pushl	$T_NOEXTFLT	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __i386 */

#if defined(__amd64)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	pushq	$T_DBLFLT

	SET_CPU_GSBASE

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
	 * have reached this point after encountering a #df in kmdb.  If that
	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
	 * entered from the panic flow.  We'll suppress that entry by setting
	 * nopanicdebug.
	 */
	pushq	%rax
	subq	$DESCTBR_SIZE, %rsp
	sidt	(%rsp)
	movq	%gs:CPU_IDT, %rax
	cmpq	%rax, DTR_BASE(%rsp)
	je	1f

	movq	%rax, DTR_BASE(%rsp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
	lidt	(%rsp)

	movl	$1, nopanicdebug

1:	addq	$DESCTBR_SIZE, %rsp
	popq	%rax

	DFTRAP_PUSH

	/*
	 * freeze trap trace.
	 */
#ifdef TRAPTRACE
	leaq	trap_trace_freeze(%rip), %r11
	incl	(%r11)
#endif

	ENABLE_INTR_FLAGS

	movq	%rsp, %rdi	/* &regs */
	xorl	%esi, %esi	/* clear address */
	xorl	%edx, %edx	/* cpuid = 0 */
	call	trap

	SET_SIZE(syserrtrap)

#elif defined(__i386)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	cli				/* disable interrupts */

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
	 * have reached this point after encountering a #df in kmdb.  If that
	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
	 * entered from the panic flow.  We'll suppress that entry by setting
	 * nopanicdebug.
	 */
	subl	$DESCTBR_SIZE, %esp
	movl	%gs:CPU_IDT, %eax
	sidt	(%esp)
	cmpl	DTR_BASE(%esp), %eax
	je	1f

	movl	%eax, DTR_BASE(%esp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
	lidt	(%esp)

	movl	$1, nopanicdebug

1:	addl	$DESCTBR_SIZE, %esp

	/*
	 * Check the CPL in the TSS to see what mode
	 * (user or kernel) we took the fault in.  At this
	 * point we are running in the context of the double
	 * fault task (dftss) but the CPU's task points to
	 * the previous task (ktss) where the process context
	 * has been saved as the result of the task switch.
	 */
	movl	%gs:CPU_TSS, %eax	/* get the TSS */
	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
	jz	make_frame
	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
	movl	TSS_ESP0(%eax), %esp

	/*
	 * Clear the NT flag to avoid a task switch when the process
	 * finally pops the EFL off the stack via an iret.  Clear
	 * the TF flag since that is what the processor does for
	 * a normal exception. Clear the IE flag so that interrupts
	 * remain disabled.
	 */
	movl	TSS_EFL(%eax), %ecx
	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
	pushl	%ecx
	popfl				/* restore the EFL */
	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
	lldt	%cx

	/*
	 * Restore process segment selectors.
	 */
	movw	TSS_DS(%eax), %ds
	movw	TSS_ES(%eax), %es
	movw	TSS_FS(%eax), %fs
	movw	TSS_GS(%eax), %gs

	/*
	 * Restore task segment selectors.
	 */
	movl	$KDS_SEL, TSS_DS(%eax)
	movl	$KDS_SEL, TSS_ES(%eax)
	movl	$KDS_SEL, TSS_SS(%eax)
	movl	$KFS_SEL, TSS_FS(%eax)
	movl	$KGS_SEL, TSS_GS(%eax)

	/*
	 * Clear the TS bit, the busy bits in both task
	 * descriptors, and switch tasks.
	 */
	clts
	leal	gdt0, %ecx
	movl	DFTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, DFTSS_SEL+4(%ecx)
	movl	KTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, KTSS_SEL+4(%ecx)
	movw	$KTSS_SEL, %cx
	ltr	%cx

	/*
	 * Restore part of the process registers.
	 */
	movl	TSS_EBP(%eax), %ebp
	movl	TSS_ECX(%eax), %ecx
	movl	TSS_ESI(%eax), %esi
	movl	TSS_EDI(%eax), %edi

make_frame:
	/*
	 * Make a trap frame.  Leave the error code (0) on
	 * the stack since the first word on a trap stack is
	 * unused anyway.
	 */
	pushl	%ebx			/ fault SS
	pushl	%edx			/ fault ESP
	pushl	TSS_EFL(%eax)		/ fault EFL
	pushl	TSS_CS(%eax)		/ fault CS
	pushl	TSS_EIP(%eax)		/ fault EIP
	pushl	$0			/ error code
	pushl	$T_DBLFLT		/ trap number 8
	movl	TSS_EBX(%eax), %ebx	/ restore EBX
	movl	TSS_EDX(%eax), %edx	/ restore EDX
	movl	TSS_EAX(%eax), %eax	/ restore EAX
	sti				/ enable interrupts
	jmp	cmntrap
	SET_SIZE(syserrtrap)

#endif	/* __i386 */

	ENTRY_NP(overrun)
	push	$0
	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
	jmp	cmninttrap
	SET_SIZE(overrun)

	/*
	 * #TS
	 */
	ENTRY_NP(invtsstrap)
	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(invtsstrap)

	/*
	 * #NP
	 */
	ENTRY_NP(segnptrap)
	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(segnptrap)

	/*
	 * #SS
	 */
	ENTRY_NP(stktrap)
	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(stktrap)

	/*
	 * #GP
	 */
	ENTRY_NP(gptrap)
	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(gptrap)

	/*
	 * #PF
	 */
	ENTRY_NP(pftrap)
	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(pftrap)

#if !defined(__amd64)

	/*
	 * #PF pentium bug workaround
	 */
	ENTRY_NP(pentium_pftrap)
	pushl	%eax
	movl	%cr2, %eax
	andl	$MMU_STD_PAGEMASK, %eax

	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */

	je	check_for_user_address
user_mode:
	popl	%eax
	pushl	$T_PGFLT	/* $14 */
	jmp	cmntrap
check_for_user_address:
	/*
	 * Before we assume that we have an unmapped trap on our hands,
	 * check to see if this is a fault from user mode.  If it is,
	 * we'll kick back into the page fault handler.
	 */
	movl	4(%esp), %eax	/* error code */
	andl	$PF_ERR_USER, %eax
	jnz	user_mode

	/*
	 * We now know that this is the invalid opcode trap.
	 */
	popl	%eax
	addl	$4, %esp	/* pop error code */
	jmp	invoptrap
	SET_SIZE(pentium_pftrap)

#endif	/* !__amd64 */

	ENTRY_NP(resvtrap)
	TRAP_NOERR(15)		/* (reserved)  */
	jmp	cmntrap
	SET_SIZE(resvtrap)

	/*
	 * #MF
	 */
	ENTRY_NP(ndperr)
	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
	jmp	cmninttrap
	SET_SIZE(ndperr)

	/*
	 * #AC
	 */
	ENTRY_NP(achktrap)
	TRAP_ERR(T_ALIGNMENT)	/* $17 */
	jmp	cmntrap
	SET_SIZE(achktrap)

	/*
	 * #MC
	 */
	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */

#if defined(__amd64)

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */
	SET_CPU_GSBASE
	INTR_PUSH

	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
	TRACE_STAMP(%rdi)

	DISABLE_INTR_FLAGS
	movq	%rsp, %rbp

	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp); */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#else

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */
	INTR_PUSH

	DISABLE_INTR_FLAGS
	movl	%esp, %ebp

	movl	%esp, %ecx
	pushl	%ecx		/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
	addl	$4, %esp	/* pop arg0 */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#endif

	/*
	 * #XF
	 */
	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmntrap
	SET_SIZE(xmtrap)

	ENTRY_NP(invaltrap)
	TRAP_NOERR(30)		/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)

	ENTRY_NP(invalint)
	TRAP_NOERR(31)		/* even more so */
	jmp	cmnint
	SET_SIZE(invalint)

	.globl	fasttable

#if defined(__amd64)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	jmp	*(%r11)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#elif defined(__i386)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %eip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subl	$2, (%esp)	/* XXX int insn 2-bytes */
	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */

	ENTRY_NP(dtrace_fasttrap)
	TRAP_NOERR(T_DTRACE_PROBE)
	jmp	dtrace_trap
	SET_SIZE(dtrace_fasttrap)

	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)

#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	iretq
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	iret
	SET_SIZE(fast_null)

#endif	/* __i386 */

	/*
	 * Interrupts start at 32
	 */
#define MKIVCT(n)			\
	ENTRY_NP(ivct/**/n)		\
	push	$0;			\
	push	$n - 0x20;		\
	jmp	cmnint;			\
	SET_SIZE(ivct/**/n)

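	/*
	 * Each ivctN entry below pushes a zero pseudo error code and its
	 * vector number less the base interrupt vector (0x20), then joins
	 * the common interrupt path at cmnint.  The empty comment in MKIVCT
	 * is the traditional-cpp idiom for pasting the vector number onto
	 * the symbol name.
	 */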
	MKIVCT(32)
	MKIVCT(33)
	MKIVCT(34)
	MKIVCT(35)
	MKIVCT(36)
	MKIVCT(37)
	MKIVCT(38)
	MKIVCT(39)
	MKIVCT(40)
	MKIVCT(41)
	MKIVCT(42)
	MKIVCT(43)
	MKIVCT(44)
	MKIVCT(45)
	MKIVCT(46)
	MKIVCT(47)
	MKIVCT(48)
	MKIVCT(49)
	MKIVCT(50)
	MKIVCT(51)
	MKIVCT(52)
	MKIVCT(53)
	MKIVCT(54)
	MKIVCT(55)
	MKIVCT(56)
	MKIVCT(57)
	MKIVCT(58)
	MKIVCT(59)
	MKIVCT(60)
	MKIVCT(61)
	MKIVCT(62)
	MKIVCT(63)
	MKIVCT(64)
	MKIVCT(65)
	MKIVCT(66)
	MKIVCT(67)
	MKIVCT(68)
	MKIVCT(69)
	MKIVCT(70)
	MKIVCT(71)
	MKIVCT(72)
	MKIVCT(73)
	MKIVCT(74)
	MKIVCT(75)
	MKIVCT(76)
	MKIVCT(77)
	MKIVCT(78)
	MKIVCT(79)
	MKIVCT(80)
	MKIVCT(81)
	MKIVCT(82)
	MKIVCT(83)
	MKIVCT(84)
	MKIVCT(85)
	MKIVCT(86)
	MKIVCT(87)
	MKIVCT(88)
	MKIVCT(89)
	MKIVCT(90)
	MKIVCT(91)
	MKIVCT(92)
	MKIVCT(93)
	MKIVCT(94)
	MKIVCT(95)
	MKIVCT(96)
	MKIVCT(97)
	MKIVCT(98)
	MKIVCT(99)
	MKIVCT(100)
	MKIVCT(101)
	MKIVCT(102)
	MKIVCT(103)
	MKIVCT(104)
	MKIVCT(105)
	MKIVCT(106)
	MKIVCT(107)
	MKIVCT(108)
	MKIVCT(109)
	MKIVCT(110)
	MKIVCT(111)
	MKIVCT(112)
	MKIVCT(113)
	MKIVCT(114)
	MKIVCT(115)
	MKIVCT(116)
	MKIVCT(117)
	MKIVCT(118)
	MKIVCT(119)
	MKIVCT(120)
	MKIVCT(121)
	MKIVCT(122)
	MKIVCT(123)
	MKIVCT(124)
	MKIVCT(125)
	MKIVCT(126)
	MKIVCT(127)
	MKIVCT(128)
	MKIVCT(129)
	MKIVCT(130)
	MKIVCT(131)
	MKIVCT(132)
	MKIVCT(133)
	MKIVCT(134)
	MKIVCT(135)
	MKIVCT(136)
	MKIVCT(137)
	MKIVCT(138)
	MKIVCT(139)
	MKIVCT(140)
	MKIVCT(141)
	MKIVCT(142)
	MKIVCT(143)
	MKIVCT(144)
	MKIVCT(145)
	MKIVCT(146)
	MKIVCT(147)
	MKIVCT(148)
	MKIVCT(149)
	MKIVCT(150)
	MKIVCT(151)
	MKIVCT(152)
	MKIVCT(153)
	MKIVCT(154)
	MKIVCT(155)
	MKIVCT(156)
	MKIVCT(157)
	MKIVCT(158)
	MKIVCT(159)
	MKIVCT(160)
	MKIVCT(161)
	MKIVCT(162)
	MKIVCT(163)
	MKIVCT(164)
	MKIVCT(165)
	MKIVCT(166)
	MKIVCT(167)
	MKIVCT(168)
	MKIVCT(169)
	MKIVCT(170)
	MKIVCT(171)
	MKIVCT(172)
	MKIVCT(173)
	MKIVCT(174)
	MKIVCT(175)
	MKIVCT(176)
	MKIVCT(177)
	MKIVCT(178)
	MKIVCT(179)
	MKIVCT(180)
	MKIVCT(181)
	MKIVCT(182)
	MKIVCT(183)
	MKIVCT(184)
	MKIVCT(185)
	MKIVCT(186)
	MKIVCT(187)
	MKIVCT(188)
	MKIVCT(189)
	MKIVCT(190)
	MKIVCT(191)
	MKIVCT(192)
	MKIVCT(193)
	MKIVCT(194)
	MKIVCT(195)
	MKIVCT(196)
	MKIVCT(197)
	MKIVCT(198)
	MKIVCT(199)
	MKIVCT(200)
	MKIVCT(201)
	MKIVCT(202)
	MKIVCT(203)
	MKIVCT(204)
	MKIVCT(205)
	MKIVCT(206)
	MKIVCT(207)
	MKIVCT(208)
	MKIVCT(209)
	MKIVCT(210)
	MKIVCT(211)
	MKIVCT(212)
	MKIVCT(213)
	MKIVCT(214)
	MKIVCT(215)
	MKIVCT(216)
	MKIVCT(217)
	MKIVCT(218)
	MKIVCT(219)
	MKIVCT(220)
	MKIVCT(221)
	MKIVCT(222)
	MKIVCT(223)
	MKIVCT(224)
	MKIVCT(225)
	MKIVCT(226)
	MKIVCT(227)
	MKIVCT(228)
	MKIVCT(229)
	MKIVCT(230)
	MKIVCT(231)
	MKIVCT(232)
	MKIVCT(233)
	MKIVCT(234)
	MKIVCT(235)
	MKIVCT(236)
	MKIVCT(237)
	MKIVCT(238)
	MKIVCT(239)
	MKIVCT(240)
	MKIVCT(241)
	MKIVCT(242)
	MKIVCT(243)
	MKIVCT(244)
	MKIVCT(245)
	MKIVCT(246)
	MKIVCT(247)
	MKIVCT(248)
	MKIVCT(249)
	MKIVCT(250)
	MKIVCT(251)
	MKIVCT(252)
	MKIVCT(253)
	MKIVCT(254)
	MKIVCT(255)

#endif	/* __lint */