/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if 0 /* JG */
#include "opt_atpic.h"
#endif

#include <machine/asmacros.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/segments.h>

#include "assym.s"

	.text

	.globl	lwkt_switch_return

/*****************************************************************************/
/* Trap handling                                                             */
/*****************************************************************************/
/*
 * Trap and fault vector routines.
 *
 * All traps are 'interrupt gates', SDT_SYSIGT.  An interrupt gate pushes
 * state on the stack but also disables interrupts.  This is important for
 * our use of the swapgs instruction.  We cannot be interrupted
 * until the GS.base value is correct.  For most traps, we automatically
 * then enable interrupts if the interrupted context had them enabled.
 *
 * The cpu will push a certain amount of state onto the kernel stack for
 * the current process.  See x86_64/include/frame.h.
 * The current RFLAGS (status register, which includes
 * the interrupt disable state prior to the trap), the code segment register,
 * and the return instruction pointer are pushed by the cpu.  The cpu
 * will also push an 'error' code for certain traps.  We push a dummy
 * error code for those traps where the cpu doesn't in order to maintain
 * a consistent frame.  We also push a contrived 'trap number'.
 *
 * The cpu does not push the general registers; we must do that, and we
 * must restore them prior to calling 'iret'.  The cpu adjusts the %cs and
 * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
 * must load them with appropriate values for supervisor mode operation.
 */
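
/*
 * For reference, a sketch of the architecturally defined portion (not
 * code from this file): on a 64-bit interrupt gate the cpu pushes, from
 * the highest address down,
 *
 *	%ss, %rsp, %rflags, %cs, %rip, [error code, for some vectors]
 *
 * The vectors below then store TF_TRAPNO/TF_ADDR/TF_ERR/TF_XFLAGS, and
 * the PUSH_FRAME* macros are what save the general registers beneath
 * that.
 */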

MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)

/*
 * Interrupts must be disabled for all traps, otherwise horrible %gs
 * issues will occur.
 */

/* Regular traps; the cpu does not supply tf_err for these. */
#define	TRAP(a)	 \
	PUSH_FRAME_TFRIP ;			\
	movq $0,TF_XFLAGS(%rsp) ;		\
	movq $(a),TF_TRAPNO(%rsp) ;		\
	movq $0,TF_ADDR(%rsp) ;			\
	movq $0,TF_ERR(%rsp) ;			\
	jmp alltraps

/* This group of traps has tf_err already pushed by the cpu */
#define	TRAP_ERR(a)				\
	PUSH_FRAME_TFERR ;			\
	movq $(a),TF_TRAPNO(%rsp) ;		\
	movq $0,TF_ADDR(%rsp) ;			\
	movq $0,TF_XFLAGS(%rsp) ;		\
	jmp alltraps

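/*
 * Illustrative expansion (a sketch, not additional emitted code):
 * IDTVEC(div) below, combined with TRAP(T_DIVIDE), assembles to
 * roughly:
 *
 *	Xdiv:
 *		PUSH_FRAME_TFRIP
 *		movq	$0,TF_XFLAGS(%rsp)
 *		movq	$T_DIVIDE,TF_TRAPNO(%rsp)
 *		movq	$0,TF_ADDR(%rsp)
 *		movq	$0,TF_ERR(%rsp)
 *		jmp	alltraps
 */
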
/*
 * Due to a historical artifact, it is possible for a #DB exception
 * to occur in certain bad places that would normally be protected by
 * the interrupt gate's interrupt disablement.
 *
 * Due to this possibly occurring in the system call entry code, we also
 * run #DB on an ist2 stack to force the cpu to load a new %rsp, otherwise
 * it might push the cpu exception frame onto the user stack.  To make things
 * easier we just point ist2 at our trampoline area.
 */
IDTVEC(dbg)
#ifdef DIRECT_DISALLOW_SS_CPUBUG
	/*
	 * Directly disallow #DB faults which can occur at critical points
	 * in the code due to a historical artifact of how the cpu operates.
	 * %gs state might not match RPL.  Test the %rip and iretq immediately
	 * (valid %gs and %cr3 state not needed).  If we don't need kernel
	 * reporting we can enable this and it's a bit safer from unintended
	 * consequences.
	 *
	 * If this is not enabled the kernel still catches the problem.  It
	 * will report the problem and continue properly.
	 */
	cmpq	$Xbpt,0(%rsp)
	je	200f
	cmpq	$Xfast_syscall,0(%rsp)
	je	200f
#endif

	/*
	 * Ok, regardless of the RPL mask in the trap frame, we took
	 * the trap on a separate stack via ist2.  This means we
	 * must copy it appropriately.
	 *
	 * If coming from userland we can skip directly to the normal
	 * TRAP code because it will handle the fact that we are on an
	 * alternative stack (dbgstack, set by ist2), even though it isn't
	 * the trampoline stack.  The frame will be moved to the correct
	 * kernel stack.
	 */
	testb	$SEL_RPL_MASK,TF_CS-TF_RIP(%rsp)
	jnz	210f				/* jnz from userland */

	/*
	 * From kernel - %gs and %cr3 may be inconsistent.  Save original
	 * values and load consistent values, restore after return.
	 *
	 * The trap handler is NOT allowed to block for this case.
	 */
	subq	$TR_RIP, %rsp
	movq	%rax, TR_RAX(%rsp)
	movq	%rcx, TR_RCX(%rsp)
	movq	%rdx, TR_RDX(%rsp)

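	/*
	 * NOTE: rdmsr/wrmsr access the 64-bit MSR selected by %ecx as a
	 *	 high:low pair in %edx:%eax, hence the shift-and-merge
	 *	 sequences below and in the restore path.
	 */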
	cld
	movq	%cr3,%rax			/* save CR3 */
	movq	%rax, TR_PCB_CR3_SAVED(%rsp)
	movl	$MSR_GSBASE,%ecx		/* save %gs */
	rdmsr
	shlq	$32,%rdx
	orq	%rdx,%rax
	movq	%rax, TR_PCB_GS_SAVED(%rsp)
	movq	TR_PCB_GS_KERNEL(%rsp),%rdx	/* retrieve kernel %gs */
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
	movq	PCPU(trampoline)+TR_PCB_CR3,%rax
	movq	%rax,%cr3

	movq	TR_RDX(%rsp), %rdx
	movq	TR_RCX(%rsp), %rcx
	movq	TR_RAX(%rsp), %rax
	addq	$TR_RIP, %rsp

	/*
	 * We are coming from the kernel.
	 *
	 * We are on the IST2 stack and, in fact, we have to *STAY* on this
	 * stack, so we no longer try to shift our frame to the kernel %rsp
	 * in the trap frame, since this %rsp might actually be a user %rsp
	 * in the mov mem,%ss + syscall DBG trap case.
	 *
	 * Run the normal trap.  Because TF_CS is at a kernel RPL, the
	 * normal code will skip the usual swapgs and KMMU (trampoline)
	 * code.  We've handled the rest.
	 *
	 * NOTE: at this point the trampframe is above the normal stack
	 *	 frame.  The trap code will be ignorant of the special
	 *	 TR_* registers above the cpu hardware frame portion,
	 *	 and the TR_* registers below it will be overwritten.
	 */
	PUSH_FRAME_TFRIP
	movq	$0,TF_XFLAGS(%rsp)
	movq	$T_TRCTRAP,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)

	FAKE_MCOUNT(TF_RIP(%rsp))
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT

	/*
	 * Pop the frame (since we're coming from kernel mode, this will
	 * not mess with %cr3 or %gs), then restore %cr3 and %gs for our
	 * iretq.  Not optimal but more readable and this is not a
	 * critical path.
	 */
	POP_FRAME(nop)

	subq	$TR_RIP, %rsp
	movq	%rax, TR_RAX(%rsp)
	movq	%rcx, TR_RCX(%rsp)
	movq	%rdx, TR_RDX(%rsp)

	movl	$MSR_GSBASE,%ecx		/* restore %gs */
	movq	TR_PCB_GS_SAVED(%rsp),%rdx
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr

	movq	TR_PCB_CR3_SAVED(%rsp),%rax	/* restore %cr3 */
	movq	%rax,%cr3

	movq	TR_RAX(%rsp),%rax
	movq	TR_RCX(%rsp),%rcx
	movq	TR_RDX(%rsp),%rdx
	addq	$TR_RIP, %rsp

	/*
	 * Direct iretq. No point jumping to doreti because the
	 * exception code that deals with iretq faults can't handle
	 * non-deterministic %gs/%cr3 state.
	 */
#ifdef DIRECT_DISALLOW_SS_CPUBUG
200:
#endif
	iretq

	/*
	 * From userland (normal trap path)
	 */
210:
	TRAP(T_TRCTRAP)
	/* NOT REACHED */

IDTVEC(bpt)
	TRAP(T_BPTFLT)
IDTVEC(div)
	TRAP(T_DIVIDE)
IDTVEC(ofl)
	TRAP(T_OFLOW)
IDTVEC(bnd)
	TRAP(T_BOUND)
IDTVEC(ill)
	TRAP(T_PRIVINFLT)
IDTVEC(dna)
	TRAP(T_DNA)
IDTVEC(fpusegm)
	TRAP(T_FPOPFLT)
IDTVEC(mchk)
	TRAP(T_MCHK)
IDTVEC(fpu)
	TRAP(T_ARITHTRAP)
IDTVEC(xmm)
	TRAP(T_XMMFLT)

IDTVEC(tss)
	TRAP_ERR(T_TSSFLT)
IDTVEC(missing)
	TRAP_ERR(T_SEGNPFLT)
IDTVEC(stk)
	TRAP_ERR(T_STKFLT)
IDTVEC(align)
	TRAP_ERR(T_ALIGNFLT)

	/*
	 * alltraps entry point.  Use swapgs if this is the first time in the
	 * kernel from userland.  Reenable interrupts if they were enabled
	 * before the trap.
	 *
	 * WARNING!  %gs not available until after our swapgs code
	 */
	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:

#if 0
alltraps_pushregs:
	movq	%rdi,TF_RDI(%rsp)
alltraps_pushregs_no_rdi:
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
#endif
	sti
	FAKE_MCOUNT(TF_RIP(%rsp))
	.globl	calltrap
	.type	calltrap,@function
calltrap:
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT
	jmp	doreti			/* Handle any pending ASTs */

IDTVEC(dblfault)
	PUSH_FRAME_TFERR
	movq	$T_DOUBLEFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)

	cld
	movq	%rsp, %rdi
	call	dblfault_handler
2:	hlt
	jmp	2b

	/*
	 * We need to save the contents of %cr2 before PUSH_FRAME* messes
	 * with %cr3.
	 */
IDTVEC(page)
	PUSH_FRAME_TFERR_SAVECR2
	movq	$T_PAGEFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	jmp	alltraps

	/*
	 * We have to special-case this one.  If we get a trap in doreti() at
	 * the iretq stage, we'll reenter as a kernel exception with the
	 * wrong gs and isolation state.  We have to act as though we came
	 * in from userland.
	 */
IDTVEC(prot)
	pushq	%r10
	leaq	doreti_iret(%rip),%r10
	cmpq	%r10,TF_RIP-TF_ERR+8(%rsp)		/* +8 due to pushq */
	jne	prot_normal
	testb	$SEL_RPL_MASK,TF_CS-TF_ERR+8(%rsp)	/* +8 due to pushq */
	jnz	prot_normal

	/*
	 * Special fault during iretq
	 */
	popq	%r10
	swapgs
	KMMUENTER_TFERR
	subq	$TF_ERR,%rsp
	PUSH_FRAME_REGS
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	jmp	alltraps

prot_normal:
	popq	%r10
	PUSH_FRAME_TFERR
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_XFLAGS(%rsp)
	jmp	alltraps

/*
 * Fast syscall entry point.  We enter here with just our new %cs/%ss set,
 * and the new privilege level.  We are still running on the old user stack
 * pointer.  We have to juggle a few things around to find our stack etc.
 * swapgs gives us access to our PCPU space only.
 *
 * We use GD_TRAMPOLINE+TR_CR2 to save the user stack pointer temporarily.
 */
IDTVEC(fast_syscall)
	swapgs					/* get kernel %gs */
	movq	%rsp,PCPU(trampoline)+TR_CR2	/* save user %rsp */
	movq	PCPU(common_tss)+TSS_RSP0,%rsp

	/*
	 * NOTE: KMMUENTER_SYSCALL does not actually use the stack, but
	 *	 we adjust the stack pointer for correctness in case it
	 *	 does in the future.
	 */
	subq	$TR_PCB_RSP,%rsp
	KMMUENTER_SYSCALL
	movq	PCPU(trampoline)+TR_PCB_RSP,%rsp

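	/*
	 * NOTE: the syscall instruction saved the user %rip in %rcx and
	 *	 the user rflags in %r11, which is why those two registers
	 *	 feed TF_RIP and TF_RFLAGS below.  The userland ABI passes
	 *	 argument 4 in %r10 because %rcx is clobbered.
	 */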
	/* Now emulate a trapframe. Make the 8 byte alignment odd for call. */
	subq	$TF_SIZE,%rsp
	/* defer TF_RSP till we have a spare register */
	movq	%r11,TF_RFLAGS(%rsp)
	movq	%rcx,TF_RIP(%rsp)	/* %rcx original value is in %r10 */
	movq	PCPU(trampoline)+TR_CR2,%r11	/* %r11 already saved */
	movq	%r11,TF_RSP(%rsp)	/* user stack pointer */
	orl	$RQF_QUICKRET,PCPU(reqflags)
	movq	$KUDSEL,TF_SS(%rsp)
	movq	$KUCSEL,TF_CS(%rsp)
	movq	$2,TF_ERR(%rsp)		/* sizeof "syscall" insn */
	movq	$T_FAST_SYSCALL,TF_TRAPNO(%rsp)	/* for the vkernel */
	movq	$0,TF_XFLAGS(%rsp)	/* note: used in signal frame */
	movq	%rdi,TF_RDI(%rsp)	/* arg 1 */
	movq	%rsi,TF_RSI(%rsp)	/* arg 2 */
	movq	%rdx,TF_RDX(%rsp)	/* arg 3 */
	movq	%r10,TF_RCX(%rsp)	/* arg 4 */
	movq	%r8,TF_R8(%rsp)		/* arg 5 */
	movq	%r9,TF_R9(%rsp)		/* arg 6 */
	movq	%rax,TF_RAX(%rsp)	/* syscall number */
	movq	%rbx,TF_RBX(%rsp)	/* C preserved */
	movq	%rbp,TF_RBP(%rsp)	/* C preserved */
	movq	%r12,TF_R12(%rsp)	/* C preserved */
	movq	%r13,TF_R13(%rsp)	/* C preserved */
	movq	%r14,TF_R14(%rsp)	/* C preserved */
	movq	%r15,TF_R15(%rsp)	/* C preserved */

	xorq	%rax,%rax		/* SECURITY CLEAR REGS */
	movq	%rax,%rbx
	movq	%rax,%rcx
	movq	%rax,%rdx
	movq	%rax,%rsi
	movq	%rax,%rdi
	movq	%rax,%rbp
	movq	%rax,%r8
	movq	%rax,%r9
	movq	%rax,%r10
	movq	%rax,%r11
	movq	%rax,%r12
	movq	%rax,%r13
	movq	%rax,%r14
	movq	%rax,%r15

	sti
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp, %rdi
	call	syscall2

	/*
	 * Fast return from system call
	 */
	cli
	testl	$RQF_IPIQ|RQF_TIMER|RQF_INTPEND|RQF_AST_MASK,PCPU(reqflags)
	jnz	1f
	testl	$RQF_QUICKRET,PCPU(reqflags)
	jz	1f
	MEXITCOUNT

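	/*
	 * NOTE: sysretq returns to the %rip in %rcx and reloads rflags
	 *	 from %r11, so those two registers are restored from
	 *	 TF_RIP and TF_RFLAGS last, just before the exit.
	 */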
	movq	TF_RBX(%rsp),%rbx	/* SECURITY RESTORE */
	movq	TF_RCX(%rsp),%rcx
	movq	TF_RBP(%rsp),%rbp
	movq	TF_R8(%rsp),%r8
	movq	TF_R9(%rsp),%r9
	xorq	%r10,%r10		/* (security - clear scratch) */
	movq	%r10,%r11
	movq	TF_R12(%rsp),%r12
	movq	TF_R13(%rsp),%r13
	movq	TF_R14(%rsp),%r14
	movq	TF_R15(%rsp),%r15

	movq	TF_RDI(%rsp),%rdi	/* NORMAL RESTORE */
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RAX(%rsp),%rax
	movq	TF_RFLAGS(%rsp),%r11
	movq	TF_RIP(%rsp),%rcx
	movq	TF_RSP(%rsp),%rsp
	KMMUEXIT_SYSCALL
	swapgs
	sysretq

	/*
	 * Normal slow / full iret
	 */
1:
	MEXITCOUNT
	jmp	doreti

/*
 * Here for CYA insurance, in case a "syscall" instruction gets
 * issued from 32 bit compatibility mode. MSR_CSTAR has to point
 * to *something* if EFER_SCE is enabled.
 */
IDTVEC(fast_syscall32)
	sysret

/*
 * NMI handling is special.
 *
 * First, an NMI is taken on its own pcpu stack.  RFLAGS.IF, %gs, and %cr3
 * will be inconsistent when interrupting supervisor mode.
 *
 * Second, the processor treats NMIs specially, blocking further NMIs
 * until an 'iretq' instruction is executed.  We therefore need to
 * execute the NMI handler with interrupts disabled to prevent a
 * nested interrupt from executing an 'iretq' instruction and
 * inadvertently taking the processor out of NMI mode.
 */
IDTVEC(nmi)
	/*
	 * We don't need to special-case entry from userland; %gs will
	 * be consistent with expectations.
	 */
	testb	$SEL_RPL_MASK,TF_CS-TF_RIP(%rsp)	/* from userland? */
	jnz	200f

	/*
	 * From kernel - %gs and %cr3 may be inconsistent.  Save original
	 * values and load consistent values, restore on return.
	 *
	 * The trap handler is NOT allowed to block for this case.
	 */
	subq	$TR_RIP, %rsp
	movq	%rax, TR_RAX(%rsp)
	movq	%rcx, TR_RCX(%rsp)
	movq	%rdx, TR_RDX(%rsp)

	cld
	movq	%cr3,%rax			/* save CR3 */
	movq	%rax, TR_PCB_CR3_SAVED(%rsp)
	movl	$MSR_GSBASE,%ecx		/* save %gs */
	rdmsr
	shlq	$32,%rdx
	orq	%rdx,%rax
	movq	%rax, TR_PCB_GS_SAVED(%rsp)
	movq	TR_PCB_GS_KERNEL(%rsp),%rdx	/* retrieve kernel %gs */
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
#if 0
	movq	TR_PCB_CR3(%rsp),%rax		/* retrieve kernel %cr3 */
#endif
	movq	PCPU(trampoline)+TR_PCB_CR3,%rax
	movq	%rax,%cr3

	movq	TR_RDX(%rsp), %rdx
	movq	TR_RCX(%rsp), %rcx
	movq	TR_RAX(%rsp), %rax
	addq	$TR_RIP, %rsp

	/*
	 * Ok, run the normal trap.  Because TF_CS is at a kernel RPL,
	 * the normal code will skip the usual swapgs and KMMU (trampoline)
	 * code.  We've handled the rest.
	 *
	 * NOTE: at this point the trampframe is above the normal stack
	 *	 frame.  The trap code will be ignorant of the special
	 *	 TR_* registers above the cpu hardware frame portion,
	 *	 and the TR_* registers below it will be overwritten.
	 */
	PUSH_FRAME_TFRIP
	movq	$0,TF_XFLAGS(%rsp)
	movq	$T_NMI,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)

	FAKE_MCOUNT(TF_RIP(%rsp))
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT

	/*
	 * Pop the frame (since we're coming from kernel mode, this will
	 * not mess with %cr3 or %gs), then restore %cr3 and %gs for our
	 * iretq.  Not optimal but more readable and this is not a
	 * critical path.
	 */
	POP_FRAME(nop)

	subq	$TR_RIP, %rsp
	movq	%rax, TR_RAX(%rsp)
	movq	%rcx, TR_RCX(%rsp)
	movq	%rdx, TR_RDX(%rsp)

	movl	$MSR_GSBASE,%ecx		/* restore %gs */
	movq	TR_PCB_GS_SAVED(%rsp),%rdx
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr

	movq	TR_PCB_CR3_SAVED(%rsp),%rax	/* restore %cr3 */
	movq	%rax,%cr3

	movq	TR_RAX(%rsp),%rax
	movq	TR_RCX(%rsp),%rcx
	movq	TR_RDX(%rsp),%rdx
	addq	$TR_RIP, %rsp

	/*
	 * Direct iretq. No point jumping to doreti because the
	 * exception code that deals with iretq faults can't handle
	 * non-deterministic %gs/%cr3 state.
	 */
	iretq

	/*
	 * From userland (normal trap path)
	 */
200:
	PUSH_FRAME_TFRIP
	movq	$0,TF_XFLAGS(%rsp)
	movq	$T_NMI,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)

	FAKE_MCOUNT(TF_RIP(%rsp))
	cld
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT

	POP_FRAME(jmp doreti_iret)

/*
 * Reserved (unconfigured) traps rsvd00 - rsvdff
 */
.macro reservetrap a b
IDTVEC(rsvd\a\b)
	TRAP(T_RESERVED + 0x\a\b)
.endm
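
/*
 * For illustration (a sketch, not additional emitted code),
 * "reservetrap 0 3" expands to
 *
 *	IDTVEC(rsvd03)
 *		TRAP(T_RESERVED + 0x03)
 *
 * so the reservegrp invocations below emit vectors rsvd00 - rsvdff.
 */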

.macro reservegrp a
reservetrap \a 0
reservetrap \a 1
reservetrap \a 2
reservetrap \a 3
reservetrap \a 4
reservetrap \a 5
reservetrap \a 6
reservetrap \a 7
reservetrap \a 8
reservetrap \a 9
reservetrap \a a
reservetrap \a b
reservetrap \a c
reservetrap \a d
reservetrap \a e
reservetrap \a f
.endm

reservegrp 0
reservegrp 1
reservegrp 2
reservegrp 3
reservegrp 4
reservegrp 5
reservegrp 6
reservegrp 7
reservegrp 8
reservegrp 9
reservegrp a
reservegrp b
reservegrp c
reservegrp d
reservegrp e
reservegrp f

/*
 * This function is what cpu_heavy_restore jumps to after a new process
 * is created.  The LWKT subsystem switches while holding a critical
 * section and we maintain that abstraction here (e.g. because
 * cpu_heavy_restore needs it due to PCB_*() manipulation), then get out of
 * it before calling the initial function (typically fork_return()) and/or
 * returning to user mode.
 *
 * The MP lock is not held at any point but the critcount is bumped
 * on entry to prevent interruption of the trampoline at a bad point.
 *
 * This is effectively what td->td_switch() returns to.  It 'returns' the
 * old thread in %rax and since this is not returning to a td->td_switch()
 * call from lwkt_switch() we must handle the cleanup for the old thread
 * by calling lwkt_switch_return().
 *
 * fork_trampoline(%rax:otd, %rbx:func, %r12:arg)
 */
ENTRY(fork_trampoline)
	movq	%rax,%rdi
	call	lwkt_switch_return
	movq	PCPU(curthread),%rax
	decl	TD_CRITCOUNT(%rax)

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 *
	 * initproc has its own fork handler, start_init(), which DOES
	 * return.
	 *
	 * %rbx - chaining function (typically fork_return)
	 * %r12 -> %rdi (argument)
	 * frame-> %rsi (trap frame)
	 *
	 *   void (func:rbx)(arg:rdi, trapframe:rsi)
	 */
	movq	%rsp, %rsi		/* pass trapframe by reference */
	movq	%r12, %rdi		/* arg1 */
	call	*%rbx			/* function */

	/* cut from syscall */

	sti
	call	splz

	/*
	 * Return via doreti to handle ASTs.
	 *
	 * trapframe is at the top of the stack.
	 */
	MEXITCOUNT
	jmp	doreti


/*
 * To efficiently implement classification of trap and interrupt handlers
 * for profiling, there must be only trap handlers between the labels btrap
 * and bintr, and only interrupt handlers between the labels bintr and
 * eintr.  This is implemented (partly) by including files that contain
 * some of the handlers.  Before including the files, set up a normal asm
 * environment so that the included files don't need to know that they are
 * included.
 */

	.data
	.p2align 4

	.text
	SUPERALIGN_TEXT
MCOUNT_LABEL(bintr)

#if 0 /* JG */
#include <x86_64/x86_64/apic_vector.S>
#endif


#ifdef DEV_ATPIC
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#include <x86_64/isa/atpic_vector.S>
#endif

	.text
MCOUNT_LABEL(eintr)