/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Process switching routines.
 */

#if defined(__lint)
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/time.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/stack.h>
#include <sys/segments.h>

/*
 * resume(thread_id_t t);
 *
 * A thread can only run on one processor at a time. There exists a
 * window on MPs where the current thread on one processor is capable
 * of being dispatched by another processor. Some overlap between
 * outgoing and incoming threads can happen when they are the same
 * thread. In that case, resume() on one processor will spin on the
 * incoming thread until resume() on the other processor has finished
 * with the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process.  Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted).  This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */
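
/*
 * A rough C-level sketch of what resume() below does, with hypothetical
 * helper names standing in for the register/stack manipulation that can
 * only be expressed in assembly:
 *
 *	void
 *	resume(kthread_t *t)
 *	{
 *		kthread_t *curt = curthread;
 *		proc_t *p = ttoproc(curt);
 *
 *		save_nonvolatile_regs(curt);	// SAVE_REGS()
 *		if (curt->t_ctx != NULL)
 *			savectx(curt);		// per-thread context ops
 *		if (p->p_pctx != NULL)
 *			savepctx(p);		// per-process context ops
 *		switch_to_idle_stack();		// so old stack can be reused
 *		hat_switch(ttoproc(t)->p_as->a_hat);
 *		curt->t_lock = 0;		// old thread may migrate now
 *		spin_until_t_lock_acquired(t);	// _resume_from_idle
 *		CPU->cpu_thread = t;
 *		restore_nonvolatile_regs(t);	// RESTORE_REGS()
 *		spl0();				// returns to t's saved PC
 *	}
 */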

#if !defined(__lint)

#if LWP_PCB_FPU != 0
#error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
#endif	/* LWP_PCB_FPU != 0 */

#endif	/* !__lint */

#if defined(__amd64)

/*
 * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * The stack frame must be created before the save of %rsp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define SAVE_REGS(thread_t, retaddr)			\
	movq	%rbp, T_RBP(thread_t);			\
	movq	%rbx, T_RBX(thread_t);			\
	movq	%r12, T_R12(thread_t);			\
	movq	%r13, T_R13(thread_t);			\
	movq	%r14, T_R14(thread_t);			\
	movq	%r15, T_R15(thread_t);			\
	pushq	%rbp;					\
	movq	%rsp, %rbp;				\
	movq	%rsp, T_SP(thread_t);			\
	movq	retaddr, T_PC(thread_t);		\
	movq	%rdi, %r12;				\
	call	__dtrace_probe___sched_off__cpu
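
/*
 * After SAVE_REGS() the saved stack looks like an ordinary frame, which
 * is what lets a traceback through T_SP walk back to resume()'s caller
 * (a sketch; higher addresses at the top):
 *
 *	|  resume()'s caller ...  |
 *	|  return address         |  <- pushed by the call to resume()
 *	|  saved %rbp             |  <- %rbp and T_SP point here
 */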

/*
 * Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * We load up %rsp from the label_t as part of the context switch, so
 * we don't repeat that here.
 *
 * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define RESTORE_REGS(scratch_reg)			\
	movq	%gs:CPU_THREAD, scratch_reg;		\
	movq	T_RBP(scratch_reg), %rbp;		\
	movq	T_RBX(scratch_reg), %rbx;		\
	movq	T_R12(scratch_reg), %r12;		\
	movq	T_R13(scratch_reg), %r13;		\
	movq	T_R14(scratch_reg), %r14;		\
	movq	T_R15(scratch_reg), %r15

/*
 * Get pointer to a thread's hat structure
 */
#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movq	T_PROCP(thread_t), hatp;		\
	movq	P_AS(hatp), scratch_reg;		\
	movq	A_HAT(scratch_reg), hatp

#define	TSC_READ()					\
	call	tsc_read;				\
	movq	%rax, %r14;

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
0:							\
	TSC_READ();					\
	movq	T_INTR_START(thread_t), %rax;		\
	cmpxchgq %r14, T_INTR_START(thread_t);		\
	jnz	0b;					\
1:
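
/*
 * The same retry loop in C, as a sketch using GCC-style atomics
 * (illustrative only; the kernel does it in assembly to control the
 * exact instruction sequence):
 *
 *	if (t->t_flag & T_INTR_THREAD) {
 *		hrtime_t old, new;
 *		do {
 *			old = t->t_intr_start;
 *			new = tsc_read();
 *		} while (!__sync_bool_compare_and_swap(
 *		    &t->t_intr_start, old, new));
 *	}
 *
 * If an interrupt fires after tsc_read() but before the store, the
 * interrupt code updates t_intr_start, the compare fails, and the loop
 * retries with a fresh timestamp.
 */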

#elif defined (__i386)

/*
 * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * The stack frame must be created before the save of %esp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define SAVE_REGS(thread_t, retaddr)			\
	movl	%ebp, T_EBP(thread_t);			\
	movl	%ebx, T_EBX(thread_t);			\
	movl	%esi, T_ESI(thread_t);			\
	movl	%edi, T_EDI(thread_t);			\
	pushl	%ebp;					\
	movl	%esp, %ebp;				\
	movl	%esp, T_SP(thread_t);			\
	movl	retaddr, T_PC(thread_t);		\
	movl	8(%ebp), %edi;				\
	pushl	%edi;					\
	call	__dtrace_probe___sched_off__cpu;	\
	addl	$CLONGSIZE, %esp

/*
 * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * We don't do a 'leave,' because reloading %esp/%ebp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define RESTORE_REGS(scratch_reg)			\
	movl	%gs:CPU_THREAD, scratch_reg;		\
	movl	T_EBP(scratch_reg), %ebp;		\
	movl	T_EBX(scratch_reg), %ebx;		\
	movl	T_ESI(scratch_reg), %esi;		\
	movl	T_EDI(scratch_reg), %edi

/*
 * Get pointer to a thread's hat structure
 */
#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movl	T_PROCP(thread_t), hatp;		\
	movl	P_AS(hatp), scratch_reg;		\
	movl	A_HAT(scratch_reg), hatp

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
	pushl	%ecx;					\
0:							\
	pushl	T_INTR_START(thread_t);			\
	pushl	T_INTR_START+4(thread_t);		\
	call	tsc_read;				\
	movl	%eax, %ebx;				\
	movl	%edx, %ecx;				\
	popl	%edx;					\
	popl	%eax;					\
	cmpxchg8b T_INTR_START(thread_t);		\
	jnz	0b;					\
	popl	%ecx;					\
1:
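
/*
 * cmpxchg8b gives the 32-bit kernel an atomic 8-byte compare-and-swap:
 * it compares %edx:%eax with the 64-bit memory operand and, if they
 * match, stores %ecx:%ebx there.  The pushes and pops above exist only
 * to shuffle the previous t_intr_start into %edx:%eax and the fresh
 * tsc_read() result (returned in %edx:%eax) into %ecx:%ebx, since the
 * instruction requires those fixed register pairs.
 */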

#endif	/* __amd64 */

#if defined(__lint)

/* ARGSUSED */
void
resume(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	LOADCPU(%r15)				/* %r15 = CPU */
	movq	CPU_THREAD(%r15), %r13		/* %r13 = curthread */

	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	cmpq	$0, T_CTX(%r13)		/* should current thread savectx? */
	je	.nosavectx		/* skip call when zero */

	movq	%r13, %rdi		/* arg = thread pointer */
	call	savectx			/* call ctx ops */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movq	T_PROCP(%r13), %r14	/* %r14 = proc */
	cmpq	$0, P_PCTX(%r14)	/* should current process savepctx? */
	je	.nosavepctx		/* skip call when zero */

	movq	%r14, %rdi		/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movq	CPU_IDLE_THREAD(%r15), %rax 	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movq	T_SP(%rax), %rsp	/* It is safe to set rsp */
	movq	%rax, CPU_THREAD(%r15)

	/*
	 * Switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%r13)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *       %r12 = new thread
	 *
	 * Here we are in the idle thread, having dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * Spin until the dispatched thread's mutex has been unlocked.
	 * This mutex is unlocked when it becomes safe for the thread
	 * to run.
	 */
.lock_thread_mutex:
	lock
	btsl	$0, T_LOCK(%r12) 	/* attempt to lock new thread's mutex */
	jnc	.thread_mutex_locked	/* got it */

.spin_thread_mutex:
	pause
	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
	jz	.lock_thread_mutex	/* clear, retry lock */
	jmp	.spin_thread_mutex	/* still locked, spin... */
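
	/*
	 * In C terms this is a plain test-and-set spin acquire (a sketch
	 * with illustrative names; the pause instruction reduces bus
	 * traffic while another CPU's resume() still owns the lock):
	 *
	 *	while (__sync_lock_test_and_set(&t->t_lock, 1) != 0) {
	 *		while (t->t_lock != 0)
	 *			cpu_pause();	// the pause instruction
	 *	}
	 */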

.thread_mutex_locked:
	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%r13)			/* load current CPU pointer */
	cmpq	%r13, T_CPU(%r12)
	je	.setup_cpu

	/* cp->cpu_stats.sys.cpumigrate++ */
	incq	CPU_STATS_SYS_CPUMIGRATE(%r13)
	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */

.setup_cpu:
	/*
	 * Set up rsp0 (kernel stack) in TSS to curthread's stack.
	 * (Note: Since we don't have saved 'regs' structure for all
	 *	  the threads we can't easily determine if we need to
	 *	  change rsp0. So, we simply change the rsp0 to bottom
	 *	  of the thread stack and it will work for all cases.)
	 *
	 * XX64 - Is this correct?
	 */
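	/*
	 * Roughly, in C (field names illustrative):
	 *	cp->cpu_tss->tss_rsp0 =
	 *	    (uintptr_t)t->t_stk + REGSIZE + MINFRAME;
	 */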
	movq	CPU_TSS(%r13), %r14
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movq	%rax, TSS_RSP0(%r14)
#else
	movl	$KDS_SEL, %edi
	movq	%rax, %rsi
	call	HYPERVISOR_stack_switch
#endif	/* __xpv */

	movq	%r12, CPU_THREAD(%r13)	/* set CPU's thread pointer */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movq	T_LWP(%r12), %rax 	/* set associated lwp to  */
	movq	%rax, CPU_LWP(%r13) 	/* CPU's lwp ptr */

	movq	T_SP(%r12), %rsp	/* switch to resuming thread's stack */
	movq	T_PC(%r12), %r13	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
	jz	.norestorectx		/* skip call when zero */
	movq	%r12, %rdi		/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movq	T_PROCP(%r12), %rcx
	cmpq	$0, P_PCTX(%rcx)
	jz	.norestorepctx
	movq	%rcx, %rdi
	call	restorepctx
.norestorepctx:

	STORE_INTR_START(%r12)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	%r13, %rax	/* save return address */
	RESTORE_REGS(%r11)
	pushq	%rax		/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#elif defined (__i386)

	ENTRY(resume)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

	LOADCPU(%ebx)			/* %ebx = CPU */
	movl	CPU_THREAD(%ebx), %esi	/* %esi = curthread */

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	movl	T_CTX(%esi), %eax	/* should current thread savectx? */
	testl	%eax, %eax
	jz	.nosavectx		/* skip call when zero */
	pushl	%esi			/* arg = thread pointer */
	call	savectx			/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movl	T_PROCP(%esi), %eax	/* %eax = proc */
	cmpl	$0, P_PCTX(%eax)	/* should current process savepctx? */
	je	.nosavepctx		/* skip call when zero */
	pushl	%eax			/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
	addl	$4, %esp
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movl	CPU_IDLE_THREAD(%ebx), %eax 	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movl	T_SP(%eax), %esp	/* It is safe to set esp */
	movl	%eax, CPU_THREAD(%ebx)

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%esi)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *       %edi = new thread
	 *
	 * Here we are in the idle thread, having dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * Spin until the dispatched thread's mutex has been unlocked.
	 * This mutex is unlocked when it becomes safe for the thread
	 * to run.
	 */
.L4:
	lock
	btsl	$0, T_LOCK(%edi) 	/* lock new thread's mutex */
	jc	.L4_2			/* lock did not succeed */

	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%esi)			/* load current CPU pointer */
	movl	T_STACK(%edi), %eax	/* loaded early to use the Pentium's */
					/* V pipe; used a few lines below */
	cmpl	%esi, T_CPU(%edi)
	jne	.L5_2
.L5_1:
	/*
	 * Set up esp0 (kernel stack) in TSS to curthread's stack.
	 * (Note: Since we don't have saved 'regs' structure for all
	 *	  the threads we can't easily determine if we need to
	 *	  change esp0. So, we simply change the esp0 to bottom
	 *	  of the thread stack and it will work for all cases.)
	 */
	movl	CPU_TSS(%esi), %ecx
	addl	$REGSIZE+MINFRAME, %eax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movl	%eax, TSS_ESP0(%ecx)
#else
	pushl	%eax
	pushl	$KDS_SEL
	call	HYPERVISOR_stack_switch
	addl	$8, %esp
#endif	/* __xpv */

	movl	%edi, CPU_THREAD(%esi)	/* set CPU's thread pointer */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movl	T_LWP(%edi), %eax 	/* set associated lwp to  */
	movl	%eax, CPU_LWP(%esi) 	/* CPU's lwp ptr */

	movl	T_SP(%edi), %esp	/* switch to resuming thread's stack */
	movl	T_PC(%edi), %esi	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	movl	T_CTX(%edi), %eax	/* should resumed thread restorectx? */
	testl	%eax, %eax
	jz	.norestorectx		/* skip call when zero */
	pushl	%edi			/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movl	T_PROCP(%edi), %eax
	cmpl	$0, P_PCTX(%eax)
	je	.norestorepctx
	pushl	%eax			/* arg = proc pointer */
	call	restorepctx
	addl	$4, %esp		/* restore stack pointer */
.norestorepctx:

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	%esi, %eax		/* save return address */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret

.L4_2:
	pause
	cmpb	$0, T_LOCK(%edi)
	je	.L4
	jmp	.L4_2

.L5_2:
	/* cp->cpu_stats.sys.cpumigrate++ */
	addl	$1, CPU_STATS_SYS_CPUMIGRATE(%esi)
	adcl	$0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
	movl	%esi, T_CPU(%edi)	/* set new thread's CPU pointer */
	jmp	.L5_1

	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_zombie(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_zombie)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_zombie_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */

	/* Clean up the FP unit; it might have been left enabled. */

#if defined(__xpv)		/* XXPV XXtclayton */
	/*
	 * Remove this after bringup.
	 * (Too many #gp's for an instrumented hypervisor.)
	 */
	STTS(%rax)
#else
	movq	%cr0, %rax
	testq	$CR0_TS, %rax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orq	$CR0_TS, %rax
	movq	%rax, %cr0
.zfpu_disabled:

#endif	/* __xpv */

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movq	%gs:CPU_IDLE_THREAD, %rax /* idle thread pointer */
	movq	T_SP(%rax), %rsp	/* get onto idle thread stack */

	/*
	 * Sigh. If the idle thread has never run thread_start()
	 * then t_sp is mis-aligned by thread_load().
	 */
	andq	$_BITNOT(STACK_ALIGN-1), %rsp
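
	/*
	 * For example, with STACK_ALIGN == 16 the mask is ~0xf, so an
	 * %rsp value ending in 0x8 is rounded down to the next lower
	 * 16-byte boundary (...e8 -> ...e0).
	 */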

	/*
	 * Set the idle thread as the current thread.
	 */
	movq	%rax, %gs:CPU_THREAD

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Put the zombie on death-row.
	 */
	movq	%r13, %rdi
	call	reapq_add

	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%r11)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_zombie)

#elif defined (__i386)

	ENTRY(resume_from_zombie)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_zombie_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */

	/* Clean up the FP unit; it might have been left enabled. */

	movl	%cr0, %eax
	testl	$CR0_TS, %eax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orl	$CR0_TS, %eax
	movl	%eax, %cr0
.zfpu_disabled:

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movl	%gs:CPU_IDLE_THREAD, %eax /* idle thread pointer */
	movl	T_SP(%eax), %esp	/* get onto idle thread stack */

	/*
	 * Set the idle thread as the current thread.
	 */
	movl	%eax, %gs:CPU_THREAD

	/*
	 * switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Put the zombie on death-row.
	 */
	pushl	%esi
	call	reapq_add
	addl	$4, %esp
	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%ecx)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_zombie)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
resume_from_intr(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(resume_from_intr)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_intr_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
	movq	%r12, %gs:CPU_THREAD	/* set CPU's thread pointer */
	movq	T_SP(%r12), %rsp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so it may be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%r13)

	STORE_INTR_START(%r12)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	T_PC(%r12), %rax	/* saved return addr */
	RESTORE_REGS(%r11)
	pushq	%rax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_intr)

#elif defined (__i386)

	ENTRY(resume_from_intr)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_intr_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
	movl	%edi, %gs:CPU_THREAD	/* set CPU's thread pointer */
	movl	T_SP(%edi), %esp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so it may be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%esi)

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	T_PC(%edi), %eax	/* saved return addr */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_intr)

#endif	/* __amd64 */
#endif	/* __lint */

#if defined(__lint)

void
thread_start(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(thread_start)
	popq	%rax		/* start() */
	popq	%rdi		/* arg */
	popq	%rsi		/* len */
	movq	%rsp, %rbp
	call	*%rax
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)
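
/*
 * thread_load() builds the new thread's initial stack so that the first
 * resume() of the thread "returns" into thread_start() with start(),
 * arg and len on top of the stack; in C terms (a sketch):
 *
 *	(*start)(arg, len);
 *	thread_exit();		// never returns
 */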

#elif defined(__i386)

	ENTRY(thread_start)
	popl	%eax		/* start() */
	movl	%esp, %ebp	/* arg and len are at 0(%esp) and 4(%esp) */
	addl	$8, %ebp	/* frame pointer points above the args */
	call	*%eax		/* start(arg, len) */
	addl	$8, %esp	/* pop arg and len */
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)

#endif	/* __i386 */

#endif	/* __lint */