xref: /onnv-gate/usr/src/uts/intel/kdi/amd64/kdi_asm.s (revision 5084:7d838c5c0eed)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>

/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;		\
	movq	%rbx, %rax;			\
	movq	$KRS_SIZE, %rcx;		\
	mulq	%rcx;				\
	movq	$kdi_cpusave, %rdx;		\
	/*CSTYLED*/				\
	addq	(%rdx), %rax
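
/*
 * In C terms the computation above is roughly
 *	addr = kdi_cpusave + cpuid * KRS_SIZE;
 * where kdi_cpusave is a variable holding the base address of the
 * per-CPU array of save areas (the macro loads it through %rdx).
 */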

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT				\
	movq	%gs:CPU_IDT, %r11;		\
	leaq    kdi_idt(%rip), %rsi;		\
	cmpq	%rsi, %r11;			\
	je	1f;				\
	movq	%r11, KRS_IDT(%rax);		\
	movq	%gs:CPU_GDT, %r11;		\
	movq	%r11, KRS_GDT(%rax);		\
1:

#ifdef __xpv

#define	SAVE_GSBASE(reg) /* nothing */
#define	RESTORE_GSBASE(reg) /* nothing */

#else

#define	SAVE_GSBASE(base)				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base)

#define	RESTORE_GSBASE(base)				\
	movq	REG_OFF(KDIREG_GSBASE)(base), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr

#endif /* __xpv */
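
/*
 * rdmsr returns the 64-bit MSR value split across %edx:%eax, and wrmsr
 * consumes it the same way; hence the shl/or to assemble the two halves
 * on the save side and the shr to split them again on the restore side.
 */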

/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.  Note
 * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
 * unnecessary.
 */
#define	KDI_SAVE_REGS(base) \
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	SAVE_GSBASE(base)

#define	KDI_RESTORE_REGS(base) \
	movq	base, %rdi;				\
	RESTORE_GSBASE(%rdi);				\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi

/*
 * Given the address of the current CPU's cpusave area in %rdi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %rdi as a parameter, clobbers %rax-%rdx
 */
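/*
 * (kdi_dreg_set() below takes the debug register number in %edi and the
 * new value in %rsi, per the normal AMD64 calling convention; going
 * through a helper rather than a raw mov-to-%dr presumably lets the
 * bare-metal and hypervisor cases share one path.)
 */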
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;						\
	leaq	kdi_drreg(%rip), %r15;				\
	movl	$7, %edi;					\
	movq	DR_CTL(%r15), %rsi;				\
	call	kdi_dreg_set;					\
								\
	movl	$6, %edi;					\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;			\
	call	kdi_dreg_set;					\
								\
	movl	$0, %edi;					\
	movq	DRADDR_OFF(0)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$1, %edi;					\
	movq	DRADDR_OFF(1)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$2, %edi;					\
	movq	DRADDR_OFF(2)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$3, %edi;					\
	movq	DRADDR_OFF(3)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	popq	%rdi;						\
								\
	/*							\
	 * Write any requested MSRs.				\
	 */							\
	movq	KRS_MSR(%rdi), %rbx;				\
	cmpq	$0, %rbx;					\
	je	3f;						\
1:								\
	movl	MSR_NUM(%rbx), %ecx;				\
	cmpl	$0, %ecx;					\
	je	3f;						\
								\
	movl	MSR_TYPE(%rbx), %edx;				\
	cmpl	$KDI_MSR_WRITE, %edx;				\
	jne	2f;						\
								\
	movq	MSR_VALP(%rbx), %rdx;				\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
	wrmsr;							\
2:								\
	addq	$MSR_SIZE, %rbx;				\
	jmp	1b;						\
3:								\
	/*							\
	 * We must not branch after re-enabling LBR.  If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR.  kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.							\
	 */							\
	leaq	kdi_msr_wrexit_msr(%rip), %rcx;			\
	movl	(%rcx), %ecx;					\
	cmpl	$0, %ecx;					\
	je	1f;						\
								\
	leaq	kdi_msr_wrexit_valp(%rip), %rdx;		\
	movq	(%rdx), %rdx;					\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
								\
	wrmsr;							\
1:

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addq	$1, tmp1;			\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movq	KRS_CURCRUMB(cpusave), tmp1;	\
	addq	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movq	$0, KRS_CURCRUMBIDX(cpusave);	\
	leaq	KRS_CRUMBS(cpusave), tmp1;	\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movq	$KDI_NCRUMBS, tmp2;		\
3:	movq	$0, -4(tmp1, tmp2, 4);		\
	decq	tmp2;				\
	jnz	3b
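
/*
 * In rough C terms the advance above is:
 *	if (idx >= KDI_NCRUMBS - 1) {
 *		idx = 0;
 *		crumb = first crumb in KRS_CRUMBS;
 *	} else {
 *		idx++;
 *		crumb += KRM_SIZE;
 *	}
 *	current crumb = crumb; then the new crumb is zeroed.
 * (The names are illustrative; the real offsets come from assym/kdi_assym.)
 */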

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movq	KRS_CURCRUMB(cpusave), tmp;	\
	movq	value, offset(tmp)
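
/*
 * i.e. store `value' at byte offset `offset' within the current crumb;
 * offset is one of the KRM_* slots (KRM_PC, KRM_SP, KRM_TRAPNO, ...).
 */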

#endif	/* _ASM */

#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else	/* __lint */

	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
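	/*
	 * Not yet implemented: the instructions below simply dereference a
	 * NULL pointer, so an NMI turns into an immediate, visible fault
	 * rather than being silently ignored.
	 */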
	clrq	%rcx
	movq	(%rcx), %rcx
	SET_SIZE(kdi_nmiint)

	/*
	 * The main entry point for master CPUs.  It also serves as the trap
	 * handler for all traps and interrupts taken during single-step.
	 */
	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	pushq	%rax
	CLI(%rax)
	popq	%rax

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

#ifdef __xpv
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb	$0, REG_OFF(KDIREG_CS)+4(%rsp)
#endif

#if !defined(__xpv)
	/*
	 * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs.  All is not lost, as the kernel can divine the correct
	 * value for us.  Note that the previous GSBASE is saved in the
	 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
	 * blown away.  On the hypervisor, we don't need to do this, since it's
	 * ensured we're on our requested kernel GSBASE already.
	 */
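	/*
	 * sgdt stores a 10-byte pseudo-descriptor: a 2-byte limit followed
	 * by the 8-byte linear base, which is why the base is fetched from
	 * offset 2 below.
	 */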
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi	/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase	/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr
#endif	/* __xpv */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

	movq	%rsp, %rbp
	pushq	%rax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
	 * of the debugger's memory ranges)?
	 */
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b

3:	/*
	 * The master is still set.  That should only happen if we hit a trap
	 * while running in the debugger.  Note that it may be an intentional
	 * fault.  kmdb_dpi_handle_fault will sort it all out.
	 */

	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx		/* cpuid */

	call	kdi_dvec_handle_fault

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

#endif	/* __lint */

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */

#if defined(__lint)
char kdi_slave_entry_patch;

void
kdi_slave_entry(void)
{
}
#else /* __lint */
	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;

	/*
	 * Cross calls are implemented as function calls, so our stack currently
	 * looks like one you'd get from a zero-argument function call.  That
	 * is, there's the return %rip at %rsp, and that's about it.  We need
	 * to make it look like an interrupt stack.  When we first save, we'll
	 * reverse the saved %ss and %rip, which we'll fix back up when we've
	 * freed up some general-purpose registers.  We'll also need to fix up
	 * the saved %rsp.
	 */

	pushq	%rsp		/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax		/* rip should be here */
	pushq	$-1		/* phony trap error code */
	pushq	$-1		/* phony trap number */

	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

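	/*
	 * Undo the shortcuts taken above: the saved %ss and %rip ended up
	 * in each other's slots, and the saved %rsp is 8 lower than the
	 * interrupted context's %rsp because it still reflects the return
	 * address pushed by the cross-call.  Swap the former and adjust
	 * the latter.
	 */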
	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)

	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code)
	 */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)

	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

#endif	/* __lint */

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers.  First we check whether we should jump straight back to
 * the kernel.  If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */

#if !defined(__lint)

	ENTRY_NP(kdi_save_common_state)

	popq	%rdi			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rdi)	/* save ptr to current saved regs */

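	/*
	 * Ask whether this trap should be handed straight back to the
	 * kernel; kdi_trap_pass() returns 1 if so.
	 */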
	pushq	%rdi
	call	kdi_trap_pass
	cmpq	$1, %rax
	je	kdi_pass_to_kernel
	popq	%rax /* cpusave in %rax */

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
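	/*
	 * With CR0.WP clear, supervisor-mode writes ignore page-level write
	 * protection, so the debugger can modify otherwise read-only
	 * mappings (e.g. to plant breakpoints in kernel text).
	 */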
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0
#endif

	/* Save the debug registers and disable any active watchpoints */

	movq	%rax, %r15		/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)

	movq	%r15, %rax	/* restore cpu save area to rax */

	/*
	 * Save any requested MSRs.
	 */
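	/*
	 * KRS_MSR points at an array of MSR descriptors terminated by an
	 * entry whose MSR number is zero; each KDI_MSR_READ entry gets the
	 * current MSR contents stored into its value field.
	 */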
	movq	KRS_MSR(%rax), %rcx
	cmpq	$0, %rcx
	je	no_msr

	pushq	%rax		/* rdmsr clobbers %eax */
	movq	%rcx, %rbx

1:
	movl	MSR_NUM(%rbx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%rbx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%rbx)
	movl	%edx, _CONST(MSR_VAL + 4)(%rbx)

msr_next:
	addq	$MSR_SIZE, %rbx
	jmp	1b

msr_done:
	popq	%rax

no_msr:
	clrq	%rbp		/* stack traces should end here */

	pushq	%rax
	movq	%rax, %rdi	/* cpusave */

	call	kdi_debugger_entry

	/* Pass cpusave to kdi_resume */
	popq	%rdi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

#endif	/* !__lint */

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else	/* __lint */

	/* cpusave in %rdi */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */
#if !defined(__xpv)
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0
#endif

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp
	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
	IRET
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)

#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_pass_to_kernel)

	popq	%rdi /* cpusave */

	movq	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)

	/*
	 * Find the trap and vector off the right kernel handler.  The trap
	 * handler will expect the stack to be in trap order, with %rip being
	 * the last entry, so we'll need to restore all our regs.  On i86xpv
	 * we'll need to compensate for XPV_TRAP_POP.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
	 * to work with; we can't use a global since other CPUs can easily pass
	 * through here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movq	KRS_GREGS(%rdi), %rsp
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
	cmpq	$T_SGLSTP, %rdi
	je	1f
	cmpq	$T_BPTFLT, %rdi
	je	2f
	cmpq	$T_DBGENTR, %rdi
	je	3f
	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR

#define	CALL_TRAP_HANDLER(name) \
	KDI_RESTORE_REGS(%rsp); \
	/* Discard state, trapno, err */ \
	addq	$REG_OFF(KDIREG_RIP), %rsp; \
	XPV_TRAP_PUSH; \
	jmp	%cs:name

1:
	CALL_TRAP_HANDLER(dbgtrap)
	/*NOTREACHED*/
2:
	CALL_TRAP_HANDLER(brktrap)
	/*NOTREACHED*/
3:
	CALL_TRAP_HANDLER(invaltrap)
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

	/*
	 * A minimal version of mdboot(), to be used by the master CPU only.
	 */
	ENTRY_NP(kdi_reboot)

	movl	$AD_BOOT, %edi
	movl	$A_SHUTDOWN, %esi
	call	*psm_shutdownf
#if defined(__xpv)
	movl	$SHUTDOWN_reboot, %edi
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

#endif	/* !__lint */

#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else	/* __lint */

	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx		/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)
#endif	/* !__lint */