/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_MACHPRIVREGS_H
#define	_SYS_MACHPRIVREGS_H

#include <sys/hypervisor.h>

/*
 * Platform-dependent instruction sequences for manipulating
 * privileged state.
 */

#ifdef __cplusplus
extern "C" {
#endif

/*
 * CLI and STI are quite complex to virtualize!
 */

#if defined(__amd64)

#define	CURVCPU(r)					\
	movq	%gs:CPU_VCPU_INFO, r

#define	CURTHREAD(r)					\
	movq	%gs:CPU_THREAD, r

#elif defined(__i386)

#define	CURVCPU(r)					\
	movl	%gs:CPU_VCPU_INFO, r

#define	CURTHREAD(r)					\
	movl	%gs:CPU_THREAD, r

#endif	/* __i386 */

#define	XEN_TEST_EVENT_PENDING(r)			\
	testb	$0xff, VCPU_INFO_EVTCHN_UPCALL_PENDING(r)

#define	XEN_SET_UPCALL_MASK(r)				\
	movb	$1, VCPU_INFO_EVTCHN_UPCALL_MASK(r)

#define	XEN_GET_UPCALL_MASK(r, mask)			\
	movb	VCPU_INFO_EVTCHN_UPCALL_MASK(r), mask

#define	XEN_TEST_UPCALL_MASK(r)				\
	testb	$1, VCPU_INFO_EVTCHN_UPCALL_MASK(r)

#define	XEN_CLEAR_UPCALL_MASK(r)			\
	ASSERT_UPCALL_MASK_IS_SET;			\
	movb	$0, VCPU_INFO_EVTCHN_UPCALL_MASK(r)

#ifdef DEBUG

/*
 * Much logic depends on the upcall mask being set at
 * various points in the code; use this macro to validate.
 *
 * Need to use CURVCPU(r) to establish the vcpu pointer.
 */
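/*
 * In DEBUG kernels these checks also record provenance: the code at
 * label 6 below stores the current program counter into the per-CPU
 * laststi[] array (indexed by CPU_ID), and SAVE_CLI_LOCATION does the
 * same into lastcli[], presumably so a stistipanic can be traced back
 * to the offending STI/CLI site post mortem.
 */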
#if defined(__amd64)

#define	ASSERT_UPCALL_MASK_IS_SET			\
	pushq	%r11;					\
	CURVCPU(%r11);					\
	XEN_TEST_UPCALL_MASK(%r11);			\
	jne	6f;					\
	cmpl	$0, stistipanic(%rip);			\
	jle	6f;					\
	movl	$-1, stistipanic(%rip);			\
	movq	stistimsg(%rip), %rdi;			\
	xorl	%eax, %eax;				\
	call	panic;					\
6:	pushq	%rax;					\
	pushq	%rbx;					\
	movl	%gs:CPU_ID, %eax;			\
	leaq	.+0(%rip), %r11;			\
	leaq	laststi(%rip), %rbx;			\
	movq	%r11, (%rbx, %rax, 8);			\
	popq	%rbx;					\
	popq	%rax;					\
	popq	%r11

#define	SAVE_CLI_LOCATION				\
	pushq	%rax;					\
	pushq	%rbx;					\
	pushq	%rcx;					\
	movl	%gs:CPU_ID, %eax;			\
	leaq	.+0(%rip), %rcx;			\
	leaq	lastcli, %rbx;				\
	movq	%rcx, (%rbx, %rax, 8);			\
	popq	%rcx;					\
	popq	%rbx;					\
	popq	%rax;					\

#elif defined(__i386)

#define	ASSERT_UPCALL_MASK_IS_SET			\
	pushl	%ecx;					\
	CURVCPU(%ecx);					\
	XEN_TEST_UPCALL_MASK(%ecx);			\
	jne	6f;					\
	cmpl	$0, stistipanic;			\
	jle	6f;					\
	movl	$-1, stistipanic;			\
	movl	stistimsg, %ecx;			\
	pushl	%ecx;					\
	call	panic;					\
6:	pushl	%eax;					\
	pushl	%ebx;					\
	movl	%gs:CPU_ID, %eax;			\
	leal	.+0, %ecx;				\
	leal	laststi, %ebx;				\
	movl	%ecx, (%ebx, %eax, 4);			\
	popl	%ebx;					\
	popl	%eax;					\
	popl	%ecx

#define	SAVE_CLI_LOCATION				\
	pushl	%eax;					\
	pushl	%ebx;					\
	pushl	%ecx;					\
	movl	%gs:CPU_ID, %eax;			\
	leal	.+0, %ecx;				\
	leal	lastcli, %ebx;				\
	movl	%ecx, (%ebx, %eax, 4);			\
	popl	%ecx;					\
	popl	%ebx;					\
	popl	%eax;					\

#endif	/* __i386 */

#else	/* DEBUG */

#define	ASSERT_UPCALL_MASK_IS_SET	/* empty */
#define	SAVE_CLI_LOCATION		/* empty */

#endif	/* DEBUG */

#define	KPREEMPT_DISABLE(t)				\
	addb	$1, T_PREEMPT(t)

#define	KPREEMPT_ENABLE_NOKP(t)				\
	subb	$1, T_PREEMPT(t)

#define	CLI(r)						\
	CURTHREAD(r);					\
	KPREEMPT_DISABLE(r);				\
	CURVCPU(r);					\
	XEN_SET_UPCALL_MASK(r);				\
	SAVE_CLI_LOCATION;				\
	CURTHREAD(r);					\
	KPREEMPT_ENABLE_NOKP(r)

#define	CLIRET(r, ret)					\
	CURTHREAD(r);					\
	KPREEMPT_DISABLE(r);				\
	CURVCPU(r);					\
	XEN_GET_UPCALL_MASK(r, ret);			\
	XEN_SET_UPCALL_MASK(r);				\
	SAVE_CLI_LOCATION;				\
	CURTHREAD(r);					\
	KPREEMPT_ENABLE_NOKP(r)

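/*
 * Roughly, CLI(r) amounts to the following C (an illustrative sketch
 * only, assuming the usual machcpu/vcpu_info fields behind the
 * CPU_VCPU_INFO and T_PREEMPT assym offsets):
 *
 *	curthread->t_preempt++;		// don't migrate to another CPU
 *	CPU->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask = 1;
 *	curthread->t_preempt--;
 *
 * CLIRET(r, ret) does the same but first copies the old value of
 * evtchn_upcall_mask into 'ret'.
 */
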
/*
 * We rely on the fact that HYPERVISOR_block clears the upcall mask
 * for us and then delivers an upcall if there is a pending event.
 * This gets us a callback on this cpu without the danger of being
 * preempted and migrating to another cpu between re-enabling upcalls
 * and the callback delivery.
 */
#if defined(__amd64)

#define	STI_CLOBBER		/* clobbers %rax, %rdi, %r11 */		\
	CURVCPU(%r11);							\
	ASSERT_UPCALL_MASK_IS_SET;					\
	movw	$0x100, %ax;	/* assume mask set, pending clear */	\
	movw	$0, %di;	/* clear mask and pending */		\
	lock;								\
	cmpxchgw %di, VCPU_INFO_EVTCHN_UPCALL_PENDING(%r11);		\
	jz	7f;		/* xchg worked, we're done */		\
	movl	$__HYPERVISOR_sched_op, %eax; /* have pending upcall */	\
	movl	$SCHEDOP_block, %edi;					\
	pushq	%rsi;	/* hypercall clobbers C param regs plus r10 */	\
	pushq	%rcx;							\
	pushq	%rdx;							\
	pushq	%r8;							\
	pushq	%r9;							\
	pushq	%r10;							\
	TRAP_INSTR;	/* clear upcall mask, force upcall */		\
	popq	%r10;							\
	popq	%r9;							\
	popq	%r8;							\
	popq	%rdx;							\
	popq	%rcx;							\
	popq	%rsi;							\
7:

#define	STI								\
	pushq	%r11;							\
	pushq	%rdi;							\
	pushq	%rax;							\
	STI_CLOBBER;	/* clobbers %r11, %rax, %rdi */			\
	popq	%rax;							\
	popq	%rdi;							\
	popq	%r11

#elif defined(__i386)

#define	STI_CLOBBER		/* clobbers %eax, %ebx, %ecx */		\
	CURVCPU(%ecx);							\
	ASSERT_UPCALL_MASK_IS_SET;					\
	movw	$0x100, %ax;	/* assume mask set, pending clear */	\
	movw	$0, %bx;	/* clear mask and pending */		\
	lock;								\
	cmpxchgw %bx, VCPU_INFO_EVTCHN_UPCALL_PENDING(%ecx);		\
	jz	7f;		/* xchg worked, we're done */		\
	movl	$__HYPERVISOR_sched_op, %eax; /* have pending upcall */	\
	movl	$SCHEDOP_block, %ebx;					\
	TRAP_INSTR;		/* clear upcall mask, force upcall */	\
7:

#define	STI						\
	pushl	%eax;					\
	pushl	%ebx;					\
	pushl	%ecx;					\
	STI_CLOBBER;	/* clobbers %eax, %ebx, %ecx */	\
	popl	%ecx;					\
	popl	%ebx;					\
	popl	%eax

#endif	/* __i386 */
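
/*
 * In rough C terms, STI does the following (an illustrative sketch,
 * not literal code; it relies on evtchn_upcall_pending and
 * evtchn_upcall_mask being adjacent bytes in vcpu_info, which is what
 * the 16-bit cmpxchg above exploits):
 *
 *	if (cmpxchg of { pending == 0, mask == 1 } -> { 0, 0 } fails)
 *		(void) HYPERVISOR_sched_op(SCHEDOP_block, NULL);
 *
 * i.e. if no event is pending we simply clear the mask; otherwise we
 * let SCHEDOP_block clear the mask and deliver the pending upcall.
 */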

/*
 * Map the PS_IE bit to the hypervisor's event mask bit, and vice versa.
 * To -set- the event mask, we have to do a CLI.
 * To -clear- the event mask, we have to do an STI
 * (with all the accompanying pre-emption and callbacks, ick).
 */

#if defined(__amd64)

#define	IE_TO_EVENT_MASK(rtmp, rfl)		\
	testq	$PS_IE, rfl;			\
	jnz	4f;				\
	CLI(rtmp);				\
	jmp	5f;				\
4:	STI;					\
5:

#define	EVENT_MASK_TO_IE(rtmp, rfl)		\
	andq	$_BITNOT(PS_IE), rfl;		\
	CURVCPU(rtmp);				\
	XEN_TEST_UPCALL_MASK(rtmp);		\
	jnz	1f;				\
	orq	$PS_IE, rfl;			\
1:

#elif defined(__i386)

#define	IE_TO_EVENT_MASK(rtmp, rfl)		\
	testl	$PS_IE, rfl;			\
	jnz	4f;				\
	CLI(rtmp);				\
	jmp	5f;				\
4:	STI;					\
5:

#define	EVENT_MASK_TO_IE(rtmp, rfl)		\
	andl	$_BITNOT(PS_IE), rfl;		\
	CURVCPU(rtmp);				\
	XEN_TEST_UPCALL_MASK(rtmp);		\
	jnz	1f;				\
	orl	$PS_IE, rfl;			\
1:

#endif	/* __i386 */
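
/*
 * As an illustrative C sketch of the two directions (not literal code):
 *
 *	IE_TO_EVENT_MASK:	(rfl & PS_IE) ? STI : CLI;
 *	EVENT_MASK_TO_IE:	rfl &= ~PS_IE;
 *				if (vcpu->evtchn_upcall_mask == 0)
 *					rfl |= PS_IE;
 */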

/*
 * Used to re-enable interrupts in the body of exception handlers
 */

#if defined(__amd64)

#define	ENABLE_INTR_FLAGS		\
	pushq	$F_ON;			\
	popfq;				\
	STI

#elif defined(__i386)

#define	ENABLE_INTR_FLAGS		\
	pushl	$F_ON;			\
	popfl;				\
	STI

#endif	/* __i386 */
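
/*
 * Note that the popf of F_ON only affects the guest-visible flags
 * (including PS_IE); under Xen that alone does not unmask event
 * delivery, since the vcpu's evtchn_upcall_mask is a separate bit --
 * hence the explicit STI that follows the popf.
 */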

/*
 * Virtualize IRET and SYSRET
 */

#if defined(__amd64)

#if defined(DEBUG)

/*
 * Die nastily with a #ud trap if we are about to switch to user
 * mode in HYPERVISOR_IRET and RUPDATE_PENDING is set.
 */
#define	__ASSERT_NO_RUPDATE_PENDING			\
	pushq	%r15;					\
	cmpw	$KCS_SEL, 0x10(%rsp);			\
	je	1f;					\
	movq	%gs:CPU_THREAD, %r15;			\
	movq	T_LWP(%r15), %r15;			\
	testb	$0x1, PCB_RUPDATE(%r15);		\
	je	1f;					\
	ud2;						\
1:	popq	%r15

#else	/* DEBUG */

#define	__ASSERT_NO_RUPDATE_PENDING

#endif	/* DEBUG */

/*
 * Switching from guest kernel to user mode.
 * flag == VGCF_IN_SYSCALL => return via sysret
 * flag == 0 => return via iretq
 *
 * See definition in public/arch-x86_64.h. Stack going in must be:
 * rax, r11, rcx, flags, rip, cs, rflags, rsp, ss.
 */
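/*
 * For reference, that stack layout corresponds (roughly; see xen's
 * public/arch-x86_64.h for the authoritative definition) to:
 *
 *	struct iret_context {
 *		uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
 *	};
 */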
#define	HYPERVISOR_IRET(flag)			\
	__ASSERT_NO_RUPDATE_PENDING;		\
	pushq	$flag;				\
	pushq	%rcx;				\
	pushq	%r11;				\
	pushq	%rax;				\
	movl	$__HYPERVISOR_iret, %eax;	\
	syscall;				\
	ud2	/* die nastily if we return! */

#define	IRET	HYPERVISOR_IRET(0)

/*
 * XXPV: Normally we would expect to use sysret to return from kernel to
 *	user mode when using the syscall instruction.  The iret hypercall
 *	does support both iret and sysret semantics.  For us to use sysret
 *	style would require that we use the hypervisor's private descriptors,
 *	which obey the segment selector ordering imposed by the syscall
 *	instruction.  With iret we can use whatever %cs value we choose.
 *	We should fix this to use sysret one day.
 */
#define	SYSRETQ	HYPERVISOR_IRET(0)
#define	SYSRETL	ud2		/* 32-bit syscall/sysret not supported */
#define	SWAPGS	/* empty - handled in hypervisor */

#elif defined(__i386)

/*
 * Switching from guest kernel to user mode.
 * See definition in public/arch-x86_32.h. Stack going in must be:
 * eax, flags, eip, cs, eflags, esp, ss.
 */
#define	HYPERVISOR_IRET				\
	pushl	%eax;				\
	movl	$__HYPERVISOR_iret, %eax;	\
	int	$0x82;				\
	ud2	/* die nastily if we return! */

#define	IRET	HYPERVISOR_IRET
#define	SYSRET	ud2		/* 32-bit syscall/sysret not supported */

#endif	/* __i386 */


/*
 * Xen 3.x wedges the current value of upcall_mask into an unused byte of
 * the saved %cs on the stack at the time of passing through a trap or
 * interrupt gate.  Since Xen also updates PS_IE in the saved %[e,r]flags,
 * we always mask off the saved upcall mask so that the kernel and tools
 * like debuggers will not be confused by bits set in reserved portions
 * of the %cs slot.
 *
 * See xen/include/public/arch-x86_[32,64].h:cpu_user_regs_t for details.
 */
#if defined(__amd64)

#define	CLEAN_CS	movb	$0, REGOFF_CS+4(%rsp)

#elif defined(__i386)

#define	CLEAN_CS	movb	$0, REGOFF_CS+2(%esp)

#endif	/* __i386 */
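
/*
 * The 16-bit %cs selector occupies the low bytes of its register-save
 * slot; Xen's saved_upcall_mask byte sits at offset 4 (amd64) or 2
 * (i386) within that slot, and that is the byte CLEAN_CS zeroes.
 */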

/*
 * All exceptions for amd64 have %r11 and %rcx on the stack.
 * Just pop them back into their appropriate registers and
 * let the normal register save proceed as if running native.
 */
#if defined(__amd64)

#define	XPV_TRAP_POP	\
	popq	%rcx;	\
	popq	%r11

#define	XPV_TRAP_PUSH	\
	pushq	%r11;	\
	pushq	%rcx

#endif	/* __amd64 */


/*
 * Macros for saving the original segment registers and restoring them
 * for fast traps.
 */
#if defined(__amd64)

/*
 * Smaller versions of INTR_PUSH and INTR_POP for fast traps.
 * The following registers have been pushed onto the stack by
 * hardware at this point:
 *
 *	greg_t	r_rip;
 *	greg_t	r_cs;
 *	greg_t	r_rfl;
 *	greg_t	r_rsp;
 *	greg_t	r_ss;
 *
 * This handler is executed both by 32-bit and 64-bit applications.
 * 64-bit applications allow us to treat the set (%rdi, %rsi, %rdx,
 * %rcx, %r8, %r9, %r10, %r11, %rax) as volatile across function calls.
 * However, 32-bit applications only expect (%eax, %edx, %ecx) to be volatile
 * across a function call -- in particular, %esi and %edi MUST be saved!
 *
 * We could do this differently by making a FAST_INTR_PUSH32 for 32-bit
 * programs, and FAST_INTR_PUSH for 64-bit programs, but it doesn't seem
 * particularly worth it.
 *
 */
#define	FAST_INTR_PUSH			\
	INTGATE_INIT_KERNEL_FLAGS;	\
	popq	%rcx;			\
	popq	%r11;			\
	subq	$REGOFF_RIP, %rsp;	\
	movq	%rsi, REGOFF_RSI(%rsp);	\
	movq	%rdi, REGOFF_RDI(%rsp);	\
	CLEAN_CS

#define	FAST_INTR_POP			\
	movq	REGOFF_RSI(%rsp), %rsi;	\
	movq	REGOFF_RDI(%rsp), %rdi;	\
	addq	$REGOFF_RIP, %rsp

#define	FAST_INTR_RETURN		\
	ASSERT_UPCALL_MASK_IS_SET;	\
	HYPERVISOR_IRET(0)

#elif defined(__i386)

#define	FAST_INTR_PUSH			\
	cld;				\
	__SEGREGS_PUSH			\
	__SEGREGS_LOAD_KERNEL		\

#define	FAST_INTR_POP			\
	__SEGREGS_POP

#define	FAST_INTR_RETURN		\
	IRET

#endif	/* __i386 */

/*
 * Handling the CR0.TS bit for floating point support.
 *
 * When the TS bit is *set*, attempts to touch the floating
 * point hardware will result in a #nm trap.
 */
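/*
 * A PV guest cannot write CR0 directly, so (illustratively) STTS
 * amounts to calling HYPERVISOR_fpu_taskswitch(1) to set CR0.TS,
 * and CLTS to HYPERVISOR_fpu_taskswitch(0) to clear it.
 */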
#if defined(__amd64)

#define	STTS(rtmp)				\
	pushq	%rdi;				\
	movl	$1, %edi;			\
	call	HYPERVISOR_fpu_taskswitch;	\
	popq	%rdi

#define	CLTS					\
	pushq	%rdi;				\
	xorl	%edi, %edi;			\
	call	HYPERVISOR_fpu_taskswitch;	\
	popq	%rdi

#elif defined(__i386)

#define	STTS(r)					\
	pushl	$1;				\
	call	HYPERVISOR_fpu_taskswitch;	\
	addl	$4, %esp

#define	CLTS					\
	pushl	$0;				\
	call	HYPERVISOR_fpu_taskswitch;	\
	addl	$4, %esp

#endif	/* __i386 */

#ifdef __cplusplus
}
#endif

#endif	/* _SYS_MACHPRIVREGS_H */