xref: /onnv-gate/usr/src/uts/intel/ia32/ml/copy.s (revision 8653:fdebd6f92bb8)
10Sstevel@tonic-gate/*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
52712Snn35248 * Common Development and Distribution License (the "License").
62712Snn35248 * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
210Sstevel@tonic-gate/*
22*8653SBill.Holler@Sun.COM * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate * Use is subject to license terms.
240Sstevel@tonic-gate */
250Sstevel@tonic-gate
268377SBill.Holler@Sun.COM/*
27*8653SBill.Holler@Sun.COM * Copyright (c) 2009, Intel Corporation
288377SBill.Holler@Sun.COM * All rights reserved.
298377SBill.Holler@Sun.COM */
308377SBill.Holler@Sun.COM
310Sstevel@tonic-gate/*       Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
320Sstevel@tonic-gate/*       Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
330Sstevel@tonic-gate/*         All Rights Reserved						*/
340Sstevel@tonic-gate
350Sstevel@tonic-gate/*       Copyright (c) 1987, 1988 Microsoft Corporation			*/
360Sstevel@tonic-gate/*         All Rights Reserved						*/
370Sstevel@tonic-gate
380Sstevel@tonic-gate#include <sys/errno.h>
390Sstevel@tonic-gate#include <sys/asm_linkage.h>
400Sstevel@tonic-gate
410Sstevel@tonic-gate#if defined(__lint)
420Sstevel@tonic-gate#include <sys/types.h>
430Sstevel@tonic-gate#include <sys/systm.h>
440Sstevel@tonic-gate#else	/* __lint */
450Sstevel@tonic-gate#include "assym.h"
460Sstevel@tonic-gate#endif	/* __lint */
470Sstevel@tonic-gate
480Sstevel@tonic-gate#define	KCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
490Sstevel@tonic-gate#define	XCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
500Sstevel@tonic-gate/*
510Sstevel@tonic-gate * Non-temporal access (NTA) alignment requirement
520Sstevel@tonic-gate */
530Sstevel@tonic-gate#define	NTA_ALIGN_SIZE	4	/* Must be at least 4-byte aligned */
540Sstevel@tonic-gate#define	NTA_ALIGN_MASK	_CONST(NTA_ALIGN_SIZE-1)
550Sstevel@tonic-gate#define	COUNT_ALIGN_SIZE	16	/* Must be at least 16-byte aligned */
560Sstevel@tonic-gate#define	COUNT_ALIGN_MASK	_CONST(COUNT_ALIGN_SIZE-1)
570Sstevel@tonic-gate
580Sstevel@tonic-gate/*
598377SBill.Holler@Sun.COM * The optimal 64-bit bcopy and kcopy for modern x86 processors uses
608377SBill.Holler@Sun.COM * "rep smovq" for large sizes. Performance data shows that many calls to
618377SBill.Holler@Sun.COM * bcopy/kcopy/bzero/kzero operate on small buffers. For best performance for
628377SBill.Holler@Sun.COM * these small sizes unrolled code is used. For medium sizes loops writing
638377SBill.Holler@Sun.COM * 64-bytes per loop are used. Transition points were determined experimentally.
648377SBill.Holler@Sun.COM */
658377SBill.Holler@Sun.COM#define BZERO_USE_REP	(1024)
668377SBill.Holler@Sun.COM#define BCOPY_DFLT_REP	(128)
678377SBill.Holler@Sun.COM#define	BCOPY_NHM_REP	(768)
688377SBill.Holler@Sun.COM
698377SBill.Holler@Sun.COM/*
700Sstevel@tonic-gate * Copy a block of storage, returning an error code if `from' or
710Sstevel@tonic-gate * `to' takes a kernel pagefault which cannot be resolved.
720Sstevel@tonic-gate * Returns errno value on pagefault error, 0 if all ok
730Sstevel@tonic-gate */
740Sstevel@tonic-gate
750Sstevel@tonic-gate#if defined(__lint)
760Sstevel@tonic-gate
770Sstevel@tonic-gate/* ARGSUSED */
/*
 * Lint-only stub: the real kcopy is the assembly implementation below.
 * Copies count bytes from `from' to `to'; returns 0 on success, or an
 * errno value if a kernel pagefault occurs during the copy.
 */
780Sstevel@tonic-gateint
790Sstevel@tonic-gatekcopy(const void *from, void *to, size_t count)
800Sstevel@tonic-gate{ return (0); }
810Sstevel@tonic-gate
820Sstevel@tonic-gate#else	/* __lint */
830Sstevel@tonic-gate
840Sstevel@tonic-gate	.globl	kernelbase
853446Smrj	.globl	postbootkernelbase
860Sstevel@tonic-gate
870Sstevel@tonic-gate#if defined(__amd64)
880Sstevel@tonic-gate
/*
 * int kcopy(const void *from, void *to, size_t count) -- amd64 version.
 *
 * SysV AMD64 entry: %rdi = from, %rsi = to, %rdx = count.
 * Returns 0 in %rax on success; if either address takes a kernel
 * pagefault, the trap handler places an errno value in %rax and
 * resumes execution at _kcopy_copyerr below.
 */
890Sstevel@tonic-gate	ENTRY(kcopy)
900Sstevel@tonic-gate	pushq	%rbp
910Sstevel@tonic-gate	movq	%rsp, %rbp
920Sstevel@tonic-gate#ifdef DEBUG
	/* DEBUG: panic unless both addresses are kernel addresses */
933446Smrj	cmpq	postbootkernelbase(%rip), %rdi 		/* %rdi = from */
940Sstevel@tonic-gate	jb	0f
953446Smrj	cmpq	postbootkernelbase(%rip), %rsi		/* %rsi = to */
960Sstevel@tonic-gate	jnb	1f
970Sstevel@tonic-gate0:	leaq	.kcopy_panic_msg(%rip), %rdi
980Sstevel@tonic-gate	xorl	%eax, %eax	/* variadic call: no vector args in %al */
990Sstevel@tonic-gate	call	panic
1000Sstevel@tonic-gate1:
1010Sstevel@tonic-gate#endif
1020Sstevel@tonic-gate	/*
1030Sstevel@tonic-gate	 * pass lofault value as 4th argument to do_copy_fault
1040Sstevel@tonic-gate	 */
1050Sstevel@tonic-gate	leaq	_kcopy_copyerr(%rip), %rcx
1060Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
1070Sstevel@tonic-gate
	/*
	 * Common fault-protected copy tail, also reached from kcopy_nta
	 * via "jnz do_copy_fault".  Expects:
	 *   %rcx = address of the lofault (fault recovery) handler
	 *   %r9  = curthread
	 * bcopy_altentry must not modify %r9 or %r11 (see the comment at
	 * bcopy_altentry); %r11 carries the saved lofault across the call.
	 */
1080Sstevel@tonic-gatedo_copy_fault:
1090Sstevel@tonic-gate	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
1100Sstevel@tonic-gate	movq	%rcx, T_LOFAULT(%r9)	/* new lofault */
1118377SBill.Holler@Sun.COM	call	bcopy_altentry
1120Sstevel@tonic-gate	xorl	%eax, %eax		/* return 0 (success) */
1130Sstevel@tonic-gate
1140Sstevel@tonic-gate	/*
1150Sstevel@tonic-gate	 * A fault during do_copy_fault is indicated through an errno value
1160Sstevel@tonic-gate	 * in %rax and we iretq from the trap handler to here.
1170Sstevel@tonic-gate	 */
1180Sstevel@tonic-gate_kcopy_copyerr:
1190Sstevel@tonic-gate	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
1200Sstevel@tonic-gate	leave
1210Sstevel@tonic-gate	ret
1220Sstevel@tonic-gate	SET_SIZE(kcopy)
1230Sstevel@tonic-gate
1240Sstevel@tonic-gate#elif defined(__i386)
1250Sstevel@tonic-gate
1260Sstevel@tonic-gate#define	ARG_FROM	8
1270Sstevel@tonic-gate#define	ARG_TO		12
1280Sstevel@tonic-gate#define	ARG_COUNT	16
1290Sstevel@tonic-gate
/*
 * int kcopy(const void *from, void *to, size_t count) -- i386 version.
 *
 * Arguments are on the stack (see the ARG_* frame offsets above).
 * Returns 0 in %eax on success; on a kernel pagefault the trap handler
 * places an errno value in %eax and resumes at _kcopy_copyerr below.
 */
1300Sstevel@tonic-gate	ENTRY(kcopy)
1310Sstevel@tonic-gate#ifdef DEBUG
	/* DEBUG: panic unless both addresses are kernel addresses */
1320Sstevel@tonic-gate	pushl	%ebp
1330Sstevel@tonic-gate	movl	%esp, %ebp
1343446Smrj	movl	postbootkernelbase, %eax
1350Sstevel@tonic-gate	cmpl	%eax, ARG_FROM(%ebp)
1360Sstevel@tonic-gate	jb	0f
1370Sstevel@tonic-gate	cmpl	%eax, ARG_TO(%ebp)
1380Sstevel@tonic-gate	jnb	1f
1390Sstevel@tonic-gate0:	pushl	$.kcopy_panic_msg
1400Sstevel@tonic-gate	call	panic
1410Sstevel@tonic-gate1:	popl	%ebp
1420Sstevel@tonic-gate#endif
1430Sstevel@tonic-gate	lea	_kcopy_copyerr, %eax	/* lofault value */
1440Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
1450Sstevel@tonic-gate
	/*
	 * Common fault-protected copy tail (also the target of the i386
	 * kcopy_nta, which simply jumps to kcopy).  Expects:
	 *   %eax = address of the lofault (fault recovery) handler
	 *   %edx = curthread
	 */
1460Sstevel@tonic-gatedo_copy_fault:
1470Sstevel@tonic-gate	pushl	%ebp
1480Sstevel@tonic-gate	movl	%esp, %ebp		/* setup stack frame */
1490Sstevel@tonic-gate	pushl	%esi
1500Sstevel@tonic-gate	pushl	%edi			/* save registers */
1510Sstevel@tonic-gate
1520Sstevel@tonic-gate	movl	T_LOFAULT(%edx), %edi
1530Sstevel@tonic-gate	pushl	%edi			/* save the current lofault */
1540Sstevel@tonic-gate	movl	%eax, T_LOFAULT(%edx)	/* new lofault */
1550Sstevel@tonic-gate
	/* copy count/4 longwords, then the 0-3 remaining bytes */
1560Sstevel@tonic-gate	movl	ARG_COUNT(%ebp), %ecx
1570Sstevel@tonic-gate	movl	ARG_FROM(%ebp), %esi
1580Sstevel@tonic-gate	movl	ARG_TO(%ebp), %edi
1590Sstevel@tonic-gate	shrl	$2, %ecx		/* word count */
1600Sstevel@tonic-gate	rep
1610Sstevel@tonic-gate	  smovl
1620Sstevel@tonic-gate	movl	ARG_COUNT(%ebp), %ecx
1630Sstevel@tonic-gate	andl	$3, %ecx		/* bytes left over */
1640Sstevel@tonic-gate	rep
1650Sstevel@tonic-gate	  smovb
1660Sstevel@tonic-gate	xorl	%eax, %eax		/* return 0 (success) */
1670Sstevel@tonic-gate
1680Sstevel@tonic-gate	/*
1690Sstevel@tonic-gate	 * A fault during do_copy_fault is indicated through an errno value
1700Sstevel@tonic-gate	 * in %eax and we iret from the trap handler to here.
1710Sstevel@tonic-gate	 */
1720Sstevel@tonic-gate_kcopy_copyerr:
1730Sstevel@tonic-gate	popl	%ecx			/* saved lofault */
1740Sstevel@tonic-gate	popl	%edi
1750Sstevel@tonic-gate	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
1760Sstevel@tonic-gate	popl	%esi
1770Sstevel@tonic-gate	popl	%ebp
1780Sstevel@tonic-gate	ret
1790Sstevel@tonic-gate	SET_SIZE(kcopy)
1800Sstevel@tonic-gate
1810Sstevel@tonic-gate#undef	ARG_FROM
1820Sstevel@tonic-gate#undef	ARG_TO
1830Sstevel@tonic-gate#undef	ARG_COUNT
1840Sstevel@tonic-gate
1850Sstevel@tonic-gate#endif	/* __i386 */
1860Sstevel@tonic-gate#endif	/* __lint */
1870Sstevel@tonic-gate
1880Sstevel@tonic-gate#if defined(__lint)
1890Sstevel@tonic-gate
1900Sstevel@tonic-gate/*
1910Sstevel@tonic-gate * Copy a block of storage.  Similar to kcopy but uses non-temporal
1920Sstevel@tonic-gate * instructions.
1930Sstevel@tonic-gate */
1940Sstevel@tonic-gate
1950Sstevel@tonic-gate/* ARGSUSED */
/*
 * Lint-only stub: the real kcopy_nta is the assembly implementation
 * below.  Like kcopy, but may use non-temporal (cache-bypassing)
 * stores when copy_cached is 0 and size/alignment requirements hold.
 */
1960Sstevel@tonic-gateint
1970Sstevel@tonic-gatekcopy_nta(const void *from, void *to, size_t count, int copy_cached)
1980Sstevel@tonic-gate{ return (0); }
1990Sstevel@tonic-gate
2000Sstevel@tonic-gate#else	/* __lint */
2010Sstevel@tonic-gate
2020Sstevel@tonic-gate#if defined(__amd64)
2030Sstevel@tonic-gate
/*
 * Point src and dst one byte past the end of the buffer and convert
 * cnt into a negative count of 8-byte quadwords.  COPY_LOOP_BODY then
 * addresses via (base, cnt, 8) and counts cnt up toward zero, so the
 * ZF set by its trailing "addq" terminates the loop.
 */
2040Sstevel@tonic-gate#define	COPY_LOOP_INIT(src, dst, cnt)	\
2050Sstevel@tonic-gate	addq	cnt, src;			\
2060Sstevel@tonic-gate	addq	cnt, dst;			\
2070Sstevel@tonic-gate	shrq	$3, cnt;			\
2080Sstevel@tonic-gate	neg	cnt
2090Sstevel@tonic-gate
2100Sstevel@tonic-gate	/* Copy 16 bytes per loop.  Uses %rax and %r8 */
	/*
	 * movnti performs non-temporal (write-combining, cache-bypassing)
	 * stores; prefetchnta pulls the source ahead with minimal cache
	 * pollution.  Caller must issue mfence after the loop (done below).
	 */
2110Sstevel@tonic-gate#define	COPY_LOOP_BODY(src, dst, cnt)	\
2120Sstevel@tonic-gate	prefetchnta	0x100(src, cnt, 8);	\
2130Sstevel@tonic-gate	movq	(src, cnt, 8), %rax;		\
2140Sstevel@tonic-gate	movq	0x8(src, cnt, 8), %r8;		\
2150Sstevel@tonic-gate	movnti	%rax, (dst, cnt, 8);		\
2160Sstevel@tonic-gate	movnti	%r8, 0x8(dst, cnt, 8);		\
2170Sstevel@tonic-gate	addq	$2, cnt
2180Sstevel@tonic-gate
/*
 * int kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
 *
 * amd64 entry: %rdi = from, %rsi = to, %rdx = count, %rcx = copy_cached.
 * If copy_cached != 0, or count < KCOPY_MIN_SIZE, or the pointers/count
 * fail the NTA alignment checks, falls back to the regular cached copy
 * via do_copy_fault.  Otherwise runs the non-temporal loop below.
 * Returns 0 on success or an errno value delivered by the trap handler
 * through _kcopy_nta_copyerr.
 */
2190Sstevel@tonic-gate	ENTRY(kcopy_nta)
2200Sstevel@tonic-gate	pushq	%rbp
2210Sstevel@tonic-gate	movq	%rsp, %rbp
2220Sstevel@tonic-gate#ifdef DEBUG
	/* DEBUG: panic unless both addresses are kernel addresses */
2233446Smrj	cmpq	postbootkernelbase(%rip), %rdi 		/* %rdi = from */
2240Sstevel@tonic-gate	jb	0f
2253446Smrj	cmpq	postbootkernelbase(%rip), %rsi		/* %rsi = to */
2260Sstevel@tonic-gate	jnb	1f
2270Sstevel@tonic-gate0:	leaq	.kcopy_panic_msg(%rip), %rdi
2280Sstevel@tonic-gate	xorl	%eax, %eax	/* variadic call: no vector args in %al */
2290Sstevel@tonic-gate	call	panic
2300Sstevel@tonic-gate1:
2310Sstevel@tonic-gate#endif
2320Sstevel@tonic-gate
2330Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9
2340Sstevel@tonic-gate	cmpq	$0, %rcx		/* No non-temporal access? */
2350Sstevel@tonic-gate	/*
2360Sstevel@tonic-gate	 * pass lofault value as 4th argument to do_copy_fault
2370Sstevel@tonic-gate	 */
	/* NOTE: the jnz below still tests the cmpq above; leaq leaves flags */
2380Sstevel@tonic-gate	leaq	_kcopy_nta_copyerr(%rip), %rcx	/* doesn't set rflags */
2390Sstevel@tonic-gate	jnz	do_copy_fault		/* use regular access */
2400Sstevel@tonic-gate	/*
2410Sstevel@tonic-gate	 * Make sure cnt is >= KCOPY_MIN_SIZE
2420Sstevel@tonic-gate	 */
2430Sstevel@tonic-gate	cmpq	$KCOPY_MIN_SIZE, %rdx
2440Sstevel@tonic-gate	jb	do_copy_fault
2450Sstevel@tonic-gate
2460Sstevel@tonic-gate	/*
2470Sstevel@tonic-gate	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
2480Sstevel@tonic-gate	 * count is COUNT_ALIGN_SIZE aligned.
2490Sstevel@tonic-gate	 */
2500Sstevel@tonic-gate	movq	%rdi, %r10
2510Sstevel@tonic-gate	orq	%rsi, %r10
2520Sstevel@tonic-gate	andq	$NTA_ALIGN_MASK, %r10
2530Sstevel@tonic-gate	orq	%rdx, %r10
2540Sstevel@tonic-gate	andq	$COUNT_ALIGN_MASK, %r10
2550Sstevel@tonic-gate	jnz	do_copy_fault
2560Sstevel@tonic-gate
	/*
	 * Externally visible entry: callers jump here with %rcx already
	 * holding their own lofault handler address (the code above set
	 * %rcx = _kcopy_nta_copyerr for the kcopy_nta path).
	 */
2570Sstevel@tonic-gate	ALTENTRY(do_copy_fault_nta)
2580Sstevel@tonic-gate	movq    %gs:CPU_THREAD, %r9     /* %r9 = thread addr */
2590Sstevel@tonic-gate	movq    T_LOFAULT(%r9), %r11    /* save the current lofault */
2600Sstevel@tonic-gate	movq    %rcx, T_LOFAULT(%r9)    /* new lofault */
2610Sstevel@tonic-gate
2620Sstevel@tonic-gate	/*
2630Sstevel@tonic-gate	 * COPY_LOOP_BODY uses %rax and %r8
2640Sstevel@tonic-gate	 */
2650Sstevel@tonic-gate	COPY_LOOP_INIT(%rdi, %rsi, %rdx)
2660Sstevel@tonic-gate2:	COPY_LOOP_BODY(%rdi, %rsi, %rdx)
2670Sstevel@tonic-gate	jnz	2b
2680Sstevel@tonic-gate
	/* order the movnti stores before any subsequent access */
2690Sstevel@tonic-gate	mfence
2700Sstevel@tonic-gate	xorl	%eax, %eax		/* return 0 (success) */
2710Sstevel@tonic-gate
	/* fault target: trap handler iretq's here with errno in %rax */
2720Sstevel@tonic-gate_kcopy_nta_copyerr:
2730Sstevel@tonic-gate	movq	%r11, T_LOFAULT(%r9)    /* restore original lofault */
2740Sstevel@tonic-gate	leave
2750Sstevel@tonic-gate	ret
2760Sstevel@tonic-gate	SET_SIZE(do_copy_fault_nta)
2770Sstevel@tonic-gate	SET_SIZE(kcopy_nta)
2780Sstevel@tonic-gate
2790Sstevel@tonic-gate#elif defined(__i386)
2800Sstevel@tonic-gate
2810Sstevel@tonic-gate#define	ARG_FROM	8
2820Sstevel@tonic-gate#define	ARG_TO		12
2830Sstevel@tonic-gate#define	ARG_COUNT	16
2840Sstevel@tonic-gate
/*
 * Point src and dst one byte past the end of the buffer and convert
 * cnt into a negative count of 8-byte units.  COPY_LOOP_BODY addresses
 * via (base, cnt, 8) and counts cnt up by 2 (= 16 bytes) per pass; the
 * ZF from its trailing "addl" terminates the loop.
 */
2850Sstevel@tonic-gate#define	COPY_LOOP_INIT(src, dst, cnt)	\
2860Sstevel@tonic-gate	addl	cnt, src;			\
2870Sstevel@tonic-gate	addl	cnt, dst;			\
2880Sstevel@tonic-gate	shrl	$3, cnt;			\
2890Sstevel@tonic-gate	neg	cnt
2900Sstevel@tonic-gate
	/*
	 * Copy 16 bytes per loop as four 4-byte non-temporal stores.
	 * Uses %esi as the data register; caller must mfence afterwards.
	 */
2910Sstevel@tonic-gate#define	COPY_LOOP_BODY(src, dst, cnt)	\
2920Sstevel@tonic-gate	prefetchnta	0x100(src, cnt, 8);	\
2930Sstevel@tonic-gate	movl	(src, cnt, 8), %esi;		\
2940Sstevel@tonic-gate	movnti	%esi, (dst, cnt, 8);		\
2950Sstevel@tonic-gate	movl	0x4(src, cnt, 8), %esi;		\
2960Sstevel@tonic-gate	movnti	%esi, 0x4(dst, cnt, 8);	\
2970Sstevel@tonic-gate	movl	0x8(src, cnt, 8), %esi;		\
2980Sstevel@tonic-gate	movnti	%esi, 0x8(dst, cnt, 8);	\
2990Sstevel@tonic-gate	movl	0xc(src, cnt, 8), %esi;		\
3000Sstevel@tonic-gate	movnti	%esi, 0xc(dst, cnt, 8);	\
3010Sstevel@tonic-gate	addl	$2, cnt
3020Sstevel@tonic-gate
3030Sstevel@tonic-gate	/*
3040Sstevel@tonic-gate	 * kcopy_nta is not implemented for 32-bit as no performance
3050Sstevel@tonic-gate	 * improvement was shown.  We simply jump directly to kcopy
3060Sstevel@tonic-gate	 * and discard the 4 arguments.
3070Sstevel@tonic-gate	 */
3080Sstevel@tonic-gate	ENTRY(kcopy_nta)
3090Sstevel@tonic-gate	jmp	kcopy
3100Sstevel@tonic-gate
	/*
	 * NOTE(review): this lea appears unreachable -- it follows an
	 * unconditional jmp with no intervening label.  External callers
	 * of do_copy_fault_nta presumably enter at the ALTENTRY with
	 * %eax preset to their own lofault handler; confirm before removal.
	 */
3110Sstevel@tonic-gate	lea	_kcopy_nta_copyerr, %eax	/* lofault value */
	/*
	 * Externally visible entry: non-temporal fault-protected copy.
	 * Expects %eax = lofault handler address; args on the stack at
	 * the ARG_* offsets.
	 */
3120Sstevel@tonic-gate	ALTENTRY(do_copy_fault_nta)
3130Sstevel@tonic-gate	pushl	%ebp
3140Sstevel@tonic-gate	movl	%esp, %ebp		/* setup stack frame */
3150Sstevel@tonic-gate	pushl	%esi
3160Sstevel@tonic-gate	pushl	%edi
3170Sstevel@tonic-gate
3180Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
3190Sstevel@tonic-gate	movl	T_LOFAULT(%edx), %edi
3200Sstevel@tonic-gate	pushl	%edi			/* save the current lofault */
3210Sstevel@tonic-gate	movl	%eax, T_LOFAULT(%edx)	/* new lofault */
3220Sstevel@tonic-gate
3230Sstevel@tonic-gate	/* COPY_LOOP_BODY needs to use %esi */
3240Sstevel@tonic-gate	movl	ARG_COUNT(%ebp), %ecx
3250Sstevel@tonic-gate	movl	ARG_FROM(%ebp), %edi
3260Sstevel@tonic-gate	movl	ARG_TO(%ebp), %eax
3270Sstevel@tonic-gate	COPY_LOOP_INIT(%edi, %eax, %ecx)
3280Sstevel@tonic-gate1:	COPY_LOOP_BODY(%edi, %eax, %ecx)
3290Sstevel@tonic-gate	jnz	1b
	/* order the movnti stores before any subsequent access */
3300Sstevel@tonic-gate	mfence
3310Sstevel@tonic-gate
3320Sstevel@tonic-gate	xorl	%eax, %eax		/* return 0 (success) */
	/* fault target: trap handler iret's here with errno in %eax */
3330Sstevel@tonic-gate_kcopy_nta_copyerr:
3340Sstevel@tonic-gate	popl	%ecx			/* saved lofault */
3350Sstevel@tonic-gate	popl	%edi
3360Sstevel@tonic-gate	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
3370Sstevel@tonic-gate	popl	%esi
3380Sstevel@tonic-gate	leave
3390Sstevel@tonic-gate	ret
3400Sstevel@tonic-gate	SET_SIZE(do_copy_fault_nta)
3410Sstevel@tonic-gate	SET_SIZE(kcopy_nta)
3420Sstevel@tonic-gate
3430Sstevel@tonic-gate#undef	ARG_FROM
3440Sstevel@tonic-gate#undef	ARG_TO
3450Sstevel@tonic-gate#undef	ARG_COUNT
3460Sstevel@tonic-gate
3470Sstevel@tonic-gate#endif	/* __i386 */
3480Sstevel@tonic-gate#endif	/* __lint */
3490Sstevel@tonic-gate
3500Sstevel@tonic-gate#if defined(__lint)
3510Sstevel@tonic-gate
3520Sstevel@tonic-gate/* ARGSUSED */
/*
 * Lint-only stub: the real bcopy is the assembly implementation below.
 * Copies count bytes from `from' to `to'; no return value and no
 * fault protection (unlike kcopy).
 */
3530Sstevel@tonic-gatevoid
3540Sstevel@tonic-gatebcopy(const void *from, void *to, size_t count)
3550Sstevel@tonic-gate{}
3560Sstevel@tonic-gate
3570Sstevel@tonic-gate#else	/* __lint */
3580Sstevel@tonic-gate
3590Sstevel@tonic-gate#if defined(__amd64)
3600Sstevel@tonic-gate
3610Sstevel@tonic-gate	ENTRY(bcopy)
3620Sstevel@tonic-gate#ifdef DEBUG
3630Sstevel@tonic-gate	orq	%rdx, %rdx		/* %rdx = count */
3640Sstevel@tonic-gate	jz	1f
3653446Smrj	cmpq	postbootkernelbase(%rip), %rdi		/* %rdi = from */
3660Sstevel@tonic-gate	jb	0f
3673446Smrj	cmpq	postbootkernelbase(%rip), %rsi		/* %rsi = to */
3680Sstevel@tonic-gate	jnb	1f
3690Sstevel@tonic-gate0:	leaq	.bcopy_panic_msg(%rip), %rdi
3700Sstevel@tonic-gate	jmp	call_panic		/* setup stack and call panic */
3710Sstevel@tonic-gate1:
3720Sstevel@tonic-gate#endif
3738377SBill.Holler@Sun.COM	/*
3748377SBill.Holler@Sun.COM	 * bcopy_altentry() is called from kcopy, i.e., do_copy_fault.
3758377SBill.Holler@Sun.COM	 * kcopy assumes that bcopy doesn't touch %r9 and %r11. If bcopy
3768377SBill.Holler@Sun.COM	 * uses these registers in future they must be saved and restored.
3778377SBill.Holler@Sun.COM	 */
3788377SBill.Holler@Sun.COM	ALTENTRY(bcopy_altentry)
3790Sstevel@tonic-gatedo_copy:
3808377SBill.Holler@Sun.COM#define	L(s) .bcopy/**/s
3818377SBill.Holler@Sun.COM	cmpq	$0x50, %rdx		/* 80 */
3828377SBill.Holler@Sun.COM	jge	bcopy_ck_size
3838377SBill.Holler@Sun.COM
3848377SBill.Holler@Sun.COM	/*
3858377SBill.Holler@Sun.COM	 * Performance data shows many callers copy small buffers. So for
3868377SBill.Holler@Sun.COM	 * best perf for these sizes unrolled code is used. Store data without
3878377SBill.Holler@Sun.COM	 * worrying about alignment.
3888377SBill.Holler@Sun.COM	 */
3898377SBill.Holler@Sun.COM	leaq	L(fwdPxQx)(%rip), %r10
3908377SBill.Holler@Sun.COM	addq	%rdx, %rdi
3918377SBill.Holler@Sun.COM	addq	%rdx, %rsi
3928377SBill.Holler@Sun.COM	movslq	(%r10,%rdx,4), %rcx
3938377SBill.Holler@Sun.COM	leaq	(%rcx,%r10,1), %r10
3948377SBill.Holler@Sun.COM	jmpq	*%r10
3958377SBill.Holler@Sun.COM
3968377SBill.Holler@Sun.COM	.p2align 4
3978377SBill.Holler@Sun.COML(fwdPxQx):
3988377SBill.Holler@Sun.COM	.int       L(P0Q0)-L(fwdPxQx)	/* 0 */
3998377SBill.Holler@Sun.COM	.int       L(P1Q0)-L(fwdPxQx)
4008377SBill.Holler@Sun.COM	.int       L(P2Q0)-L(fwdPxQx)
4018377SBill.Holler@Sun.COM	.int       L(P3Q0)-L(fwdPxQx)
4028377SBill.Holler@Sun.COM	.int       L(P4Q0)-L(fwdPxQx)
4038377SBill.Holler@Sun.COM	.int       L(P5Q0)-L(fwdPxQx)
4048377SBill.Holler@Sun.COM	.int       L(P6Q0)-L(fwdPxQx)
4058377SBill.Holler@Sun.COM	.int       L(P7Q0)-L(fwdPxQx)
4068377SBill.Holler@Sun.COM
4078377SBill.Holler@Sun.COM	.int       L(P0Q1)-L(fwdPxQx)	/* 8 */
4088377SBill.Holler@Sun.COM	.int       L(P1Q1)-L(fwdPxQx)
4098377SBill.Holler@Sun.COM	.int       L(P2Q1)-L(fwdPxQx)
4108377SBill.Holler@Sun.COM	.int       L(P3Q1)-L(fwdPxQx)
4118377SBill.Holler@Sun.COM	.int       L(P4Q1)-L(fwdPxQx)
4128377SBill.Holler@Sun.COM	.int       L(P5Q1)-L(fwdPxQx)
4138377SBill.Holler@Sun.COM	.int       L(P6Q1)-L(fwdPxQx)
4148377SBill.Holler@Sun.COM	.int       L(P7Q1)-L(fwdPxQx)
4158377SBill.Holler@Sun.COM
4168377SBill.Holler@Sun.COM	.int       L(P0Q2)-L(fwdPxQx)	/* 16 */
4178377SBill.Holler@Sun.COM	.int       L(P1Q2)-L(fwdPxQx)
4188377SBill.Holler@Sun.COM	.int       L(P2Q2)-L(fwdPxQx)
4198377SBill.Holler@Sun.COM	.int       L(P3Q2)-L(fwdPxQx)
4208377SBill.Holler@Sun.COM	.int       L(P4Q2)-L(fwdPxQx)
4218377SBill.Holler@Sun.COM	.int       L(P5Q2)-L(fwdPxQx)
4228377SBill.Holler@Sun.COM	.int       L(P6Q2)-L(fwdPxQx)
4238377SBill.Holler@Sun.COM	.int       L(P7Q2)-L(fwdPxQx)
4248377SBill.Holler@Sun.COM
4258377SBill.Holler@Sun.COM	.int       L(P0Q3)-L(fwdPxQx)	/* 24 */
4268377SBill.Holler@Sun.COM	.int       L(P1Q3)-L(fwdPxQx)
4278377SBill.Holler@Sun.COM	.int       L(P2Q3)-L(fwdPxQx)
4288377SBill.Holler@Sun.COM	.int       L(P3Q3)-L(fwdPxQx)
4298377SBill.Holler@Sun.COM	.int       L(P4Q3)-L(fwdPxQx)
4308377SBill.Holler@Sun.COM	.int       L(P5Q3)-L(fwdPxQx)
4318377SBill.Holler@Sun.COM	.int       L(P6Q3)-L(fwdPxQx)
4328377SBill.Holler@Sun.COM	.int       L(P7Q3)-L(fwdPxQx)
4338377SBill.Holler@Sun.COM
4348377SBill.Holler@Sun.COM	.int       L(P0Q4)-L(fwdPxQx)	/* 32 */
4358377SBill.Holler@Sun.COM	.int       L(P1Q4)-L(fwdPxQx)
4368377SBill.Holler@Sun.COM	.int       L(P2Q4)-L(fwdPxQx)
4378377SBill.Holler@Sun.COM	.int       L(P3Q4)-L(fwdPxQx)
4388377SBill.Holler@Sun.COM	.int       L(P4Q4)-L(fwdPxQx)
4398377SBill.Holler@Sun.COM	.int       L(P5Q4)-L(fwdPxQx)
4408377SBill.Holler@Sun.COM	.int       L(P6Q4)-L(fwdPxQx)
4418377SBill.Holler@Sun.COM	.int       L(P7Q4)-L(fwdPxQx)
4428377SBill.Holler@Sun.COM
4438377SBill.Holler@Sun.COM	.int       L(P0Q5)-L(fwdPxQx)	/* 40 */
4448377SBill.Holler@Sun.COM	.int       L(P1Q5)-L(fwdPxQx)
4458377SBill.Holler@Sun.COM	.int       L(P2Q5)-L(fwdPxQx)
4468377SBill.Holler@Sun.COM	.int       L(P3Q5)-L(fwdPxQx)
4478377SBill.Holler@Sun.COM	.int       L(P4Q5)-L(fwdPxQx)
4488377SBill.Holler@Sun.COM	.int       L(P5Q5)-L(fwdPxQx)
4498377SBill.Holler@Sun.COM	.int       L(P6Q5)-L(fwdPxQx)
4508377SBill.Holler@Sun.COM	.int       L(P7Q5)-L(fwdPxQx)
4518377SBill.Holler@Sun.COM
4528377SBill.Holler@Sun.COM	.int       L(P0Q6)-L(fwdPxQx)	/* 48 */
4538377SBill.Holler@Sun.COM	.int       L(P1Q6)-L(fwdPxQx)
4548377SBill.Holler@Sun.COM	.int       L(P2Q6)-L(fwdPxQx)
4558377SBill.Holler@Sun.COM	.int       L(P3Q6)-L(fwdPxQx)
4568377SBill.Holler@Sun.COM	.int       L(P4Q6)-L(fwdPxQx)
4578377SBill.Holler@Sun.COM	.int       L(P5Q6)-L(fwdPxQx)
4588377SBill.Holler@Sun.COM	.int       L(P6Q6)-L(fwdPxQx)
4598377SBill.Holler@Sun.COM	.int       L(P7Q6)-L(fwdPxQx)
4608377SBill.Holler@Sun.COM
4618377SBill.Holler@Sun.COM	.int       L(P0Q7)-L(fwdPxQx)	/* 56 */
4628377SBill.Holler@Sun.COM	.int       L(P1Q7)-L(fwdPxQx)
4638377SBill.Holler@Sun.COM	.int       L(P2Q7)-L(fwdPxQx)
4648377SBill.Holler@Sun.COM	.int       L(P3Q7)-L(fwdPxQx)
4658377SBill.Holler@Sun.COM	.int       L(P4Q7)-L(fwdPxQx)
4668377SBill.Holler@Sun.COM	.int       L(P5Q7)-L(fwdPxQx)
4678377SBill.Holler@Sun.COM	.int       L(P6Q7)-L(fwdPxQx)
4688377SBill.Holler@Sun.COM	.int       L(P7Q7)-L(fwdPxQx)
4698377SBill.Holler@Sun.COM
4708377SBill.Holler@Sun.COM	.int       L(P0Q8)-L(fwdPxQx)	/* 64 */
4718377SBill.Holler@Sun.COM	.int       L(P1Q8)-L(fwdPxQx)
4728377SBill.Holler@Sun.COM	.int       L(P2Q8)-L(fwdPxQx)
4738377SBill.Holler@Sun.COM	.int       L(P3Q8)-L(fwdPxQx)
4748377SBill.Holler@Sun.COM	.int       L(P4Q8)-L(fwdPxQx)
4758377SBill.Holler@Sun.COM	.int       L(P5Q8)-L(fwdPxQx)
4768377SBill.Holler@Sun.COM	.int       L(P6Q8)-L(fwdPxQx)
4778377SBill.Holler@Sun.COM	.int       L(P7Q8)-L(fwdPxQx)
4788377SBill.Holler@Sun.COM
4798377SBill.Holler@Sun.COM	.int       L(P0Q9)-L(fwdPxQx)	/* 72 */
4808377SBill.Holler@Sun.COM	.int       L(P1Q9)-L(fwdPxQx)
4818377SBill.Holler@Sun.COM	.int       L(P2Q9)-L(fwdPxQx)
4828377SBill.Holler@Sun.COM	.int       L(P3Q9)-L(fwdPxQx)
4838377SBill.Holler@Sun.COM	.int       L(P4Q9)-L(fwdPxQx)
4848377SBill.Holler@Sun.COM	.int       L(P5Q9)-L(fwdPxQx)
4858377SBill.Holler@Sun.COM	.int       L(P6Q9)-L(fwdPxQx)
4868377SBill.Holler@Sun.COM	.int       L(P7Q9)-L(fwdPxQx)	/* 79 */
4878377SBill.Holler@Sun.COM
4888377SBill.Holler@Sun.COM	.p2align 4
4898377SBill.Holler@Sun.COML(P0Q9):
4908377SBill.Holler@Sun.COM	mov    -0x48(%rdi), %rcx
4918377SBill.Holler@Sun.COM	mov    %rcx, -0x48(%rsi)
4928377SBill.Holler@Sun.COML(P0Q8):
4938377SBill.Holler@Sun.COM	mov    -0x40(%rdi), %r10
4948377SBill.Holler@Sun.COM	mov    %r10, -0x40(%rsi)
4958377SBill.Holler@Sun.COML(P0Q7):
4968377SBill.Holler@Sun.COM	mov    -0x38(%rdi), %r8
4978377SBill.Holler@Sun.COM	mov    %r8, -0x38(%rsi)
4988377SBill.Holler@Sun.COML(P0Q6):
4998377SBill.Holler@Sun.COM	mov    -0x30(%rdi), %rcx
5008377SBill.Holler@Sun.COM	mov    %rcx, -0x30(%rsi)
5018377SBill.Holler@Sun.COML(P0Q5):
5028377SBill.Holler@Sun.COM	mov    -0x28(%rdi), %r10
5038377SBill.Holler@Sun.COM	mov    %r10, -0x28(%rsi)
5048377SBill.Holler@Sun.COML(P0Q4):
5058377SBill.Holler@Sun.COM	mov    -0x20(%rdi), %r8
5068377SBill.Holler@Sun.COM	mov    %r8, -0x20(%rsi)
5078377SBill.Holler@Sun.COML(P0Q3):
5088377SBill.Holler@Sun.COM	mov    -0x18(%rdi), %rcx
5098377SBill.Holler@Sun.COM	mov    %rcx, -0x18(%rsi)
5108377SBill.Holler@Sun.COML(P0Q2):
5118377SBill.Holler@Sun.COM	mov    -0x10(%rdi), %r10
5128377SBill.Holler@Sun.COM	mov    %r10, -0x10(%rsi)
5138377SBill.Holler@Sun.COML(P0Q1):
5148377SBill.Holler@Sun.COM	mov    -0x8(%rdi), %r8
5158377SBill.Holler@Sun.COM	mov    %r8, -0x8(%rsi)
5168377SBill.Holler@Sun.COML(P0Q0):
5178377SBill.Holler@Sun.COM	ret
5188377SBill.Holler@Sun.COM
5198377SBill.Holler@Sun.COM	.p2align 4
5208377SBill.Holler@Sun.COML(P1Q9):
5218377SBill.Holler@Sun.COM	mov    -0x49(%rdi), %r8
5228377SBill.Holler@Sun.COM	mov    %r8, -0x49(%rsi)
5238377SBill.Holler@Sun.COML(P1Q8):
5248377SBill.Holler@Sun.COM	mov    -0x41(%rdi), %rcx
5258377SBill.Holler@Sun.COM	mov    %rcx, -0x41(%rsi)
5268377SBill.Holler@Sun.COML(P1Q7):
5278377SBill.Holler@Sun.COM	mov    -0x39(%rdi), %r10
5288377SBill.Holler@Sun.COM	mov    %r10, -0x39(%rsi)
5298377SBill.Holler@Sun.COML(P1Q6):
5308377SBill.Holler@Sun.COM	mov    -0x31(%rdi), %r8
5318377SBill.Holler@Sun.COM	mov    %r8, -0x31(%rsi)
5328377SBill.Holler@Sun.COML(P1Q5):
5338377SBill.Holler@Sun.COM	mov    -0x29(%rdi), %rcx
5348377SBill.Holler@Sun.COM	mov    %rcx, -0x29(%rsi)
5358377SBill.Holler@Sun.COML(P1Q4):
5368377SBill.Holler@Sun.COM	mov    -0x21(%rdi), %r10
5378377SBill.Holler@Sun.COM	mov    %r10, -0x21(%rsi)
5388377SBill.Holler@Sun.COML(P1Q3):
5398377SBill.Holler@Sun.COM	mov    -0x19(%rdi), %r8
5408377SBill.Holler@Sun.COM	mov    %r8, -0x19(%rsi)
5418377SBill.Holler@Sun.COML(P1Q2):
5428377SBill.Holler@Sun.COM	mov    -0x11(%rdi), %rcx
5438377SBill.Holler@Sun.COM	mov    %rcx, -0x11(%rsi)
5448377SBill.Holler@Sun.COML(P1Q1):
5458377SBill.Holler@Sun.COM	mov    -0x9(%rdi), %r10
5468377SBill.Holler@Sun.COM	mov    %r10, -0x9(%rsi)
5478377SBill.Holler@Sun.COML(P1Q0):
5488377SBill.Holler@Sun.COM	movzbq -0x1(%rdi), %r8
5498377SBill.Holler@Sun.COM	mov    %r8b, -0x1(%rsi)
5508377SBill.Holler@Sun.COM	ret
5518377SBill.Holler@Sun.COM
5528377SBill.Holler@Sun.COM	.p2align 4
5538377SBill.Holler@Sun.COML(P2Q9):
5548377SBill.Holler@Sun.COM	mov    -0x4a(%rdi), %r8
5558377SBill.Holler@Sun.COM	mov    %r8, -0x4a(%rsi)
5568377SBill.Holler@Sun.COML(P2Q8):
5578377SBill.Holler@Sun.COM	mov    -0x42(%rdi), %rcx
5588377SBill.Holler@Sun.COM	mov    %rcx, -0x42(%rsi)
5598377SBill.Holler@Sun.COML(P2Q7):
5608377SBill.Holler@Sun.COM	mov    -0x3a(%rdi), %r10
5618377SBill.Holler@Sun.COM	mov    %r10, -0x3a(%rsi)
5628377SBill.Holler@Sun.COML(P2Q6):
5638377SBill.Holler@Sun.COM	mov    -0x32(%rdi), %r8
5648377SBill.Holler@Sun.COM	mov    %r8, -0x32(%rsi)
5658377SBill.Holler@Sun.COML(P2Q5):
5668377SBill.Holler@Sun.COM	mov    -0x2a(%rdi), %rcx
5678377SBill.Holler@Sun.COM	mov    %rcx, -0x2a(%rsi)
5688377SBill.Holler@Sun.COML(P2Q4):
5698377SBill.Holler@Sun.COM	mov    -0x22(%rdi), %r10
5708377SBill.Holler@Sun.COM	mov    %r10, -0x22(%rsi)
5718377SBill.Holler@Sun.COML(P2Q3):
5728377SBill.Holler@Sun.COM	mov    -0x1a(%rdi), %r8
5738377SBill.Holler@Sun.COM	mov    %r8, -0x1a(%rsi)
5748377SBill.Holler@Sun.COML(P2Q2):
5758377SBill.Holler@Sun.COM	mov    -0x12(%rdi), %rcx
5768377SBill.Holler@Sun.COM	mov    %rcx, -0x12(%rsi)
5778377SBill.Holler@Sun.COML(P2Q1):
5788377SBill.Holler@Sun.COM	mov    -0xa(%rdi), %r10
5798377SBill.Holler@Sun.COM	mov    %r10, -0xa(%rsi)
5808377SBill.Holler@Sun.COML(P2Q0):
5818377SBill.Holler@Sun.COM	movzwq -0x2(%rdi), %r8
5828377SBill.Holler@Sun.COM	mov    %r8w, -0x2(%rsi)
5838377SBill.Holler@Sun.COM	ret
5848377SBill.Holler@Sun.COM
5858377SBill.Holler@Sun.COM	.p2align 4
5868377SBill.Holler@Sun.COML(P3Q9):
5878377SBill.Holler@Sun.COM	mov    -0x4b(%rdi), %r8
5888377SBill.Holler@Sun.COM	mov    %r8, -0x4b(%rsi)
5898377SBill.Holler@Sun.COML(P3Q8):
5908377SBill.Holler@Sun.COM	mov    -0x43(%rdi), %rcx
5918377SBill.Holler@Sun.COM	mov    %rcx, -0x43(%rsi)
5928377SBill.Holler@Sun.COML(P3Q7):
5938377SBill.Holler@Sun.COM	mov    -0x3b(%rdi), %r10
5948377SBill.Holler@Sun.COM	mov    %r10, -0x3b(%rsi)
5958377SBill.Holler@Sun.COML(P3Q6):
5968377SBill.Holler@Sun.COM	mov    -0x33(%rdi), %r8
5978377SBill.Holler@Sun.COM	mov    %r8, -0x33(%rsi)
5988377SBill.Holler@Sun.COML(P3Q5):
5998377SBill.Holler@Sun.COM	mov    -0x2b(%rdi), %rcx
6008377SBill.Holler@Sun.COM	mov    %rcx, -0x2b(%rsi)
6018377SBill.Holler@Sun.COML(P3Q4):
6028377SBill.Holler@Sun.COM	mov    -0x23(%rdi), %r10
6038377SBill.Holler@Sun.COM	mov    %r10, -0x23(%rsi)
6048377SBill.Holler@Sun.COML(P3Q3):
6058377SBill.Holler@Sun.COM	mov    -0x1b(%rdi), %r8
6068377SBill.Holler@Sun.COM	mov    %r8, -0x1b(%rsi)
6078377SBill.Holler@Sun.COML(P3Q2):
6088377SBill.Holler@Sun.COM	mov    -0x13(%rdi), %rcx
6098377SBill.Holler@Sun.COM	mov    %rcx, -0x13(%rsi)
6108377SBill.Holler@Sun.COML(P3Q1):
6118377SBill.Holler@Sun.COM	mov    -0xb(%rdi), %r10
6128377SBill.Holler@Sun.COM	mov    %r10, -0xb(%rsi)
6138377SBill.Holler@Sun.COM	/*
6148377SBill.Holler@Sun.COM	 * These trailing loads/stores have to do all their loads 1st,
6158377SBill.Holler@Sun.COM	 * then do the stores.
6168377SBill.Holler@Sun.COM	 */
6178377SBill.Holler@Sun.COML(P3Q0):
6188377SBill.Holler@Sun.COM	movzwq -0x3(%rdi), %r8
6198377SBill.Holler@Sun.COM	movzbq -0x1(%rdi), %r10
6208377SBill.Holler@Sun.COM	mov    %r8w, -0x3(%rsi)
6218377SBill.Holler@Sun.COM	mov    %r10b, -0x1(%rsi)
6228377SBill.Holler@Sun.COM	ret
6238377SBill.Holler@Sun.COM
6248377SBill.Holler@Sun.COM	.p2align 4
6258377SBill.Holler@Sun.COML(P4Q9):
6268377SBill.Holler@Sun.COM	mov    -0x4c(%rdi), %r8
6278377SBill.Holler@Sun.COM	mov    %r8, -0x4c(%rsi)
6288377SBill.Holler@Sun.COML(P4Q8):
6298377SBill.Holler@Sun.COM	mov    -0x44(%rdi), %rcx
6308377SBill.Holler@Sun.COM	mov    %rcx, -0x44(%rsi)
6318377SBill.Holler@Sun.COML(P4Q7):
6328377SBill.Holler@Sun.COM	mov    -0x3c(%rdi), %r10
6338377SBill.Holler@Sun.COM	mov    %r10, -0x3c(%rsi)
6348377SBill.Holler@Sun.COML(P4Q6):
6358377SBill.Holler@Sun.COM	mov    -0x34(%rdi), %r8
6368377SBill.Holler@Sun.COM	mov    %r8, -0x34(%rsi)
6378377SBill.Holler@Sun.COML(P4Q5):
6388377SBill.Holler@Sun.COM	mov    -0x2c(%rdi), %rcx
6398377SBill.Holler@Sun.COM	mov    %rcx, -0x2c(%rsi)
6408377SBill.Holler@Sun.COML(P4Q4):
6418377SBill.Holler@Sun.COM	mov    -0x24(%rdi), %r10
6428377SBill.Holler@Sun.COM	mov    %r10, -0x24(%rsi)
6438377SBill.Holler@Sun.COML(P4Q3):
6448377SBill.Holler@Sun.COM	mov    -0x1c(%rdi), %r8
6458377SBill.Holler@Sun.COM	mov    %r8, -0x1c(%rsi)
6468377SBill.Holler@Sun.COML(P4Q2):
6478377SBill.Holler@Sun.COM	mov    -0x14(%rdi), %rcx
6488377SBill.Holler@Sun.COM	mov    %rcx, -0x14(%rsi)
6498377SBill.Holler@Sun.COML(P4Q1):
6508377SBill.Holler@Sun.COM	mov    -0xc(%rdi), %r10
6518377SBill.Holler@Sun.COM	mov    %r10, -0xc(%rsi)
6528377SBill.Holler@Sun.COML(P4Q0):
6538377SBill.Holler@Sun.COM	mov    -0x4(%rdi), %r8d
6548377SBill.Holler@Sun.COM	mov    %r8d, -0x4(%rsi)
6558377SBill.Holler@Sun.COM	ret
6568377SBill.Holler@Sun.COM
	/*
	 * bcopy jump-table targets, P5 group: entering at L(P5Qx) copies the
	 * final (8 * x + 5) bytes.  %rdi/%rsi point past the end of the
	 * buffers; the trailing 5 bytes are done as a dword plus a byte.
	 */
	.p2align 4
L(P5Q9):
	mov    -0x4d(%rdi), %r8
	mov    %r8, -0x4d(%rsi)
L(P5Q8):
	mov    -0x45(%rdi), %rcx
	mov    %rcx, -0x45(%rsi)
L(P5Q7):
	mov    -0x3d(%rdi), %r10
	mov    %r10, -0x3d(%rsi)
L(P5Q6):
	mov    -0x35(%rdi), %r8
	mov    %r8, -0x35(%rsi)
L(P5Q5):
	mov    -0x2d(%rdi), %rcx
	mov    %rcx, -0x2d(%rsi)
L(P5Q4):
	mov    -0x25(%rdi), %r10
	mov    %r10, -0x25(%rsi)
L(P5Q3):
	mov    -0x1d(%rdi), %r8
	mov    %r8, -0x1d(%rsi)
L(P5Q2):
	mov    -0x15(%rdi), %rcx
	mov    %rcx, -0x15(%rsi)
L(P5Q1):
	mov    -0xd(%rdi), %r10
	mov    %r10, -0xd(%rsi)
L(P5Q0):
	mov    -0x5(%rdi), %r8d		/* both loads issued before stores */
	movzbq -0x1(%rdi), %r10
	mov    %r8d, -0x5(%rsi)
	mov    %r10b, -0x1(%rsi)
	ret
6918377SBill.Holler@Sun.COM
	/*
	 * bcopy jump-table targets, P6 group: entering at L(P6Qx) copies the
	 * final (8 * x + 6) bytes.  The trailing 6 bytes are done as a dword
	 * plus a word.
	 */
	.p2align 4
L(P6Q9):
	mov    -0x4e(%rdi), %r8
	mov    %r8, -0x4e(%rsi)
L(P6Q8):
	mov    -0x46(%rdi), %rcx
	mov    %rcx, -0x46(%rsi)
L(P6Q7):
	mov    -0x3e(%rdi), %r10
	mov    %r10, -0x3e(%rsi)
L(P6Q6):
	mov    -0x36(%rdi), %r8
	mov    %r8, -0x36(%rsi)
L(P6Q5):
	mov    -0x2e(%rdi), %rcx
	mov    %rcx, -0x2e(%rsi)
L(P6Q4):
	mov    -0x26(%rdi), %r10
	mov    %r10, -0x26(%rsi)
L(P6Q3):
	mov    -0x1e(%rdi), %r8
	mov    %r8, -0x1e(%rsi)
L(P6Q2):
	mov    -0x16(%rdi), %rcx
	mov    %rcx, -0x16(%rsi)
L(P6Q1):
	mov    -0xe(%rdi), %r10
	mov    %r10, -0xe(%rsi)
L(P6Q0):
	mov    -0x6(%rdi), %r8d
	movzwq -0x2(%rdi), %r10
	mov    %r8d, -0x6(%rsi)
	mov    %r10w, -0x2(%rsi)
	ret
7268377SBill.Holler@Sun.COM
	/*
	 * bcopy jump-table targets, P7 group: entering at L(P7Qx) copies the
	 * final (8 * x + 7) bytes.  The trailing 7 bytes are done as a dword,
	 * a word, and a byte.
	 */
	.p2align 4
L(P7Q9):
	mov    -0x4f(%rdi), %r8
	mov    %r8, -0x4f(%rsi)
L(P7Q8):
	mov    -0x47(%rdi), %rcx
	mov    %rcx, -0x47(%rsi)
L(P7Q7):
	mov    -0x3f(%rdi), %r10
	mov    %r10, -0x3f(%rsi)
L(P7Q6):
	mov    -0x37(%rdi), %r8
	mov    %r8, -0x37(%rsi)
L(P7Q5):
	mov    -0x2f(%rdi), %rcx
	mov    %rcx, -0x2f(%rsi)
L(P7Q4):
	mov    -0x27(%rdi), %r10
	mov    %r10, -0x27(%rsi)
L(P7Q3):
	mov    -0x1f(%rdi), %r8
	mov    %r8, -0x1f(%rsi)
L(P7Q2):
	mov    -0x17(%rdi), %rcx
	mov    %rcx, -0x17(%rsi)
L(P7Q1):
	mov    -0xf(%rdi), %r10
	mov    %r10, -0xf(%rsi)
L(P7Q0):
	mov    -0x7(%rdi), %r8d		/* all three loads before any store */
	movzwq -0x3(%rdi), %r10
	movzbq -0x1(%rdi), %rcx
	mov    %r8d, -0x7(%rsi)
	mov    %r10w, -0x3(%rsi)
	mov    %cl, -0x1(%rsi)
	ret
7638377SBill.Holler@Sun.COM
	/*
	 * For large sizes rep smovq is fastest.
	 * Transition point determined experimentally as measured on
	 * Intel Xeon processors (incl. Nehalem and previous generations) and
	 * AMD Opteron. The transition value is patched at boot time to avoid
	 * memory reference hit.
	 *
	 * bcopy_patch_start/bcopy_patch_end delimit the single cmpq
	 * instruction whose immediate is rewritten at boot (to the
	 * Nehalem-tuned threshold) when the CPU warrants it.
	 */
	.globl bcopy_patch_start
bcopy_patch_start:
	cmpq	$BCOPY_NHM_REP, %rdx
	.globl bcopy_patch_end
bcopy_patch_end:

	.p2align 4
	.globl bcopy_ck_size
bcopy_ck_size:
	cmpq	$BCOPY_DFLT_REP, %rdx	/* %rdx = remaining byte count */
	jge	L(use_rep)

	/*
	 * Align to a 8-byte boundary. Avoids penalties from unaligned stores
	 * as well as from stores spanning cachelines.  %rsi is the
	 * destination; copy 1, 2 and/or 4 bytes as needed until
	 * (%rsi & 7) == 0, adjusting the count in %rdx as we go.
	 */
	test	$0x7, %rsi
	jz	L(aligned_loop)
	test	$0x1, %rsi		/* odd address: copy one byte */
	jz	2f
	movzbq	(%rdi), %r8
	dec	%rdx
	inc	%rdi
	mov	%r8b, (%rsi)
	inc	%rsi
2:
	test	$0x2, %rsi		/* 2-byte misaligned: copy a word */
	jz	4f
	movzwq	(%rdi), %r8
	sub	$0x2, %rdx
	add	$0x2, %rdi
	mov	%r8w, (%rsi)
	add	$0x2, %rsi
4:
	test	$0x4, %rsi		/* 4-byte misaligned: copy a dword */
	jz	L(aligned_loop)
	mov	(%rdi), %r8d
	sub	$0x4, %rdx
	add	$0x4, %rdi
	mov	%r8d, (%rsi)
	add	$0x4, %rsi

	/*
	 * Copy 64-bytes per loop.  The cmp is hoisted into the middle of the
	 * loop; the mov/lea instructions that follow it do not modify flags,
	 * so the trailing jge still tests %rdx against 0x40.
	 */
	.p2align 4
L(aligned_loop):
	mov	(%rdi), %r8
	mov	0x8(%rdi), %r10
	lea	-0x40(%rdx), %rdx	/* lea: adjust count, preserve flags */
	mov	%r8, (%rsi)
	mov	%r10, 0x8(%rsi)
	mov	0x10(%rdi), %rcx
	mov	0x18(%rdi), %r8
	mov	%rcx, 0x10(%rsi)
	mov	%r8, 0x18(%rsi)

	cmp	$0x40, %rdx		/* another full 64-byte chunk left? */
	mov	0x20(%rdi), %r10
	mov	0x28(%rdi), %rcx
	mov	%r10, 0x20(%rsi)
	mov	%rcx, 0x28(%rsi)
	mov	0x30(%rdi), %r8
	mov	0x38(%rdi), %r10
	lea	0x40(%rdi), %rdi
	mov	%r8, 0x30(%rsi)
	mov	%r10, 0x38(%rsi)
	lea	0x40(%rsi), %rsi
	jge	L(aligned_loop)

	/*
	 * Copy remaining bytes (0-63) via the unrolled code: advance both
	 * pointers past the end, fetch the signed 32-bit displacement for
	 * this remainder from the L(fwdPxQx) table (defined earlier in the
	 * file), and jump to table-base + displacement.
	 */
L(do_remainder):
	leaq	L(fwdPxQx)(%rip), %r10
	addq	%rdx, %rdi
	addq	%rdx, %rsi
	movslq	(%r10,%rdx,4), %rcx
	leaq	(%rcx,%r10,1), %r10
	jmpq	*%r10
8518377SBill.Holler@Sun.COM
	/*
	 * Use rep smovq (rep movsq) for the bulk of the copy, then handle
	 * the 0-7 byte remainder via the unrolled code above.
	 */
	.p2align 4
L(use_rep):
	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
	movq	%rdx, %rcx		/* %rcx = count */
	shrq	$3, %rcx		/* 8-byte word count */
	rep
	  smovq

	xchgq	%rsi, %rdi		/* %rdi = src, %rsi = destination */
	andq	$7, %rdx		/* remainder */
	jnz	L(do_remainder)
	ret
#undef	L
8680Sstevel@tonic-gate
#ifdef DEBUG
	/*
	 * Common DEBUG trampoline: the bcopy/bzero/kzero argument checks
	 * jump here with the panic format string already in %rdi.
	 *
	 * Setup frame on the run-time stack. The end of the input argument
	 * area must be aligned on a 16 byte boundary. The stack pointer %rsp,
	 * always points to the end of the latest allocated stack frame.
	 * panic(const char *format, ...) is a varargs function; the SysV
	 * AMD64 ABI requires %al to hold the number of vector registers
	 * used for the variable arguments (none here).  panic() does not
	 * return, so no epilogue is needed.
	 */
call_panic:
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* no variable arguments */
	call	panic			/* %rdi = format string */
#endif
	SET_SIZE(bcopy_altentry)
	SET_SIZE(bcopy)
8870Sstevel@tonic-gate
8880Sstevel@tonic-gate#elif defined(__i386)
8890Sstevel@tonic-gate
8900Sstevel@tonic-gate#define	ARG_FROM	4
8910Sstevel@tonic-gate#define	ARG_TO		8
8920Sstevel@tonic-gate#define	ARG_COUNT	12
8930Sstevel@tonic-gate
	ENTRY(bcopy)
#ifdef DEBUG
	/*
	 * Sanity check (skipped when count == 0): both the source and the
	 * destination must be kernel addresses, i.e. >= postbootkernelbase.
	 */
	movl	ARG_COUNT(%esp), %eax
	orl	%eax, %eax
	jz	1f
	movl	postbootkernelbase, %eax
	cmpl	%eax, ARG_FROM(%esp)
	jb	0f
	cmpl	%eax, ARG_TO(%esp)
	jnb	1f
0:	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.bcopy_panic_msg
	call	panic
1:
#endif
do_copy:
	/*
	 * %esi/%edi are callee-saved; stash them in scratch registers
	 * (%eax/%edx) rather than on the stack so the ARG_* offsets
	 * relative to %esp remain valid.
	 */
	movl	%esi, %eax		/* save registers */
	movl	%edi, %edx
	movl	ARG_COUNT(%esp), %ecx
	movl	ARG_FROM(%esp), %esi
	movl	ARG_TO(%esp), %edi

	shrl	$2, %ecx		/* word count */
	rep
	  smovl
	movl	ARG_COUNT(%esp), %ecx
	andl	$3, %ecx		/* bytes left over */
	rep
	  smovb
	movl	%eax, %esi		/* restore registers */
	movl	%edx, %edi
	ret
	SET_SIZE(bcopy)
9280Sstevel@tonic-gate
9290Sstevel@tonic-gate#undef	ARG_COUNT
9300Sstevel@tonic-gate#undef	ARG_FROM
9310Sstevel@tonic-gate#undef	ARG_TO
9320Sstevel@tonic-gate
9330Sstevel@tonic-gate#endif	/* __i386 */
9340Sstevel@tonic-gate#endif	/* __lint */
9350Sstevel@tonic-gate
9360Sstevel@tonic-gate
9370Sstevel@tonic-gate/*
9380Sstevel@tonic-gate * Zero a block of storage, returning an error code if we
9390Sstevel@tonic-gate * take a kernel pagefault which cannot be resolved.
9400Sstevel@tonic-gate * Returns errno value on pagefault error, 0 if all ok
9410Sstevel@tonic-gate */
9420Sstevel@tonic-gate
9430Sstevel@tonic-gate#if defined(__lint)
9440Sstevel@tonic-gate
/* lint stub only: the real kzero is implemented in assembly below */
/* ARGSUSED */
int
kzero(void *addr, size_t count)
{ return (0); }
9490Sstevel@tonic-gate
9500Sstevel@tonic-gate#else	/* __lint */
9510Sstevel@tonic-gate
9520Sstevel@tonic-gate#if defined(__amd64)
9530Sstevel@tonic-gate
	ENTRY(kzero)
#ifdef DEBUG
        cmpq	postbootkernelbase(%rip), %rdi	/* %rdi = addr */
        jnb	0f
        leaq	.kzero_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
0:
#endif
	/*
	 * _kzeroerr is the lofault target: install it in the current
	 * thread's t_lofault so a pagefault inside bzero resumes there.
	 */
	leaq	_kzeroerr(%rip), %rdx

	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%rdx, T_LOFAULT(%r9)	/* new lofault */
	call	bzero_altentry
	xorl	%eax, %eax		/* no fault: return 0 */
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
	ret
	/*
	 * A fault during bzero is indicated through an errno value
	 * in %rax when we iretq to here.  The call's return address is
	 * still on the stack since bzero_altentry never returned.
	 */
_kzeroerr:
	addq	$8, %rsp		/* pop bzero_altentry call ret addr */
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
	ret
	SET_SIZE(kzero)
9830Sstevel@tonic-gate
9840Sstevel@tonic-gate#elif defined(__i386)
9850Sstevel@tonic-gate
9860Sstevel@tonic-gate#define	ARG_ADDR	8
9870Sstevel@tonic-gate#define	ARG_COUNT	12
9880Sstevel@tonic-gate
	ENTRY(kzero)
#ifdef DEBUG
	/*
	 * Sanity check: the address must be a kernel address
	 * (>= postbootkernelbase).
	 */
	pushl	%ebp
	movl	%esp, %ebp
	movl	postbootkernelbase, %eax
        cmpl	%eax, ARG_ADDR(%ebp)
        jnb	0f
        pushl   $.kzero_panic_msg
        call    panic
0:	popl	%ebp
#endif
	lea	_kzeroerr, %eax		/* kzeroerr is lofault value */

	pushl	%ebp			/* save stack base */
	movl	%esp, %ebp		/* set new stack base */
	pushl	%edi			/* save %edi */

	mov	%gs:CPU_THREAD, %edx
	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
	movl	ARG_ADDR(%ebp), %edi	/* %edi <- address of bytes to clear */
	shrl	$2, %ecx		/* Count of double words to zero */
	xorl	%eax, %eax		/* sstol val */
	rep
	  sstol			/* %ecx contains words to clear (%eax=0) */

	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
	andl	$3, %ecx		/* do mod 4 */
	rep
	  sstob			/* %ecx contains residual bytes to clear */

	/*
	 * A fault during kzero is indicated through an errno value
	 * in %eax when we iret to here.  The success path falls through
	 * into _kzeroerr with %eax still 0 from the xorl above, so both
	 * paths share the same unwind/return sequence.
	 */
_kzeroerr:
	popl	%edi
	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%edi
	popl	%ebp
	ret
	SET_SIZE(kzero)
10340Sstevel@tonic-gate
10350Sstevel@tonic-gate#undef	ARG_ADDR
10360Sstevel@tonic-gate#undef	ARG_COUNT
10370Sstevel@tonic-gate
10380Sstevel@tonic-gate#endif	/* __i386 */
10390Sstevel@tonic-gate#endif	/* __lint */
10400Sstevel@tonic-gate
10410Sstevel@tonic-gate/*
10420Sstevel@tonic-gate * Zero a block of storage.
10430Sstevel@tonic-gate */
10440Sstevel@tonic-gate
10450Sstevel@tonic-gate#if defined(__lint)
10460Sstevel@tonic-gate
/* lint stub only: the real bzero is implemented in assembly below */
/* ARGSUSED */
void
bzero(void *addr, size_t count)
{}
10510Sstevel@tonic-gate
10520Sstevel@tonic-gate#else	/* __lint */
10530Sstevel@tonic-gate
10540Sstevel@tonic-gate#if defined(__amd64)
10550Sstevel@tonic-gate
	ENTRY(bzero)
#ifdef DEBUG
	cmpq	postbootkernelbase(%rip), %rdi	/* %rdi = addr */
	jnb	0f
	leaq	.bzero_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
0:
#endif
	ALTENTRY(bzero_altentry)
do_zero:
#define	L(s) .bzero/**/s
	xorl	%eax, %eax		/* %rax = 0, the store value */

	cmpq	$0x50, %rsi		/* 80 */
	jge	L(ck_align)

	/*
	 * Performance data shows many callers are zeroing small buffers. So
	 * for best perf for these sizes unrolled code is used. Store zeros
	 * without worrying about alignment.  Advance %rdi past the end of
	 * the buffer, load the signed 32-bit displacement for this size
	 * from L(setPxQx), and jump to table-base + displacement.
	 */
	leaq	L(setPxQx)(%rip), %r10
	addq	%rsi, %rdi
	movslq	(%r10,%rsi,4), %rcx
	leaq	(%rcx,%r10,1), %r10
	jmpq	*%r10
10828377SBill.Holler@Sun.COM
	/*
	 * Dispatch table for zeroing 0-79 trailing bytes.  Entry N holds the
	 * signed 32-bit offset of L(PxQy), with N == 8 * y + x, relative to
	 * the table base; L(PxQy) stores y quadwords of zeros followed by
	 * x odd bytes.
	 */
	.p2align 4
L(setPxQx):
	.int       L(P0Q0)-L(setPxQx)	/* 0 */
	.int       L(P1Q0)-L(setPxQx)
	.int       L(P2Q0)-L(setPxQx)
	.int       L(P3Q0)-L(setPxQx)
	.int       L(P4Q0)-L(setPxQx)
	.int       L(P5Q0)-L(setPxQx)
	.int       L(P6Q0)-L(setPxQx)
	.int       L(P7Q0)-L(setPxQx)

	.int       L(P0Q1)-L(setPxQx)	/* 8 */
	.int       L(P1Q1)-L(setPxQx)
	.int       L(P2Q1)-L(setPxQx)
	.int       L(P3Q1)-L(setPxQx)
	.int       L(P4Q1)-L(setPxQx)
	.int       L(P5Q1)-L(setPxQx)
	.int       L(P6Q1)-L(setPxQx)
	.int       L(P7Q1)-L(setPxQx)

	.int       L(P0Q2)-L(setPxQx)	/* 16 */
	.int       L(P1Q2)-L(setPxQx)
	.int       L(P2Q2)-L(setPxQx)
	.int       L(P3Q2)-L(setPxQx)
	.int       L(P4Q2)-L(setPxQx)
	.int       L(P5Q2)-L(setPxQx)
	.int       L(P6Q2)-L(setPxQx)
	.int       L(P7Q2)-L(setPxQx)

	.int       L(P0Q3)-L(setPxQx)	/* 24 */
	.int       L(P1Q3)-L(setPxQx)
	.int       L(P2Q3)-L(setPxQx)
	.int       L(P3Q3)-L(setPxQx)
	.int       L(P4Q3)-L(setPxQx)
	.int       L(P5Q3)-L(setPxQx)
	.int       L(P6Q3)-L(setPxQx)
	.int       L(P7Q3)-L(setPxQx)

	.int       L(P0Q4)-L(setPxQx)	/* 32 */
	.int       L(P1Q4)-L(setPxQx)
	.int       L(P2Q4)-L(setPxQx)
	.int       L(P3Q4)-L(setPxQx)
	.int       L(P4Q4)-L(setPxQx)
	.int       L(P5Q4)-L(setPxQx)
	.int       L(P6Q4)-L(setPxQx)
	.int       L(P7Q4)-L(setPxQx)

	.int       L(P0Q5)-L(setPxQx)	/* 40 */
	.int       L(P1Q5)-L(setPxQx)
	.int       L(P2Q5)-L(setPxQx)
	.int       L(P3Q5)-L(setPxQx)
	.int       L(P4Q5)-L(setPxQx)
	.int       L(P5Q5)-L(setPxQx)
	.int       L(P6Q5)-L(setPxQx)
	.int       L(P7Q5)-L(setPxQx)

	.int       L(P0Q6)-L(setPxQx)	/* 48 */
	.int       L(P1Q6)-L(setPxQx)
	.int       L(P2Q6)-L(setPxQx)
	.int       L(P3Q6)-L(setPxQx)
	.int       L(P4Q6)-L(setPxQx)
	.int       L(P5Q6)-L(setPxQx)
	.int       L(P6Q6)-L(setPxQx)
	.int       L(P7Q6)-L(setPxQx)

	.int       L(P0Q7)-L(setPxQx)	/* 56 */
	.int       L(P1Q7)-L(setPxQx)
	.int       L(P2Q7)-L(setPxQx)
	.int       L(P3Q7)-L(setPxQx)
	.int       L(P4Q7)-L(setPxQx)
	.int       L(P5Q7)-L(setPxQx)
	.int       L(P6Q7)-L(setPxQx)
	.int       L(P7Q7)-L(setPxQx)

	.int       L(P0Q8)-L(setPxQx)	/* 64 */
	.int       L(P1Q8)-L(setPxQx)
	.int       L(P2Q8)-L(setPxQx)
	.int       L(P3Q8)-L(setPxQx)
	.int       L(P4Q8)-L(setPxQx)
	.int       L(P5Q8)-L(setPxQx)
	.int       L(P6Q8)-L(setPxQx)
	.int       L(P7Q8)-L(setPxQx)

	.int       L(P0Q9)-L(setPxQx)	/* 72 */
	.int       L(P1Q9)-L(setPxQx)
	.int       L(P2Q9)-L(setPxQx)
	.int       L(P3Q9)-L(setPxQx)
	.int       L(P4Q9)-L(setPxQx)
	.int       L(P5Q9)-L(setPxQx)
	.int       L(P6Q9)-L(setPxQx)
	.int       L(P7Q9)-L(setPxQx)	/* 79 */
11748377SBill.Holler@Sun.COM
	/*
	 * Unrolled store targets: entering at L(PxQy) stores the final
	 * (8 * y + x) zero bytes.  %rdi points past the end of the buffer
	 * (negative offsets), %rax is 0.  The x odd bytes are stored via
	 * %eax/%ax/%al as appropriate.
	 */
	.p2align 4
L(P0Q9): mov    %rax, -0x48(%rdi)
L(P0Q8): mov    %rax, -0x40(%rdi)
L(P0Q7): mov    %rax, -0x38(%rdi)
L(P0Q6): mov    %rax, -0x30(%rdi)
L(P0Q5): mov    %rax, -0x28(%rdi)
L(P0Q4): mov    %rax, -0x20(%rdi)
L(P0Q3): mov    %rax, -0x18(%rdi)
L(P0Q2): mov    %rax, -0x10(%rdi)
L(P0Q1): mov    %rax, -0x8(%rdi)
L(P0Q0):
	 ret

	.p2align 4
L(P1Q9): mov    %rax, -0x49(%rdi)
L(P1Q8): mov    %rax, -0x41(%rdi)
L(P1Q7): mov    %rax, -0x39(%rdi)
L(P1Q6): mov    %rax, -0x31(%rdi)
L(P1Q5): mov    %rax, -0x29(%rdi)
L(P1Q4): mov    %rax, -0x21(%rdi)
L(P1Q3): mov    %rax, -0x19(%rdi)
L(P1Q2): mov    %rax, -0x11(%rdi)
L(P1Q1): mov    %rax, -0x9(%rdi)
L(P1Q0): mov    %al, -0x1(%rdi)
	 ret

	.p2align 4
L(P2Q9): mov    %rax, -0x4a(%rdi)
L(P2Q8): mov    %rax, -0x42(%rdi)
L(P2Q7): mov    %rax, -0x3a(%rdi)
L(P2Q6): mov    %rax, -0x32(%rdi)
L(P2Q5): mov    %rax, -0x2a(%rdi)
L(P2Q4): mov    %rax, -0x22(%rdi)
L(P2Q3): mov    %rax, -0x1a(%rdi)
L(P2Q2): mov    %rax, -0x12(%rdi)
L(P2Q1): mov    %rax, -0xa(%rdi)
L(P2Q0): mov    %ax, -0x2(%rdi)
	 ret

	.p2align 4
L(P3Q9): mov    %rax, -0x4b(%rdi)
L(P3Q8): mov    %rax, -0x43(%rdi)
L(P3Q7): mov    %rax, -0x3b(%rdi)
L(P3Q6): mov    %rax, -0x33(%rdi)
L(P3Q5): mov    %rax, -0x2b(%rdi)
L(P3Q4): mov    %rax, -0x23(%rdi)
L(P3Q3): mov    %rax, -0x1b(%rdi)
L(P3Q2): mov    %rax, -0x13(%rdi)
L(P3Q1): mov    %rax, -0xb(%rdi)
L(P3Q0): mov    %ax, -0x3(%rdi)	/* 3 odd bytes = word + byte */
	 mov    %al, -0x1(%rdi)
	 ret

	.p2align 4
L(P4Q9): mov    %rax, -0x4c(%rdi)
L(P4Q8): mov    %rax, -0x44(%rdi)
L(P4Q7): mov    %rax, -0x3c(%rdi)
L(P4Q6): mov    %rax, -0x34(%rdi)
L(P4Q5): mov    %rax, -0x2c(%rdi)
L(P4Q4): mov    %rax, -0x24(%rdi)
L(P4Q3): mov    %rax, -0x1c(%rdi)
L(P4Q2): mov    %rax, -0x14(%rdi)
L(P4Q1): mov    %rax, -0xc(%rdi)
L(P4Q0): mov    %eax, -0x4(%rdi)
	 ret

	.p2align 4
L(P5Q9): mov    %rax, -0x4d(%rdi)
L(P5Q8): mov    %rax, -0x45(%rdi)
L(P5Q7): mov    %rax, -0x3d(%rdi)
L(P5Q6): mov    %rax, -0x35(%rdi)
L(P5Q5): mov    %rax, -0x2d(%rdi)
L(P5Q4): mov    %rax, -0x25(%rdi)
L(P5Q3): mov    %rax, -0x1d(%rdi)
L(P5Q2): mov    %rax, -0x15(%rdi)
L(P5Q1): mov    %rax, -0xd(%rdi)
L(P5Q0): mov    %eax, -0x5(%rdi)	/* 5 odd bytes = dword + byte */
	 mov    %al, -0x1(%rdi)
	 ret

	.p2align 4
L(P6Q9): mov    %rax, -0x4e(%rdi)
L(P6Q8): mov    %rax, -0x46(%rdi)
L(P6Q7): mov    %rax, -0x3e(%rdi)
L(P6Q6): mov    %rax, -0x36(%rdi)
L(P6Q5): mov    %rax, -0x2e(%rdi)
L(P6Q4): mov    %rax, -0x26(%rdi)
L(P6Q3): mov    %rax, -0x1e(%rdi)
L(P6Q2): mov    %rax, -0x16(%rdi)
L(P6Q1): mov    %rax, -0xe(%rdi)
L(P6Q0): mov    %eax, -0x6(%rdi)	/* 6 odd bytes = dword + word */
	 mov    %ax, -0x2(%rdi)
	 ret

	.p2align 4
L(P7Q9): mov    %rax, -0x4f(%rdi)
L(P7Q8): mov    %rax, -0x47(%rdi)
L(P7Q7): mov    %rax, -0x3f(%rdi)
L(P7Q6): mov    %rax, -0x37(%rdi)
L(P7Q5): mov    %rax, -0x2f(%rdi)
L(P7Q4): mov    %rax, -0x27(%rdi)
L(P7Q3): mov    %rax, -0x1f(%rdi)
L(P7Q2): mov    %rax, -0x17(%rdi)
L(P7Q1): mov    %rax, -0xf(%rdi)
L(P7Q0): mov    %eax, -0x7(%rdi)	/* 7 odd bytes = dword + word + byte */
	 mov    %ax, -0x3(%rdi)
	 mov    %al, -0x1(%rdi)
	 ret
12838377SBill.Holler@Sun.COM
	/*
	 * Align to a 16-byte boundary. Avoids penalties from unaligned stores
	 * as well as from stores spanning cachelines. Note 16-byte alignment
	 * is better in case where rep sstosq is used.  %rdi = dest,
	 * %rsi = count, %rax = 0; store 1/2/4/8 zero bytes as needed until
	 * (%rdi & 0xf) == 0.
	 */
	.p2align 4
L(ck_align):
	test	$0xf, %rdi
	jz	L(aligned_now)
	test	$1, %rdi
	jz	2f
	mov	%al, (%rdi)
	dec	%rsi
	lea	1(%rdi),%rdi
2:
	test	$2, %rdi
	jz	4f
	mov	%ax, (%rdi)
	sub	$2, %rsi
	lea	2(%rdi),%rdi
4:
	test	$4, %rdi
	jz	8f
	mov	%eax, (%rdi)
	sub	$4, %rsi
	lea	4(%rdi),%rdi
8:
	test	$8, %rdi
	jz	L(aligned_now)
	mov	%rax, (%rdi)
	sub	$8, %rsi
	lea	8(%rdi),%rdi

	/*
	 * For large sizes rep sstoq is fastest.
	 * Transition point determined experimentally as measured on
	 * Intel Xeon processors (incl. Nehalem) and AMD Opteron.
	 * NOTE(review): signed compare (jg) on a size_t count — assumes
	 * kernel lengths stay below 2^63.
	 */
L(aligned_now):
	cmp	$BZERO_USE_REP, %rsi
	jg	L(use_rep)

	/*
	 * zero 64-bytes per loop; %rsi is decremented up front with the
	 * flags from cmp kept live across the movs for the trailing jge.
	 */
	.p2align 4
L(bzero_loop):
	leaq	-0x40(%rsi), %rsi
	cmpq	$0x40, %rsi
	movq	%rax, (%rdi)
	movq	%rax, 0x8(%rdi)
	movq	%rax, 0x10(%rdi)
	movq	%rax, 0x18(%rdi)
	movq	%rax, 0x20(%rdi)
	movq	%rax, 0x28(%rdi)
	movq	%rax, 0x30(%rdi)
	movq	%rax, 0x38(%rdi)
	leaq	0x40(%rdi), %rdi
	jge	L(bzero_loop)

	/*
	 * Clear any remaining bytes (0-63) via the unrolled code:
	 * computed jump through the L(setPxQx) offset table.
	 */
9:
	leaq	L(setPxQx)(%rip), %r10
	addq	%rsi, %rdi
	movslq	(%r10,%rsi,4), %rcx
	leaq	(%rcx,%r10,1), %r10
	jmpq	*%r10

	/*
	 * Use rep sstoq (rep stosq). Clear any remainder via unrolled code
	 */
	.p2align 4
L(use_rep):
	movq	%rsi, %rcx		/* get size in bytes */
	shrq	$3, %rcx		/* count of 8-byte words to zero */
	rep
	  sstoq				/* %rcx = words to clear (%rax=0) */
	andq	$7, %rsi		/* remaining bytes */
	jnz	9b
	ret
#undef	L
	SET_SIZE(bzero_altentry)
	SET_SIZE(bzero)
13690Sstevel@tonic-gate
13700Sstevel@tonic-gate#elif defined(__i386)
13710Sstevel@tonic-gate
13720Sstevel@tonic-gate#define	ARG_ADDR	4
13730Sstevel@tonic-gate#define	ARG_COUNT	8
13740Sstevel@tonic-gate
13750Sstevel@tonic-gate	ENTRY(bzero)
13760Sstevel@tonic-gate#ifdef DEBUG
13773446Smrj	movl	postbootkernelbase, %eax
13780Sstevel@tonic-gate	cmpl	%eax, ARG_ADDR(%esp)
13790Sstevel@tonic-gate	jnb	0f
13800Sstevel@tonic-gate	pushl	%ebp
13810Sstevel@tonic-gate	movl	%esp, %ebp
13820Sstevel@tonic-gate	pushl	$.bzero_panic_msg
13830Sstevel@tonic-gate	call	panic
13840Sstevel@tonic-gate0:
13850Sstevel@tonic-gate#endif
13860Sstevel@tonic-gatedo_zero:
13870Sstevel@tonic-gate	movl	%edi, %edx
13880Sstevel@tonic-gate	movl	ARG_COUNT(%esp), %ecx
13890Sstevel@tonic-gate	movl	ARG_ADDR(%esp), %edi
13900Sstevel@tonic-gate	shrl	$2, %ecx
13910Sstevel@tonic-gate	xorl	%eax, %eax
13920Sstevel@tonic-gate	rep
13930Sstevel@tonic-gate	  sstol
13940Sstevel@tonic-gate	movl	ARG_COUNT(%esp), %ecx
13950Sstevel@tonic-gate	andl	$3, %ecx
13960Sstevel@tonic-gate	rep
13970Sstevel@tonic-gate	  sstob
13980Sstevel@tonic-gate	movl	%edx, %edi
13990Sstevel@tonic-gate	ret
14000Sstevel@tonic-gate	SET_SIZE(bzero)
14010Sstevel@tonic-gate
14020Sstevel@tonic-gate#undef	ARG_ADDR
14030Sstevel@tonic-gate#undef	ARG_COUNT
14040Sstevel@tonic-gate
14050Sstevel@tonic-gate#endif	/* __i386 */
14060Sstevel@tonic-gate#endif	/* __lint */
14070Sstevel@tonic-gate
14080Sstevel@tonic-gate/*
14090Sstevel@tonic-gate * Transfer data to and from user space -
14100Sstevel@tonic-gate * Note that these routines can cause faults
14110Sstevel@tonic-gate * It is assumed that the kernel has nothing at
14120Sstevel@tonic-gate * less than KERNELBASE in the virtual address space.
14130Sstevel@tonic-gate *
14140Sstevel@tonic-gate * Note that copyin(9F) and copyout(9F) are part of the
14150Sstevel@tonic-gate * DDI/DKI which specifies that they return '-1' on "errors."
14160Sstevel@tonic-gate *
14170Sstevel@tonic-gate * Sigh.
14180Sstevel@tonic-gate *
14190Sstevel@tonic-gate * So there's two extremely similar routines - xcopyin_nta() and
14200Sstevel@tonic-gate * xcopyout_nta() which return the errno that we've faithfully computed.
14210Sstevel@tonic-gate * This allows other callers (e.g. uiomove(9F)) to work correctly.
14220Sstevel@tonic-gate * Given that these are used pretty heavily, we expand the calling
14230Sstevel@tonic-gate * sequences inline for all flavours (rather than making wrappers).
14240Sstevel@tonic-gate */
14250Sstevel@tonic-gate
14260Sstevel@tonic-gate/*
14270Sstevel@tonic-gate * Copy user data to kernel space.
14280Sstevel@tonic-gate */
14290Sstevel@tonic-gate
14300Sstevel@tonic-gate#if defined(__lint)
14310Sstevel@tonic-gate
14320Sstevel@tonic-gate/* ARGSUSED */
14330Sstevel@tonic-gateint
14340Sstevel@tonic-gatecopyin(const void *uaddr, void *kaddr, size_t count)
14350Sstevel@tonic-gate{ return (0); }
14360Sstevel@tonic-gate
14370Sstevel@tonic-gate#else	/* lint */
14380Sstevel@tonic-gate
14390Sstevel@tonic-gate#if defined(__amd64)
14400Sstevel@tonic-gate
14410Sstevel@tonic-gate	ENTRY(copyin)
14420Sstevel@tonic-gate	pushq	%rbp
14430Sstevel@tonic-gate	movq	%rsp, %rbp
1444*8653SBill.Holler@Sun.COM	subq	$24, %rsp
14450Sstevel@tonic-gate
14460Sstevel@tonic-gate	/*
14470Sstevel@tonic-gate	 * save args in case we trap and need to rerun as a copyop
14480Sstevel@tonic-gate	 */
14490Sstevel@tonic-gate	movq	%rdi, (%rsp)
14500Sstevel@tonic-gate	movq	%rsi, 0x8(%rsp)
14510Sstevel@tonic-gate	movq	%rdx, 0x10(%rsp)
14520Sstevel@tonic-gate
14530Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
14540Sstevel@tonic-gate#ifdef DEBUG
14550Sstevel@tonic-gate	cmpq	%rax, %rsi		/* %rsi = kaddr */
14560Sstevel@tonic-gate	jnb	1f
14570Sstevel@tonic-gate	leaq	.copyin_panic_msg(%rip), %rdi
14580Sstevel@tonic-gate	xorl	%eax, %eax
14590Sstevel@tonic-gate	call	panic
14600Sstevel@tonic-gate1:
14610Sstevel@tonic-gate#endif
14620Sstevel@tonic-gate	/*
14630Sstevel@tonic-gate	 * pass lofault value as 4th argument to do_copy_fault
14640Sstevel@tonic-gate	 */
14650Sstevel@tonic-gate	leaq	_copyin_err(%rip), %rcx
14660Sstevel@tonic-gate
14670Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9
14680Sstevel@tonic-gate	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
14690Sstevel@tonic-gate	jb	do_copy_fault
14700Sstevel@tonic-gate	jmp	3f
14710Sstevel@tonic-gate
14720Sstevel@tonic-gate_copyin_err:
14730Sstevel@tonic-gate	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
1474*8653SBill.Holler@Sun.COM	addq	$8, %rsp		/* pop bcopy_altentry call ret addr */
14750Sstevel@tonic-gate3:
14760Sstevel@tonic-gate	movq	T_COPYOPS(%r9), %rax
14770Sstevel@tonic-gate	cmpq	$0, %rax
14780Sstevel@tonic-gate	jz	2f
14790Sstevel@tonic-gate	/*
14800Sstevel@tonic-gate	 * reload args for the copyop
14810Sstevel@tonic-gate	 */
14820Sstevel@tonic-gate	movq	(%rsp), %rdi
14830Sstevel@tonic-gate	movq	0x8(%rsp), %rsi
14840Sstevel@tonic-gate	movq	0x10(%rsp), %rdx
14850Sstevel@tonic-gate	leave
14860Sstevel@tonic-gate	jmp	*CP_COPYIN(%rax)
14870Sstevel@tonic-gate
14880Sstevel@tonic-gate2:	movl	$-1, %eax
14890Sstevel@tonic-gate	leave
14900Sstevel@tonic-gate	ret
14910Sstevel@tonic-gate	SET_SIZE(copyin)
14920Sstevel@tonic-gate
14930Sstevel@tonic-gate#elif defined(__i386)
14940Sstevel@tonic-gate
14950Sstevel@tonic-gate#define	ARG_UADDR	4
14960Sstevel@tonic-gate#define	ARG_KADDR	8
14970Sstevel@tonic-gate
14980Sstevel@tonic-gate	ENTRY(copyin)
14990Sstevel@tonic-gate	movl	kernelbase, %ecx
15000Sstevel@tonic-gate#ifdef DEBUG
15010Sstevel@tonic-gate	cmpl	%ecx, ARG_KADDR(%esp)
15020Sstevel@tonic-gate	jnb	1f
15030Sstevel@tonic-gate	pushl	%ebp
15040Sstevel@tonic-gate	movl	%esp, %ebp
15050Sstevel@tonic-gate	pushl	$.copyin_panic_msg
15060Sstevel@tonic-gate	call	panic
15070Sstevel@tonic-gate1:
15080Sstevel@tonic-gate#endif
15090Sstevel@tonic-gate	lea	_copyin_err, %eax
15100Sstevel@tonic-gate
15110Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
15120Sstevel@tonic-gate	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
15130Sstevel@tonic-gate	jb	do_copy_fault
15140Sstevel@tonic-gate	jmp	3f
15150Sstevel@tonic-gate
15160Sstevel@tonic-gate_copyin_err:
15170Sstevel@tonic-gate	popl	%ecx
15180Sstevel@tonic-gate	popl	%edi
15190Sstevel@tonic-gate	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
15200Sstevel@tonic-gate	popl	%esi
15210Sstevel@tonic-gate	popl	%ebp
15220Sstevel@tonic-gate3:
15230Sstevel@tonic-gate	movl	T_COPYOPS(%edx), %eax
15240Sstevel@tonic-gate	cmpl	$0, %eax
15250Sstevel@tonic-gate	jz	2f
15260Sstevel@tonic-gate	jmp	*CP_COPYIN(%eax)
15270Sstevel@tonic-gate
15280Sstevel@tonic-gate2:	movl	$-1, %eax
15290Sstevel@tonic-gate	ret
15300Sstevel@tonic-gate	SET_SIZE(copyin)
15310Sstevel@tonic-gate
15320Sstevel@tonic-gate#undef	ARG_UADDR
15330Sstevel@tonic-gate#undef	ARG_KADDR
15340Sstevel@tonic-gate
15350Sstevel@tonic-gate#endif	/* __i386 */
15360Sstevel@tonic-gate#endif	/* __lint */
15370Sstevel@tonic-gate
15380Sstevel@tonic-gate#if defined(__lint)
15390Sstevel@tonic-gate
15400Sstevel@tonic-gate/* ARGSUSED */
15410Sstevel@tonic-gateint
15420Sstevel@tonic-gatexcopyin_nta(const void *uaddr, void *kaddr, size_t count, int copy_cached)
15430Sstevel@tonic-gate{ return (0); }
15440Sstevel@tonic-gate
15450Sstevel@tonic-gate#else	/* __lint */
15460Sstevel@tonic-gate
15470Sstevel@tonic-gate#if defined(__amd64)
15480Sstevel@tonic-gate
15490Sstevel@tonic-gate	ENTRY(xcopyin_nta)
15500Sstevel@tonic-gate	pushq	%rbp
15510Sstevel@tonic-gate	movq	%rsp, %rbp
1552*8653SBill.Holler@Sun.COM	subq	$24, %rsp
15530Sstevel@tonic-gate
15540Sstevel@tonic-gate	/*
15550Sstevel@tonic-gate	 * save args in case we trap and need to rerun as a copyop
15560Sstevel@tonic-gate	 * %rcx is consumed in this routine so we don't need to save
15570Sstevel@tonic-gate	 * it.
15580Sstevel@tonic-gate	 */
15590Sstevel@tonic-gate	movq	%rdi, (%rsp)
15600Sstevel@tonic-gate	movq	%rsi, 0x8(%rsp)
15610Sstevel@tonic-gate	movq	%rdx, 0x10(%rsp)
15620Sstevel@tonic-gate
15630Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
15640Sstevel@tonic-gate#ifdef DEBUG
15650Sstevel@tonic-gate	cmpq	%rax, %rsi		/* %rsi = kaddr */
15660Sstevel@tonic-gate	jnb	1f
15670Sstevel@tonic-gate	leaq	.xcopyin_panic_msg(%rip), %rdi
15680Sstevel@tonic-gate	xorl	%eax, %eax
15690Sstevel@tonic-gate	call	panic
15700Sstevel@tonic-gate1:
15710Sstevel@tonic-gate#endif
15720Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9
15730Sstevel@tonic-gate	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
1574858Snn35248	jae	4f
15750Sstevel@tonic-gate	cmpq	$0, %rcx		/* No non-temporal access? */
15760Sstevel@tonic-gate	/*
15770Sstevel@tonic-gate	 * pass lofault value as 4th argument to do_copy_fault
15780Sstevel@tonic-gate	 */
15790Sstevel@tonic-gate	leaq	_xcopyin_err(%rip), %rcx	/* doesn't set rflags */
15800Sstevel@tonic-gate	jnz	do_copy_fault		/* use regular access */
15810Sstevel@tonic-gate	/*
15820Sstevel@tonic-gate	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
15830Sstevel@tonic-gate	 */
15840Sstevel@tonic-gate	cmpq	$XCOPY_MIN_SIZE, %rdx
15850Sstevel@tonic-gate	jb	do_copy_fault
15860Sstevel@tonic-gate
15870Sstevel@tonic-gate	/*
15880Sstevel@tonic-gate	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
15890Sstevel@tonic-gate	 * count is COUNT_ALIGN_SIZE aligned.
15900Sstevel@tonic-gate	 */
15910Sstevel@tonic-gate	movq	%rdi, %r10
15920Sstevel@tonic-gate	orq	%rsi, %r10
15930Sstevel@tonic-gate	andq	$NTA_ALIGN_MASK, %r10
15940Sstevel@tonic-gate	orq	%rdx, %r10
15950Sstevel@tonic-gate	andq	$COUNT_ALIGN_MASK, %r10
15960Sstevel@tonic-gate	jnz	do_copy_fault
1597*8653SBill.Holler@Sun.COM	leaq	_xcopyin_nta_err(%rip), %rcx	/* doesn't set rflags */
15980Sstevel@tonic-gate	jmp	do_copy_fault_nta	/* use non-temporal access */
15990Sstevel@tonic-gate
1600858Snn352484:
1601858Snn35248	movl	$EFAULT, %eax
1602858Snn35248	jmp	3f
1603858Snn35248
16040Sstevel@tonic-gate	/*
16050Sstevel@tonic-gate	 * A fault during do_copy_fault or do_copy_fault_nta is
16060Sstevel@tonic-gate	 * indicated through an errno value in %rax and we iret from the
16070Sstevel@tonic-gate	 * trap handler to here.
16080Sstevel@tonic-gate	 */
16090Sstevel@tonic-gate_xcopyin_err:
1610*8653SBill.Holler@Sun.COM	addq	$8, %rsp		/* pop bcopy_altentry call ret addr */
1611*8653SBill.Holler@Sun.COM_xcopyin_nta_err:
16120Sstevel@tonic-gate	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
16130Sstevel@tonic-gate3:
16140Sstevel@tonic-gate	movq	T_COPYOPS(%r9), %r8
16150Sstevel@tonic-gate	cmpq	$0, %r8
16160Sstevel@tonic-gate	jz	2f
16170Sstevel@tonic-gate
16180Sstevel@tonic-gate	/*
16190Sstevel@tonic-gate	 * reload args for the copyop
16200Sstevel@tonic-gate	 */
16210Sstevel@tonic-gate	movq	(%rsp), %rdi
16220Sstevel@tonic-gate	movq	0x8(%rsp), %rsi
16230Sstevel@tonic-gate	movq	0x10(%rsp), %rdx
16240Sstevel@tonic-gate	leave
16250Sstevel@tonic-gate	jmp	*CP_XCOPYIN(%r8)
16260Sstevel@tonic-gate
16270Sstevel@tonic-gate2:	leave
16280Sstevel@tonic-gate	ret
16290Sstevel@tonic-gate	SET_SIZE(xcopyin_nta)
16300Sstevel@tonic-gate
16310Sstevel@tonic-gate#elif defined(__i386)
16320Sstevel@tonic-gate
16330Sstevel@tonic-gate#define	ARG_UADDR	4
16340Sstevel@tonic-gate#define	ARG_KADDR	8
16350Sstevel@tonic-gate#define	ARG_COUNT	12
16360Sstevel@tonic-gate#define	ARG_CACHED	16
16370Sstevel@tonic-gate
16380Sstevel@tonic-gate	.globl	use_sse_copy
16390Sstevel@tonic-gate
16400Sstevel@tonic-gate	ENTRY(xcopyin_nta)
16410Sstevel@tonic-gate	movl	kernelbase, %ecx
16420Sstevel@tonic-gate	lea	_xcopyin_err, %eax
16430Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
16440Sstevel@tonic-gate	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
1645858Snn35248	jae	4f
16460Sstevel@tonic-gate
16470Sstevel@tonic-gate	cmpl	$0, use_sse_copy	/* no sse support */
16480Sstevel@tonic-gate	jz	do_copy_fault
16490Sstevel@tonic-gate
16500Sstevel@tonic-gate	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
16510Sstevel@tonic-gate	jnz	do_copy_fault
16520Sstevel@tonic-gate
16530Sstevel@tonic-gate	/*
16540Sstevel@tonic-gate	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
16550Sstevel@tonic-gate	 */
16560Sstevel@tonic-gate	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
16570Sstevel@tonic-gate	jb	do_copy_fault
16580Sstevel@tonic-gate
16590Sstevel@tonic-gate	/*
16600Sstevel@tonic-gate	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
16610Sstevel@tonic-gate	 * count is COUNT_ALIGN_SIZE aligned.
16620Sstevel@tonic-gate	 */
16630Sstevel@tonic-gate	movl	ARG_UADDR(%esp), %ecx
16640Sstevel@tonic-gate	orl	ARG_KADDR(%esp), %ecx
16650Sstevel@tonic-gate	andl	$NTA_ALIGN_MASK, %ecx
16660Sstevel@tonic-gate	orl	ARG_COUNT(%esp), %ecx
16670Sstevel@tonic-gate	andl	$COUNT_ALIGN_MASK, %ecx
16680Sstevel@tonic-gate	jnz	do_copy_fault
16690Sstevel@tonic-gate
16700Sstevel@tonic-gate	jmp	do_copy_fault_nta	/* use regular access */
16710Sstevel@tonic-gate
1672858Snn352484:
1673858Snn35248	movl	$EFAULT, %eax
1674858Snn35248	jmp	3f
1675858Snn35248
16760Sstevel@tonic-gate	/*
16770Sstevel@tonic-gate	 * A fault during do_copy_fault or do_copy_fault_nta is
16780Sstevel@tonic-gate	 * indicated through an errno value in %eax and we iret from the
16790Sstevel@tonic-gate	 * trap handler to here.
16800Sstevel@tonic-gate	 */
16810Sstevel@tonic-gate_xcopyin_err:
16820Sstevel@tonic-gate	popl	%ecx
16830Sstevel@tonic-gate	popl	%edi
16840Sstevel@tonic-gate	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
16850Sstevel@tonic-gate	popl	%esi
16860Sstevel@tonic-gate	popl	%ebp
16870Sstevel@tonic-gate3:
16880Sstevel@tonic-gate	cmpl	$0, T_COPYOPS(%edx)
16890Sstevel@tonic-gate	jz	2f
16900Sstevel@tonic-gate	movl	T_COPYOPS(%edx), %eax
16910Sstevel@tonic-gate	jmp	*CP_XCOPYIN(%eax)
16920Sstevel@tonic-gate
1693545Skalai2:	rep; 	ret	/* use 2 byte return instruction when branch target */
1694545Skalai			/* AMD Software Optimization Guide - Section 6.2 */
16950Sstevel@tonic-gate	SET_SIZE(xcopyin_nta)
16960Sstevel@tonic-gate
16970Sstevel@tonic-gate#undef	ARG_UADDR
16980Sstevel@tonic-gate#undef	ARG_KADDR
16990Sstevel@tonic-gate#undef	ARG_COUNT
17000Sstevel@tonic-gate#undef	ARG_CACHED
17010Sstevel@tonic-gate
17020Sstevel@tonic-gate#endif	/* __i386 */
17030Sstevel@tonic-gate#endif	/* __lint */
17040Sstevel@tonic-gate
17050Sstevel@tonic-gate/*
17060Sstevel@tonic-gate * Copy kernel data to user space.
17070Sstevel@tonic-gate */
17080Sstevel@tonic-gate
17090Sstevel@tonic-gate#if defined(__lint)
17100Sstevel@tonic-gate
17110Sstevel@tonic-gate/* ARGSUSED */
17120Sstevel@tonic-gateint
17130Sstevel@tonic-gatecopyout(const void *kaddr, void *uaddr, size_t count)
17140Sstevel@tonic-gate{ return (0); }
17150Sstevel@tonic-gate
17160Sstevel@tonic-gate#else	/* __lint */
17170Sstevel@tonic-gate
17180Sstevel@tonic-gate#if defined(__amd64)
17190Sstevel@tonic-gate
17200Sstevel@tonic-gate	ENTRY(copyout)
17210Sstevel@tonic-gate	pushq	%rbp
17220Sstevel@tonic-gate	movq	%rsp, %rbp
1723*8653SBill.Holler@Sun.COM	subq	$24, %rsp
17240Sstevel@tonic-gate
17250Sstevel@tonic-gate	/*
17260Sstevel@tonic-gate	 * save args in case we trap and need to rerun as a copyop
17270Sstevel@tonic-gate	 */
17280Sstevel@tonic-gate	movq	%rdi, (%rsp)
17290Sstevel@tonic-gate	movq	%rsi, 0x8(%rsp)
17300Sstevel@tonic-gate	movq	%rdx, 0x10(%rsp)
17310Sstevel@tonic-gate
17320Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
17330Sstevel@tonic-gate#ifdef DEBUG
17340Sstevel@tonic-gate	cmpq	%rax, %rdi		/* %rdi = kaddr */
17350Sstevel@tonic-gate	jnb	1f
17360Sstevel@tonic-gate	leaq	.copyout_panic_msg(%rip), %rdi
17370Sstevel@tonic-gate	xorl	%eax, %eax
17380Sstevel@tonic-gate	call	panic
17390Sstevel@tonic-gate1:
17400Sstevel@tonic-gate#endif
17410Sstevel@tonic-gate	/*
17420Sstevel@tonic-gate	 * pass lofault value as 4th argument to do_copy_fault
17430Sstevel@tonic-gate	 */
17440Sstevel@tonic-gate	leaq	_copyout_err(%rip), %rcx
17450Sstevel@tonic-gate
17460Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9
17470Sstevel@tonic-gate	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
17480Sstevel@tonic-gate	jb	do_copy_fault
17490Sstevel@tonic-gate	jmp	3f
17500Sstevel@tonic-gate
17510Sstevel@tonic-gate_copyout_err:
17520Sstevel@tonic-gate	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
1753*8653SBill.Holler@Sun.COM	addq	$8, %rsp		/* pop bcopy_altentry call ret addr */
17540Sstevel@tonic-gate3:
17550Sstevel@tonic-gate	movq	T_COPYOPS(%r9), %rax
17560Sstevel@tonic-gate	cmpq	$0, %rax
17570Sstevel@tonic-gate	jz	2f
17580Sstevel@tonic-gate
17590Sstevel@tonic-gate	/*
17600Sstevel@tonic-gate	 * reload args for the copyop
17610Sstevel@tonic-gate	 */
17620Sstevel@tonic-gate	movq	(%rsp), %rdi
17630Sstevel@tonic-gate	movq	0x8(%rsp), %rsi
17640Sstevel@tonic-gate	movq	0x10(%rsp), %rdx
17650Sstevel@tonic-gate	leave
17660Sstevel@tonic-gate	jmp	*CP_COPYOUT(%rax)
17670Sstevel@tonic-gate
17680Sstevel@tonic-gate2:	movl	$-1, %eax
17690Sstevel@tonic-gate	leave
17700Sstevel@tonic-gate	ret
17710Sstevel@tonic-gate	SET_SIZE(copyout)
17720Sstevel@tonic-gate
17730Sstevel@tonic-gate#elif defined(__i386)
17740Sstevel@tonic-gate
17750Sstevel@tonic-gate#define	ARG_KADDR	4
17760Sstevel@tonic-gate#define	ARG_UADDR	8
17770Sstevel@tonic-gate
17780Sstevel@tonic-gate	ENTRY(copyout)
17790Sstevel@tonic-gate	movl	kernelbase, %ecx
17800Sstevel@tonic-gate#ifdef DEBUG
17810Sstevel@tonic-gate	cmpl	%ecx, ARG_KADDR(%esp)
17820Sstevel@tonic-gate	jnb	1f
17830Sstevel@tonic-gate	pushl	%ebp
17840Sstevel@tonic-gate	movl	%esp, %ebp
17850Sstevel@tonic-gate	pushl	$.copyout_panic_msg
17860Sstevel@tonic-gate	call	panic
17870Sstevel@tonic-gate1:
17880Sstevel@tonic-gate#endif
17890Sstevel@tonic-gate	lea	_copyout_err, %eax
17900Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
17910Sstevel@tonic-gate	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
17920Sstevel@tonic-gate	jb	do_copy_fault
17930Sstevel@tonic-gate	jmp	3f
17940Sstevel@tonic-gate
17950Sstevel@tonic-gate_copyout_err:
17960Sstevel@tonic-gate	popl	%ecx
17970Sstevel@tonic-gate	popl	%edi
17980Sstevel@tonic-gate	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
17990Sstevel@tonic-gate	popl	%esi
18000Sstevel@tonic-gate	popl	%ebp
18010Sstevel@tonic-gate3:
18020Sstevel@tonic-gate	movl	T_COPYOPS(%edx), %eax
18030Sstevel@tonic-gate	cmpl	$0, %eax
18040Sstevel@tonic-gate	jz	2f
18050Sstevel@tonic-gate	jmp	*CP_COPYOUT(%eax)
18060Sstevel@tonic-gate
18070Sstevel@tonic-gate2:	movl	$-1, %eax
18080Sstevel@tonic-gate	ret
18090Sstevel@tonic-gate	SET_SIZE(copyout)
18100Sstevel@tonic-gate
18110Sstevel@tonic-gate#undef	ARG_UADDR
18120Sstevel@tonic-gate#undef	ARG_KADDR
18130Sstevel@tonic-gate
18140Sstevel@tonic-gate#endif	/* __i386 */
18150Sstevel@tonic-gate#endif	/* __lint */
18160Sstevel@tonic-gate
18170Sstevel@tonic-gate#if defined(__lint)
18180Sstevel@tonic-gate
18190Sstevel@tonic-gate/* ARGSUSED */
18200Sstevel@tonic-gateint
18210Sstevel@tonic-gatexcopyout_nta(const void *kaddr, void *uaddr, size_t count, int copy_cached)
18220Sstevel@tonic-gate{ return (0); }
18230Sstevel@tonic-gate
18240Sstevel@tonic-gate#else	/* __lint */
18250Sstevel@tonic-gate
18260Sstevel@tonic-gate#if defined(__amd64)
18270Sstevel@tonic-gate
18280Sstevel@tonic-gate	ENTRY(xcopyout_nta)
18290Sstevel@tonic-gate	pushq	%rbp
18300Sstevel@tonic-gate	movq	%rsp, %rbp
1831*8653SBill.Holler@Sun.COM	subq	$24, %rsp
18320Sstevel@tonic-gate
18330Sstevel@tonic-gate	/*
18340Sstevel@tonic-gate	 * save args in case we trap and need to rerun as a copyop
18350Sstevel@tonic-gate	 */
18360Sstevel@tonic-gate	movq	%rdi, (%rsp)
18370Sstevel@tonic-gate	movq	%rsi, 0x8(%rsp)
18380Sstevel@tonic-gate	movq	%rdx, 0x10(%rsp)
18390Sstevel@tonic-gate
18400Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
18410Sstevel@tonic-gate#ifdef DEBUG
18420Sstevel@tonic-gate	cmpq	%rax, %rdi		/* %rdi = kaddr */
18430Sstevel@tonic-gate	jnb	1f
18440Sstevel@tonic-gate	leaq	.xcopyout_panic_msg(%rip), %rdi
18450Sstevel@tonic-gate	xorl	%eax, %eax
18460Sstevel@tonic-gate	call	panic
18470Sstevel@tonic-gate1:
18480Sstevel@tonic-gate#endif
18490Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9
18500Sstevel@tonic-gate	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
1851858Snn35248	jae	4f
18520Sstevel@tonic-gate
18530Sstevel@tonic-gate	cmpq	$0, %rcx		/* No non-temporal access? */
18540Sstevel@tonic-gate	/*
18550Sstevel@tonic-gate	 * pass lofault value as 4th argument to do_copy_fault
18560Sstevel@tonic-gate	 */
18570Sstevel@tonic-gate	leaq	_xcopyout_err(%rip), %rcx
18580Sstevel@tonic-gate	jnz	do_copy_fault
18590Sstevel@tonic-gate	/*
18600Sstevel@tonic-gate	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
18610Sstevel@tonic-gate	 */
18620Sstevel@tonic-gate	cmpq	$XCOPY_MIN_SIZE, %rdx
18630Sstevel@tonic-gate	jb	do_copy_fault
18640Sstevel@tonic-gate
18650Sstevel@tonic-gate	/*
18660Sstevel@tonic-gate	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
18670Sstevel@tonic-gate	 * count is COUNT_ALIGN_SIZE aligned.
18680Sstevel@tonic-gate	 */
18690Sstevel@tonic-gate	movq	%rdi, %r10
18700Sstevel@tonic-gate	orq	%rsi, %r10
18710Sstevel@tonic-gate	andq	$NTA_ALIGN_MASK, %r10
18720Sstevel@tonic-gate	orq	%rdx, %r10
18730Sstevel@tonic-gate	andq	$COUNT_ALIGN_MASK, %r10
18740Sstevel@tonic-gate	jnz	do_copy_fault
1875*8653SBill.Holler@Sun.COM	leaq	_xcopyout_nta_err(%rip), %rcx
18760Sstevel@tonic-gate	jmp	do_copy_fault_nta
18770Sstevel@tonic-gate
1878858Snn352484:
1879858Snn35248	movl	$EFAULT, %eax
1880858Snn35248	jmp	3f
1881858Snn35248
18820Sstevel@tonic-gate	/*
18830Sstevel@tonic-gate	 * A fault during do_copy_fault or do_copy_fault_nta is
18840Sstevel@tonic-gate	 * indicated through an errno value in %rax and we iret from the
18850Sstevel@tonic-gate	 * trap handler to here.
18860Sstevel@tonic-gate	 */
18870Sstevel@tonic-gate_xcopyout_err:
1888*8653SBill.Holler@Sun.COM	addq	$8, %rsp		/* pop bcopy_altentry call ret addr */
1889*8653SBill.Holler@Sun.COM_xcopyout_nta_err:
18900Sstevel@tonic-gate	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
18910Sstevel@tonic-gate3:
18920Sstevel@tonic-gate	movq	T_COPYOPS(%r9), %r8
18930Sstevel@tonic-gate	cmpq	$0, %r8
18940Sstevel@tonic-gate	jz	2f
18950Sstevel@tonic-gate
18960Sstevel@tonic-gate	/*
18970Sstevel@tonic-gate	 * reload args for the copyop
18980Sstevel@tonic-gate	 */
18990Sstevel@tonic-gate	movq	(%rsp), %rdi
19000Sstevel@tonic-gate	movq	0x8(%rsp), %rsi
19010Sstevel@tonic-gate	movq	0x10(%rsp), %rdx
19020Sstevel@tonic-gate	leave
19030Sstevel@tonic-gate	jmp	*CP_XCOPYOUT(%r8)
19040Sstevel@tonic-gate
19050Sstevel@tonic-gate2:	leave
19060Sstevel@tonic-gate	ret
19070Sstevel@tonic-gate	SET_SIZE(xcopyout_nta)
19080Sstevel@tonic-gate
19090Sstevel@tonic-gate#elif defined(__i386)
19100Sstevel@tonic-gate
19110Sstevel@tonic-gate#define	ARG_KADDR	4
19120Sstevel@tonic-gate#define	ARG_UADDR	8
19130Sstevel@tonic-gate#define	ARG_COUNT	12
19140Sstevel@tonic-gate#define	ARG_CACHED	16
19150Sstevel@tonic-gate
19160Sstevel@tonic-gate	ENTRY(xcopyout_nta)
19170Sstevel@tonic-gate	movl	kernelbase, %ecx
19180Sstevel@tonic-gate	lea	_xcopyout_err, %eax
19190Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
19200Sstevel@tonic-gate	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
1921858Snn35248	jae	4f
19220Sstevel@tonic-gate
19230Sstevel@tonic-gate	cmpl	$0, use_sse_copy	/* no sse support */
19240Sstevel@tonic-gate	jz	do_copy_fault
19250Sstevel@tonic-gate
19260Sstevel@tonic-gate	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
19270Sstevel@tonic-gate	jnz	do_copy_fault
19280Sstevel@tonic-gate
19290Sstevel@tonic-gate	/*
19300Sstevel@tonic-gate	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
19310Sstevel@tonic-gate	 */
19320Sstevel@tonic-gate	cmpl	$XCOPY_MIN_SIZE, %edx
19330Sstevel@tonic-gate	jb	do_copy_fault
19340Sstevel@tonic-gate
19350Sstevel@tonic-gate	/*
19360Sstevel@tonic-gate	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
19370Sstevel@tonic-gate	 * count is COUNT_ALIGN_SIZE aligned.
19380Sstevel@tonic-gate	 */
19390Sstevel@tonic-gate	movl	ARG_UADDR(%esp), %ecx
19400Sstevel@tonic-gate	orl	ARG_KADDR(%esp), %ecx
19410Sstevel@tonic-gate	andl	$NTA_ALIGN_MASK, %ecx
19420Sstevel@tonic-gate	orl	ARG_COUNT(%esp), %ecx
19430Sstevel@tonic-gate	andl	$COUNT_ALIGN_MASK, %ecx
19440Sstevel@tonic-gate	jnz	do_copy_fault
19450Sstevel@tonic-gate	jmp	do_copy_fault_nta
19460Sstevel@tonic-gate
1947858Snn352484:
1948858Snn35248	movl	$EFAULT, %eax
1949858Snn35248	jmp	3f
1950858Snn35248
19510Sstevel@tonic-gate	/*
19520Sstevel@tonic-gate	 * A fault during do_copy_fault or do_copy_fault_nta is
19530Sstevel@tonic-gate	 * indicated through an errno value in %eax and we iret from the
19540Sstevel@tonic-gate	 * trap handler to here.
19550Sstevel@tonic-gate	 */
19560Sstevel@tonic-gate_xcopyout_err:
19570Sstevel@tonic-gate	/ restore the original lofault
19580Sstevel@tonic-gate	popl	%ecx
19590Sstevel@tonic-gate	popl	%edi
19600Sstevel@tonic-gate	movl	%ecx, T_LOFAULT(%edx)	/ original lofault
19610Sstevel@tonic-gate	popl	%esi
19620Sstevel@tonic-gate	popl	%ebp
19630Sstevel@tonic-gate3:
19640Sstevel@tonic-gate	cmpl	$0, T_COPYOPS(%edx)
19650Sstevel@tonic-gate	jz	2f
19660Sstevel@tonic-gate	movl	T_COPYOPS(%edx), %eax
19670Sstevel@tonic-gate	jmp	*CP_XCOPYOUT(%eax)
19680Sstevel@tonic-gate
1969545Skalai2:	rep;	ret	/* use 2 byte return instruction when branch target */
1970545Skalai			/* AMD Software Optimization Guide - Section 6.2 */
19710Sstevel@tonic-gate	SET_SIZE(xcopyout_nta)
19720Sstevel@tonic-gate
19730Sstevel@tonic-gate#undef	ARG_UADDR
19740Sstevel@tonic-gate#undef	ARG_KADDR
19750Sstevel@tonic-gate#undef	ARG_COUNT
19760Sstevel@tonic-gate#undef	ARG_CACHED
19770Sstevel@tonic-gate
19780Sstevel@tonic-gate#endif	/* __i386 */
19790Sstevel@tonic-gate#endif	/* __lint */
19800Sstevel@tonic-gate
19810Sstevel@tonic-gate/*
19820Sstevel@tonic-gate * Copy a null terminated string from one point to another in
19830Sstevel@tonic-gate * the kernel address space.
19840Sstevel@tonic-gate */
19850Sstevel@tonic-gate
19860Sstevel@tonic-gate#if defined(__lint)
19870Sstevel@tonic-gate
19880Sstevel@tonic-gate/* ARGSUSED */
19890Sstevel@tonic-gateint
19900Sstevel@tonic-gatecopystr(const char *from, char *to, size_t maxlength, size_t *lencopied)
19910Sstevel@tonic-gate{ return (0); }
19920Sstevel@tonic-gate
19930Sstevel@tonic-gate#else	/* __lint */
19940Sstevel@tonic-gate
19950Sstevel@tonic-gate#if defined(__amd64)
19960Sstevel@tonic-gate
19970Sstevel@tonic-gate	ENTRY(copystr)
19980Sstevel@tonic-gate	pushq	%rbp
19990Sstevel@tonic-gate	movq	%rsp, %rbp
20000Sstevel@tonic-gate#ifdef DEBUG
20010Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
20020Sstevel@tonic-gate	cmpq	%rax, %rdi		/* %rdi = from */
20030Sstevel@tonic-gate	jb	0f
20040Sstevel@tonic-gate	cmpq	%rax, %rsi		/* %rsi = to */
20050Sstevel@tonic-gate	jnb	1f
20060Sstevel@tonic-gate0:	leaq	.copystr_panic_msg(%rip), %rdi
20070Sstevel@tonic-gate	xorl	%eax, %eax
20080Sstevel@tonic-gate	call	panic
20090Sstevel@tonic-gate1:
20100Sstevel@tonic-gate#endif
20110Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9
20120Sstevel@tonic-gate	movq	T_LOFAULT(%r9), %r8	/* pass current lofault value as */
20130Sstevel@tonic-gate					/* 5th argument to do_copystr */
20140Sstevel@tonic-gatedo_copystr:
20150Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
20160Sstevel@tonic-gate	movq    T_LOFAULT(%r9), %r11	/* save the current lofault */
20170Sstevel@tonic-gate	movq	%r8, T_LOFAULT(%r9)	/* new lofault */
20180Sstevel@tonic-gate
20190Sstevel@tonic-gate	movq	%rdx, %r8		/* save maxlength */
20200Sstevel@tonic-gate
20210Sstevel@tonic-gate	cmpq	$0, %rdx		/* %rdx = maxlength */
20220Sstevel@tonic-gate	je	copystr_enametoolong	/* maxlength == 0 */
20230Sstevel@tonic-gate
20240Sstevel@tonic-gatecopystr_loop:
20250Sstevel@tonic-gate	decq	%r8
20260Sstevel@tonic-gate	movb	(%rdi), %al
20270Sstevel@tonic-gate	incq	%rdi
20280Sstevel@tonic-gate	movb	%al, (%rsi)
20290Sstevel@tonic-gate	incq	%rsi
20300Sstevel@tonic-gate	cmpb	$0, %al
20310Sstevel@tonic-gate	je	copystr_null		/* null char */
20320Sstevel@tonic-gate	cmpq	$0, %r8
20330Sstevel@tonic-gate	jne	copystr_loop
20340Sstevel@tonic-gate
20350Sstevel@tonic-gatecopystr_enametoolong:
20360Sstevel@tonic-gate	movl	$ENAMETOOLONG, %eax
20370Sstevel@tonic-gate	jmp	copystr_out
20380Sstevel@tonic-gate
20390Sstevel@tonic-gatecopystr_null:
20400Sstevel@tonic-gate	xorl	%eax, %eax		/* no error */
20410Sstevel@tonic-gate
20420Sstevel@tonic-gatecopystr_out:
20430Sstevel@tonic-gate	cmpq	$0, %rcx		/* want length? */
20440Sstevel@tonic-gate	je	copystr_done		/* no */
20450Sstevel@tonic-gate	subq	%r8, %rdx		/* compute length and store it */
20460Sstevel@tonic-gate	movq	%rdx, (%rcx)
20470Sstevel@tonic-gate
20480Sstevel@tonic-gatecopystr_done:
20490Sstevel@tonic-gate	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
20500Sstevel@tonic-gate	leave
20510Sstevel@tonic-gate	ret
20520Sstevel@tonic-gate	SET_SIZE(copystr)
20530Sstevel@tonic-gate
20540Sstevel@tonic-gate#elif defined(__i386)
20550Sstevel@tonic-gate
20560Sstevel@tonic-gate#define	ARG_FROM	8
20570Sstevel@tonic-gate#define	ARG_TO		12
20580Sstevel@tonic-gate#define	ARG_MAXLEN	16
20590Sstevel@tonic-gate#define	ARG_LENCOPIED	20
20600Sstevel@tonic-gate
20610Sstevel@tonic-gate	ENTRY(copystr)
20620Sstevel@tonic-gate#ifdef DEBUG
20630Sstevel@tonic-gate	pushl	%ebp
20640Sstevel@tonic-gate	movl	%esp, %ebp
20650Sstevel@tonic-gate	movl	kernelbase, %eax
20660Sstevel@tonic-gate	cmpl	%eax, ARG_FROM(%esp)
20670Sstevel@tonic-gate	jb	0f
20680Sstevel@tonic-gate	cmpl	%eax, ARG_TO(%esp)
20690Sstevel@tonic-gate	jnb	1f
20700Sstevel@tonic-gate0:	pushl	$.copystr_panic_msg
20710Sstevel@tonic-gate	call	panic
20720Sstevel@tonic-gate1:	popl	%ebp
20730Sstevel@tonic-gate#endif
20740Sstevel@tonic-gate	/* get the current lofault address */
20750Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %eax
20760Sstevel@tonic-gate	movl	T_LOFAULT(%eax), %eax
20770Sstevel@tonic-gatedo_copystr:
20780Sstevel@tonic-gate	pushl	%ebp			/* setup stack frame */
20790Sstevel@tonic-gate	movl	%esp, %ebp
20800Sstevel@tonic-gate	pushl	%ebx			/* save registers */
20810Sstevel@tonic-gate	pushl	%edi
20820Sstevel@tonic-gate
20830Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %ebx
20840Sstevel@tonic-gate	movl	T_LOFAULT(%ebx), %edi
20850Sstevel@tonic-gate	pushl	%edi			/* save the current lofault */
20860Sstevel@tonic-gate	movl	%eax, T_LOFAULT(%ebx)	/* new lofault */
20870Sstevel@tonic-gate
20880Sstevel@tonic-gate	movl	ARG_MAXLEN(%ebp), %ecx
20890Sstevel@tonic-gate	cmpl	$0, %ecx
20900Sstevel@tonic-gate	je	copystr_enametoolong	/* maxlength == 0 */
20910Sstevel@tonic-gate
20920Sstevel@tonic-gate	movl	ARG_FROM(%ebp), %ebx	/* source address */
20930Sstevel@tonic-gate	movl	ARG_TO(%ebp), %edx	/* destination address */
20940Sstevel@tonic-gate
20950Sstevel@tonic-gatecopystr_loop:
20960Sstevel@tonic-gate	decl	%ecx
20970Sstevel@tonic-gate	movb	(%ebx), %al
20980Sstevel@tonic-gate	incl	%ebx
20990Sstevel@tonic-gate	movb	%al, (%edx)
21000Sstevel@tonic-gate	incl	%edx
21010Sstevel@tonic-gate	cmpb	$0, %al
21020Sstevel@tonic-gate	je	copystr_null		/* null char */
21030Sstevel@tonic-gate	cmpl	$0, %ecx
21040Sstevel@tonic-gate	jne	copystr_loop
21050Sstevel@tonic-gate
21060Sstevel@tonic-gatecopystr_enametoolong:
21070Sstevel@tonic-gate	movl	$ENAMETOOLONG, %eax
21080Sstevel@tonic-gate	jmp	copystr_out
21090Sstevel@tonic-gate
21100Sstevel@tonic-gatecopystr_null:
21110Sstevel@tonic-gate	xorl	%eax, %eax		/* no error */
21120Sstevel@tonic-gate
21130Sstevel@tonic-gatecopystr_out:
21140Sstevel@tonic-gate	cmpl	$0, ARG_LENCOPIED(%ebp)	/* want length? */
21150Sstevel@tonic-gate	je	copystr_done		/* no */
21160Sstevel@tonic-gate	movl	ARG_MAXLEN(%ebp), %edx
21170Sstevel@tonic-gate	subl	%ecx, %edx		/* compute length and store it */
21180Sstevel@tonic-gate	movl	ARG_LENCOPIED(%ebp), %ecx
21190Sstevel@tonic-gate	movl	%edx, (%ecx)
21200Sstevel@tonic-gate
21210Sstevel@tonic-gatecopystr_done:
21220Sstevel@tonic-gate	popl	%edi
21230Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %ebx
21240Sstevel@tonic-gate	movl	%edi, T_LOFAULT(%ebx)	/* restore the original lofault */
21250Sstevel@tonic-gate
21260Sstevel@tonic-gate	popl	%edi
21270Sstevel@tonic-gate	popl	%ebx
21280Sstevel@tonic-gate	popl	%ebp
21290Sstevel@tonic-gate	ret
21300Sstevel@tonic-gate	SET_SIZE(copystr)
21310Sstevel@tonic-gate
21320Sstevel@tonic-gate#undef	ARG_FROM
21330Sstevel@tonic-gate#undef	ARG_TO
21340Sstevel@tonic-gate#undef	ARG_MAXLEN
21350Sstevel@tonic-gate#undef	ARG_LENCOPIED
21360Sstevel@tonic-gate
21370Sstevel@tonic-gate#endif	/* __i386 */
21380Sstevel@tonic-gate#endif	/* __lint */
21390Sstevel@tonic-gate
21400Sstevel@tonic-gate/*
21410Sstevel@tonic-gate * Copy a null terminated string from the user address space into
21420Sstevel@tonic-gate * the kernel address space.
21430Sstevel@tonic-gate */
21440Sstevel@tonic-gate
21450Sstevel@tonic-gate#if defined(__lint)
21460Sstevel@tonic-gate
21470Sstevel@tonic-gate/* ARGSUSED */
21480Sstevel@tonic-gateint
21490Sstevel@tonic-gatecopyinstr(const char *uaddr, char *kaddr, size_t maxlength,
21500Sstevel@tonic-gate    size_t *lencopied)
21510Sstevel@tonic-gate{ return (0); }
21520Sstevel@tonic-gate
21530Sstevel@tonic-gate#else	/* __lint */
21540Sstevel@tonic-gate
21550Sstevel@tonic-gate#if defined(__amd64)
21560Sstevel@tonic-gate
/*
 * copyinstr(9F) -- amd64: copy a null-terminated string from user space
 * (%rdi = uaddr) to kernel space (%rsi = kaddr), at most %rdx bytes;
 * optional copied-length pointer in %rcx.  Returns 0, ENAMETOOLONG, or
 * EFAULT.  The actual copy is done by the shared do_copystr, entered
 * with the fault-handler address in %r8; bad addresses and faults fall
 * through to the thread's t_copyops handler when one is installed
 * (used by branded zones / watchpoints), else EFAULT.
 */
21570Sstevel@tonic-gate	ENTRY(copyinstr)
21580Sstevel@tonic-gate	pushq	%rbp
21590Sstevel@tonic-gate	movq	%rsp, %rbp
21600Sstevel@tonic-gate	subq	$32, %rsp
21610Sstevel@tonic-gate
21620Sstevel@tonic-gate	/*
21630Sstevel@tonic-gate	 * save args in case we trap and need to rerun as a copyop
21640Sstevel@tonic-gate	 */
21650Sstevel@tonic-gate	movq	%rdi, (%rsp)
21660Sstevel@tonic-gate	movq	%rsi, 0x8(%rsp)
21670Sstevel@tonic-gate	movq	%rdx, 0x10(%rsp)
21680Sstevel@tonic-gate	movq	%rcx, 0x18(%rsp)
21690Sstevel@tonic-gate
21700Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
21710Sstevel@tonic-gate#ifdef DEBUG
21720Sstevel@tonic-gate	cmpq	%rax, %rsi		/* %rsi = kaddr */
21730Sstevel@tonic-gate	jnb	1f
21740Sstevel@tonic-gate	leaq	.copyinstr_panic_msg(%rip), %rdi
21750Sstevel@tonic-gate	xorl	%eax, %eax
21760Sstevel@tonic-gate	call	panic
21770Sstevel@tonic-gate1:
21780Sstevel@tonic-gate#endif
21790Sstevel@tonic-gate	/*
21800Sstevel@tonic-gate	 * pass lofault value as 5th argument to do_copystr
21810Sstevel@tonic-gate	 */
21820Sstevel@tonic-gate	leaq	_copyinstr_error(%rip), %r8
21830Sstevel@tonic-gate
21840Sstevel@tonic-gate	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
21850Sstevel@tonic-gate	jb	do_copystr
21860Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9
21870Sstevel@tonic-gate	jmp	3f
21880Sstevel@tonic-gate
/*
 * Fault path: entered via lofault from inside do_copystr, which
 * presumably leaves the thread pointer in %r9 and the saved lofault in
 * %r11 (NOTE(review): confirm against the amd64 do_copystr, not shown
 * in this hunk).
 */
21890Sstevel@tonic-gate_copyinstr_error:
21900Sstevel@tonic-gate	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
21910Sstevel@tonic-gate3:
21920Sstevel@tonic-gate	movq	T_COPYOPS(%r9), %rax
21930Sstevel@tonic-gate	cmpq	$0, %rax
21940Sstevel@tonic-gate	jz	2f
21950Sstevel@tonic-gate
21960Sstevel@tonic-gate	/*
21970Sstevel@tonic-gate	 * reload args for the copyop
21980Sstevel@tonic-gate	 */
21990Sstevel@tonic-gate	movq	(%rsp), %rdi
22000Sstevel@tonic-gate	movq	0x8(%rsp), %rsi
22010Sstevel@tonic-gate	movq	0x10(%rsp), %rdx
22020Sstevel@tonic-gate	movq	0x18(%rsp), %rcx
22030Sstevel@tonic-gate	leave
/* Tail-call the copyop; it returns directly to our caller. */
22040Sstevel@tonic-gate	jmp	*CP_COPYINSTR(%rax)
22050Sstevel@tonic-gate
22060Sstevel@tonic-gate2:	movl	$EFAULT, %eax		/* return EFAULT */
22070Sstevel@tonic-gate	leave
22080Sstevel@tonic-gate	ret
22090Sstevel@tonic-gate	SET_SIZE(copyinstr)
22100Sstevel@tonic-gate
22110Sstevel@tonic-gate#elif defined(__i386)
22120Sstevel@tonic-gate
22130Sstevel@tonic-gate#define	ARG_UADDR	4
22140Sstevel@tonic-gate#define	ARG_KADDR	8
22150Sstevel@tonic-gate
/*
 * copyinstr(9F) -- i386: copy a null-terminated string from user space
 * to kernel space.  Jumps into the shared do_copystr with the fault
 * handler address in %eax; on a bad uaddr or a fault, dispatches to the
 * thread's t_copyops CP_COPYINSTR handler if installed, else EFAULT.
 */
22160Sstevel@tonic-gate	ENTRY(copyinstr)
22170Sstevel@tonic-gate	movl	kernelbase, %ecx
22180Sstevel@tonic-gate#ifdef DEBUG
22190Sstevel@tonic-gate	cmpl	%ecx, ARG_KADDR(%esp)
22200Sstevel@tonic-gate	jnb	1f
22210Sstevel@tonic-gate	pushl	%ebp
22220Sstevel@tonic-gate	movl	%esp, %ebp
22230Sstevel@tonic-gate	pushl	$.copyinstr_panic_msg
22240Sstevel@tonic-gate	call	panic
22250Sstevel@tonic-gate1:
22260Sstevel@tonic-gate#endif
22270Sstevel@tonic-gate	lea	_copyinstr_error, %eax
22280Sstevel@tonic-gate	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
22290Sstevel@tonic-gate	jb	do_copystr
22300Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
22310Sstevel@tonic-gate	jmp	3f
22320Sstevel@tonic-gate
/*
 * Fault path: lofault handler entered while do_copystr's frame is still
 * live, so unwind exactly what do_copystr pushed: the saved lofault,
 * then %edi, %ebx, %ebp.
 */
22330Sstevel@tonic-gate_copyinstr_error:
22340Sstevel@tonic-gate	popl	%edi
22350Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
22360Sstevel@tonic-gate	movl	%edi, T_LOFAULT(%edx)	/* original lofault */
22370Sstevel@tonic-gate
22380Sstevel@tonic-gate	popl	%edi
22390Sstevel@tonic-gate	popl	%ebx
22400Sstevel@tonic-gate	popl	%ebp
22410Sstevel@tonic-gate3:
22420Sstevel@tonic-gate	movl	T_COPYOPS(%edx), %eax
22430Sstevel@tonic-gate	cmpl	$0, %eax
22440Sstevel@tonic-gate	jz	2f
/* Tail-call the copyop; args are still at their original stack slots. */
22450Sstevel@tonic-gate	jmp	*CP_COPYINSTR(%eax)
22460Sstevel@tonic-gate
22470Sstevel@tonic-gate2:	movl	$EFAULT, %eax		/* return EFAULT */
22480Sstevel@tonic-gate	ret
22490Sstevel@tonic-gate	SET_SIZE(copyinstr)
22500Sstevel@tonic-gate
22510Sstevel@tonic-gate#undef	ARG_UADDR
22520Sstevel@tonic-gate#undef	ARG_KADDR
22530Sstevel@tonic-gate
22540Sstevel@tonic-gate#endif	/* __i386 */
22550Sstevel@tonic-gate#endif	/* __lint */
22560Sstevel@tonic-gate
22570Sstevel@tonic-gate/*
22580Sstevel@tonic-gate * Copy a null terminated string from the kernel
22590Sstevel@tonic-gate * address space to the user address space.
22600Sstevel@tonic-gate */
22610Sstevel@tonic-gate
22620Sstevel@tonic-gate#if defined(__lint)
22630Sstevel@tonic-gate
22640Sstevel@tonic-gate/* ARGSUSED */
22650Sstevel@tonic-gateint
22660Sstevel@tonic-gatecopyoutstr(const char *kaddr, char *uaddr, size_t maxlength,
22670Sstevel@tonic-gate    size_t *lencopied)
22680Sstevel@tonic-gate{ return (0); }
22690Sstevel@tonic-gate
22700Sstevel@tonic-gate#else	/* __lint */
22710Sstevel@tonic-gate
22720Sstevel@tonic-gate#if defined(__amd64)
22730Sstevel@tonic-gate
/*
 * copyoutstr(9F) -- amd64: copy a null-terminated string from kernel
 * space (%rdi = kaddr) to user space (%rsi = uaddr), at most %rdx
 * bytes; optional copied-length pointer in %rcx.  Returns 0,
 * ENAMETOOLONG, or EFAULT.  Mirror image of copyinstr above: same
 * saved-args/copyop fallback scheme, same shared do_copystr body with
 * the fault handler passed in %r8.
 */
22740Sstevel@tonic-gate	ENTRY(copyoutstr)
22750Sstevel@tonic-gate	pushq	%rbp
22760Sstevel@tonic-gate	movq	%rsp, %rbp
22770Sstevel@tonic-gate	subq	$32, %rsp
22780Sstevel@tonic-gate
22790Sstevel@tonic-gate	/*
22800Sstevel@tonic-gate	 * save args in case we trap and need to rerun as a copyop
22810Sstevel@tonic-gate	 */
22820Sstevel@tonic-gate	movq	%rdi, (%rsp)
22830Sstevel@tonic-gate	movq	%rsi, 0x8(%rsp)
22840Sstevel@tonic-gate	movq	%rdx, 0x10(%rsp)
22850Sstevel@tonic-gate	movq	%rcx, 0x18(%rsp)
22860Sstevel@tonic-gate
22870Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
22880Sstevel@tonic-gate#ifdef DEBUG
22890Sstevel@tonic-gate	cmpq	%rax, %rdi		/* %rdi = kaddr */
22900Sstevel@tonic-gate	jnb	1f
22910Sstevel@tonic-gate	leaq	.copyoutstr_panic_msg(%rip), %rdi
22920Sstevel@tonic-gate	jmp	call_panic		/* setup stack and call panic */
22930Sstevel@tonic-gate1:
22940Sstevel@tonic-gate#endif
22950Sstevel@tonic-gate	/*
22960Sstevel@tonic-gate	 * pass lofault value as 5th argument to do_copystr
22970Sstevel@tonic-gate	 */
22980Sstevel@tonic-gate	leaq	_copyoutstr_error(%rip), %r8
22990Sstevel@tonic-gate
23000Sstevel@tonic-gate	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
23010Sstevel@tonic-gate	jb	do_copystr
23020Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9
23030Sstevel@tonic-gate	jmp	3f
23040Sstevel@tonic-gate
/*
 * Fault path: entered via lofault from do_copystr, which presumably
 * leaves thread in %r9 and saved lofault in %r11 (NOTE(review): confirm
 * against the amd64 do_copystr, not shown in this hunk).
 */
23050Sstevel@tonic-gate_copyoutstr_error:
23060Sstevel@tonic-gate	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
23070Sstevel@tonic-gate3:
23080Sstevel@tonic-gate	movq	T_COPYOPS(%r9), %rax
23090Sstevel@tonic-gate	cmpq	$0, %rax
23100Sstevel@tonic-gate	jz	2f
23110Sstevel@tonic-gate
23120Sstevel@tonic-gate	/*
23130Sstevel@tonic-gate	 * reload args for the copyop
23140Sstevel@tonic-gate	 */
23150Sstevel@tonic-gate	movq	(%rsp), %rdi
23160Sstevel@tonic-gate	movq	0x8(%rsp), %rsi
23170Sstevel@tonic-gate	movq	0x10(%rsp), %rdx
23180Sstevel@tonic-gate	movq	0x18(%rsp), %rcx
23190Sstevel@tonic-gate	leave
/* Tail-call the copyop; it returns directly to our caller. */
23200Sstevel@tonic-gate	jmp	*CP_COPYOUTSTR(%rax)
23210Sstevel@tonic-gate
23220Sstevel@tonic-gate2:	movl	$EFAULT, %eax		/* return EFAULT */
23230Sstevel@tonic-gate	leave
23240Sstevel@tonic-gate	ret
23250Sstevel@tonic-gate	SET_SIZE(copyoutstr)
23260Sstevel@tonic-gate
23270Sstevel@tonic-gate#elif defined(__i386)
23280Sstevel@tonic-gate
23290Sstevel@tonic-gate#define	ARG_KADDR	4
23300Sstevel@tonic-gate#define	ARG_UADDR	8
23310Sstevel@tonic-gate
/*
 * copyoutstr(9F) -- i386: copy a null-terminated string from kernel
 * space to user space.  Mirror of copyinstr: enters the shared
 * do_copystr with the fault handler in %eax; bad uaddr or a fault
 * dispatches to t_copyops CP_COPYOUTSTR if installed, else EFAULT.
 */
23320Sstevel@tonic-gate	ENTRY(copyoutstr)
23330Sstevel@tonic-gate	movl	kernelbase, %ecx
23340Sstevel@tonic-gate#ifdef DEBUG
23350Sstevel@tonic-gate	cmpl	%ecx, ARG_KADDR(%esp)
23360Sstevel@tonic-gate	jnb	1f
23370Sstevel@tonic-gate	pushl	%ebp
23380Sstevel@tonic-gate	movl	%esp, %ebp
23390Sstevel@tonic-gate	pushl	$.copyoutstr_panic_msg
23400Sstevel@tonic-gate	call	panic
23410Sstevel@tonic-gate1:
23420Sstevel@tonic-gate#endif
23430Sstevel@tonic-gate	lea	_copyoutstr_error, %eax
23440Sstevel@tonic-gate	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
23450Sstevel@tonic-gate	jb	do_copystr
23460Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
23470Sstevel@tonic-gate	jmp	3f
23480Sstevel@tonic-gate
/*
 * Fault path: unwind do_copystr's pushes (saved lofault, %edi, %ebx,
 * %ebp) before falling into the copyop dispatch.
 */
23490Sstevel@tonic-gate_copyoutstr_error:
23500Sstevel@tonic-gate	popl	%edi
23510Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
23520Sstevel@tonic-gate	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */
23530Sstevel@tonic-gate
23540Sstevel@tonic-gate	popl	%edi
23550Sstevel@tonic-gate	popl	%ebx
23560Sstevel@tonic-gate	popl	%ebp
23570Sstevel@tonic-gate3:
23580Sstevel@tonic-gate	movl	T_COPYOPS(%edx), %eax
23590Sstevel@tonic-gate	cmpl	$0, %eax
23600Sstevel@tonic-gate	jz	2f
/* Tail-call the copyop; args are still at their original stack slots. */
23610Sstevel@tonic-gate	jmp	*CP_COPYOUTSTR(%eax)
23620Sstevel@tonic-gate
23630Sstevel@tonic-gate2:	movl	$EFAULT, %eax		/* return EFAULT */
23640Sstevel@tonic-gate	ret
23650Sstevel@tonic-gate	SET_SIZE(copyoutstr)
23660Sstevel@tonic-gate
23670Sstevel@tonic-gate#undef	ARG_KADDR
23680Sstevel@tonic-gate#undef	ARG_UADDR
23690Sstevel@tonic-gate
23700Sstevel@tonic-gate#endif	/* __i386 */
23710Sstevel@tonic-gate#endif	/* __lint */
23720Sstevel@tonic-gate
23730Sstevel@tonic-gate/*
23740Sstevel@tonic-gate * Since all of the fuword() variants are so similar, we have a macro to spit
23750Sstevel@tonic-gate * them out.  This allows us to create DTrace-unobservable functions easily.
23760Sstevel@tonic-gate */
23770Sstevel@tonic-gate
23780Sstevel@tonic-gate#if defined(__lint)
23790Sstevel@tonic-gate
23800Sstevel@tonic-gate#if defined(__amd64)
23810Sstevel@tonic-gate
23820Sstevel@tonic-gate/* ARGSUSED */
23830Sstevel@tonic-gateint
23840Sstevel@tonic-gatefuword64(const void *addr, uint64_t *dst)
23850Sstevel@tonic-gate{ return (0); }
23860Sstevel@tonic-gate
23870Sstevel@tonic-gate#endif
23880Sstevel@tonic-gate
23890Sstevel@tonic-gate/* ARGSUSED */
23900Sstevel@tonic-gateint
23910Sstevel@tonic-gatefuword32(const void *addr, uint32_t *dst)
23920Sstevel@tonic-gate{ return (0); }
23930Sstevel@tonic-gate
23940Sstevel@tonic-gate/* ARGSUSED */
23950Sstevel@tonic-gateint
23960Sstevel@tonic-gatefuword16(const void *addr, uint16_t *dst)
23970Sstevel@tonic-gate{ return (0); }
23980Sstevel@tonic-gate
23990Sstevel@tonic-gate/* ARGSUSED */
24000Sstevel@tonic-gateint
24010Sstevel@tonic-gatefuword8(const void *addr, uint8_t *dst)
24020Sstevel@tonic-gate{ return (0); }
24030Sstevel@tonic-gate
24040Sstevel@tonic-gate#else	/* __lint */
24050Sstevel@tonic-gate
24060Sstevel@tonic-gate#if defined(__amd64)
24070Sstevel@tonic-gate
24080Sstevel@tonic-gate/*
24090Sstevel@tonic-gate * (Note that we don't save and reload the arguments here
24100Sstevel@tonic-gate * because their values are not altered in the copy path)
24110Sstevel@tonic-gate */
24120Sstevel@tonic-gate
/*
 * FUWORD -- amd64 fuwordN(addr, dst) generator: fetch one N-bit word
 * from user address %rdi and store it through kernel pointer %rsi.
 * Returns 0 on success.  A kernel-range addr, or a fault (caught via
 * the transient _flt_NAME lofault handler), dispatches to the thread's
 * t_copyops COPYOP entry when installed, else returns -1.  T_LOFAULT is
 * cleared again on both the success and fault paths.  (Macro body is
 * backslash-continued; comments cannot go inside it.)
 */
24130Sstevel@tonic-gate#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
24140Sstevel@tonic-gate	ENTRY(NAME)				\
24150Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9;		\
24160Sstevel@tonic-gate	cmpq	kernelbase(%rip), %rdi;		\
24170Sstevel@tonic-gate	jae	1f;				\
24180Sstevel@tonic-gate	leaq	_flt_/**/NAME, %rdx;		\
24190Sstevel@tonic-gate	movq	%rdx, T_LOFAULT(%r9);		\
24200Sstevel@tonic-gate	INSTR	(%rdi), REG;			\
24210Sstevel@tonic-gate	movq	$0, T_LOFAULT(%r9);		\
24220Sstevel@tonic-gate	INSTR	REG, (%rsi);			\
24230Sstevel@tonic-gate	xorl	%eax, %eax;			\
24240Sstevel@tonic-gate	ret;					\
24250Sstevel@tonic-gate_flt_/**/NAME:					\
24260Sstevel@tonic-gate	movq	$0, T_LOFAULT(%r9);		\
24270Sstevel@tonic-gate1:						\
24280Sstevel@tonic-gate	movq	T_COPYOPS(%r9), %rax;		\
24290Sstevel@tonic-gate	cmpq	$0, %rax;			\
24300Sstevel@tonic-gate	jz	2f;				\
24310Sstevel@tonic-gate	jmp	*COPYOP(%rax);			\
24320Sstevel@tonic-gate2:						\
24330Sstevel@tonic-gate	movl	$-1, %eax;			\
24340Sstevel@tonic-gate	ret;					\
24350Sstevel@tonic-gate	SET_SIZE(NAME)
24360Sstevel@tonic-gate
/* One instantiation per access width; REG picks the matching %rax view. */
24370Sstevel@tonic-gate	FUWORD(fuword64, movq, %rax, CP_FUWORD64)
24380Sstevel@tonic-gate	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
24390Sstevel@tonic-gate	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
24400Sstevel@tonic-gate	FUWORD(fuword8, movb, %al, CP_FUWORD8)
24410Sstevel@tonic-gate
24420Sstevel@tonic-gate#elif defined(__i386)
24430Sstevel@tonic-gate
/*
 * FUWORD -- i386 fuwordN(addr, dst) generator: fetch one N-bit word
 * from user address 4(%esp) and store it through kernel pointer
 * 8(%esp).  Returns 0 on success; kernel-range addr or fault goes
 * through the transient _flt_NAME lofault handler to the t_copyops
 * COPYOP entry when installed, else -1.  (Macro body is backslash-
 * continued; comments cannot go inside it.)
 */
24440Sstevel@tonic-gate#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
24450Sstevel@tonic-gate	ENTRY(NAME)				\
24460Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %ecx;		\
24470Sstevel@tonic-gate	movl	kernelbase, %eax;		\
24480Sstevel@tonic-gate	cmpl	%eax, 4(%esp);			\
24490Sstevel@tonic-gate	jae	1f;				\
24500Sstevel@tonic-gate	lea	_flt_/**/NAME, %edx;		\
24510Sstevel@tonic-gate	movl	%edx, T_LOFAULT(%ecx);		\
24520Sstevel@tonic-gate	movl	4(%esp), %eax;			\
24530Sstevel@tonic-gate	movl	8(%esp), %edx;			\
24540Sstevel@tonic-gate	INSTR	(%eax), REG;			\
24550Sstevel@tonic-gate	movl	$0, T_LOFAULT(%ecx);		\
24560Sstevel@tonic-gate	INSTR	REG, (%edx);			\
24570Sstevel@tonic-gate	xorl	%eax, %eax;			\
24580Sstevel@tonic-gate	ret;					\
24590Sstevel@tonic-gate_flt_/**/NAME:					\
24600Sstevel@tonic-gate	movl	$0, T_LOFAULT(%ecx);		\
24610Sstevel@tonic-gate1:						\
24620Sstevel@tonic-gate	movl	T_COPYOPS(%ecx), %eax;		\
24630Sstevel@tonic-gate	cmpl	$0, %eax;			\
24640Sstevel@tonic-gate	jz	2f;				\
24650Sstevel@tonic-gate	jmp	*COPYOP(%eax);			\
24660Sstevel@tonic-gate2:						\
24670Sstevel@tonic-gate	movl	$-1, %eax;			\
24680Sstevel@tonic-gate	ret;					\
24690Sstevel@tonic-gate	SET_SIZE(NAME)
24700Sstevel@tonic-gate
/* No fuword64 on i386; widths map to %ecx-family scratch via REG. */
24710Sstevel@tonic-gate	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
24720Sstevel@tonic-gate	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
24730Sstevel@tonic-gate	FUWORD(fuword8, movb, %al, CP_FUWORD8)
24740Sstevel@tonic-gate
24750Sstevel@tonic-gate#endif	/* __i386 */
24760Sstevel@tonic-gate
24770Sstevel@tonic-gate#undef	FUWORD
24780Sstevel@tonic-gate
24790Sstevel@tonic-gate#endif	/* __lint */
24800Sstevel@tonic-gate
24810Sstevel@tonic-gate/*
24820Sstevel@tonic-gate * Set user word.
24830Sstevel@tonic-gate */
24840Sstevel@tonic-gate
24850Sstevel@tonic-gate#if defined(__lint)
24860Sstevel@tonic-gate
24870Sstevel@tonic-gate#if defined(__amd64)
24880Sstevel@tonic-gate
24890Sstevel@tonic-gate/* ARGSUSED */
24900Sstevel@tonic-gateint
24910Sstevel@tonic-gatesuword64(void *addr, uint64_t value)
24920Sstevel@tonic-gate{ return (0); }
24930Sstevel@tonic-gate
24940Sstevel@tonic-gate#endif
24950Sstevel@tonic-gate
24960Sstevel@tonic-gate/* ARGSUSED */
24970Sstevel@tonic-gateint
24980Sstevel@tonic-gatesuword32(void *addr, uint32_t value)
24990Sstevel@tonic-gate{ return (0); }
25000Sstevel@tonic-gate
25010Sstevel@tonic-gate/* ARGSUSED */
25020Sstevel@tonic-gateint
25030Sstevel@tonic-gatesuword16(void *addr, uint16_t value)
25040Sstevel@tonic-gate{ return (0); }
25050Sstevel@tonic-gate
25060Sstevel@tonic-gate/* ARGSUSED */
25070Sstevel@tonic-gateint
25080Sstevel@tonic-gatesuword8(void *addr, uint8_t value)
25090Sstevel@tonic-gate{ return (0); }
25100Sstevel@tonic-gate
25110Sstevel@tonic-gate#else	/* lint */
25120Sstevel@tonic-gate
25130Sstevel@tonic-gate#if defined(__amd64)
25140Sstevel@tonic-gate
25150Sstevel@tonic-gate/*
25160Sstevel@tonic-gate * (Note that we don't save and reload the arguments here
25170Sstevel@tonic-gate * because their values are not altered in the copy path)
25180Sstevel@tonic-gate */
25190Sstevel@tonic-gate
/*
 * SUWORD -- amd64 suwordN(addr, value) generator: store the N-bit value
 * (%rsi register view REG) to user address %rdi.  Returns 0 on success;
 * kernel-range addr or fault (via the transient _flt_NAME lofault
 * handler) dispatches to t_copyops COPYOP when installed, else -1.
 * (Macro body is backslash-continued; comments cannot go inside it.)
 */
25200Sstevel@tonic-gate#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
25210Sstevel@tonic-gate	ENTRY(NAME)				\
25220Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %r9;		\
25230Sstevel@tonic-gate	cmpq	kernelbase(%rip), %rdi;		\
25240Sstevel@tonic-gate	jae	1f;				\
25250Sstevel@tonic-gate	leaq	_flt_/**/NAME, %rdx;		\
25260Sstevel@tonic-gate	movq	%rdx, T_LOFAULT(%r9);		\
25270Sstevel@tonic-gate	INSTR	REG, (%rdi);			\
25280Sstevel@tonic-gate	movq	$0, T_LOFAULT(%r9);		\
25290Sstevel@tonic-gate	xorl	%eax, %eax;			\
25300Sstevel@tonic-gate	ret;					\
25310Sstevel@tonic-gate_flt_/**/NAME:					\
25320Sstevel@tonic-gate	movq	$0, T_LOFAULT(%r9);		\
25330Sstevel@tonic-gate1:						\
25340Sstevel@tonic-gate	movq	T_COPYOPS(%r9), %rax;		\
25350Sstevel@tonic-gate	cmpq	$0, %rax;			\
25360Sstevel@tonic-gate	jz	3f;				\
25370Sstevel@tonic-gate	jmp	*COPYOP(%rax);			\
25380Sstevel@tonic-gate3:						\
25390Sstevel@tonic-gate	movl	$-1, %eax;			\
25400Sstevel@tonic-gate	ret;					\
25410Sstevel@tonic-gate	SET_SIZE(NAME)
25420Sstevel@tonic-gate
/* One instantiation per width; REG is the matching view of arg2 (%rsi). */
25430Sstevel@tonic-gate	SUWORD(suword64, movq, %rsi, CP_SUWORD64)
25440Sstevel@tonic-gate	SUWORD(suword32, movl, %esi, CP_SUWORD32)
25450Sstevel@tonic-gate	SUWORD(suword16, movw, %si, CP_SUWORD16)
25460Sstevel@tonic-gate	SUWORD(suword8, movb, %sil, CP_SUWORD8)
25470Sstevel@tonic-gate
25480Sstevel@tonic-gate#elif defined(__i386)
25490Sstevel@tonic-gate
/*
 * SUWORD -- i386 suwordN(addr, value) generator: store the N-bit value
 * 8(%esp) (register view REG of %edx) to user address 4(%esp).  Returns
 * 0 on success; kernel-range addr or fault goes through _flt_NAME to
 * t_copyops COPYOP when installed, else -1.  The indirect jump loads
 * the copyop pointer into %ecx first because %eax holds it and %edx may
 * be live.  (Macro body is backslash-continued; no interior comments.)
 */
25500Sstevel@tonic-gate#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
25510Sstevel@tonic-gate	ENTRY(NAME)				\
25520Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %ecx;		\
25530Sstevel@tonic-gate	movl	kernelbase, %eax;		\
25540Sstevel@tonic-gate	cmpl	%eax, 4(%esp);			\
25550Sstevel@tonic-gate	jae	1f;				\
25560Sstevel@tonic-gate	lea	_flt_/**/NAME, %edx;		\
25570Sstevel@tonic-gate	movl	%edx, T_LOFAULT(%ecx);		\
25580Sstevel@tonic-gate	movl	4(%esp), %eax;			\
25590Sstevel@tonic-gate	movl	8(%esp), %edx;			\
25600Sstevel@tonic-gate	INSTR	REG, (%eax);			\
25610Sstevel@tonic-gate	movl	$0, T_LOFAULT(%ecx);		\
25620Sstevel@tonic-gate	xorl	%eax, %eax;			\
25630Sstevel@tonic-gate	ret;					\
25640Sstevel@tonic-gate_flt_/**/NAME:					\
25650Sstevel@tonic-gate	movl	$0, T_LOFAULT(%ecx);		\
25660Sstevel@tonic-gate1:						\
25670Sstevel@tonic-gate	movl	T_COPYOPS(%ecx), %eax;		\
25680Sstevel@tonic-gate	cmpl	$0, %eax;			\
25690Sstevel@tonic-gate	jz	3f;				\
25700Sstevel@tonic-gate	movl	COPYOP(%eax), %ecx;		\
25710Sstevel@tonic-gate	jmp	*%ecx;				\
25720Sstevel@tonic-gate3:						\
25730Sstevel@tonic-gate	movl	$-1, %eax;			\
25740Sstevel@tonic-gate	ret;					\
25750Sstevel@tonic-gate	SET_SIZE(NAME)
25760Sstevel@tonic-gate
/* No suword64 on i386. */
25770Sstevel@tonic-gate	SUWORD(suword32, movl, %edx, CP_SUWORD32)
25780Sstevel@tonic-gate	SUWORD(suword16, movw, %dx, CP_SUWORD16)
25790Sstevel@tonic-gate	SUWORD(suword8, movb, %dl, CP_SUWORD8)
25800Sstevel@tonic-gate
25810Sstevel@tonic-gate#endif	/* __i386 */
25820Sstevel@tonic-gate
25830Sstevel@tonic-gate#undef	SUWORD
25840Sstevel@tonic-gate
25850Sstevel@tonic-gate#endif	/* __lint */
25860Sstevel@tonic-gate
25870Sstevel@tonic-gate#if defined(__lint)
25880Sstevel@tonic-gate
25890Sstevel@tonic-gate#if defined(__amd64)
25900Sstevel@tonic-gate
25910Sstevel@tonic-gate/*ARGSUSED*/
25920Sstevel@tonic-gatevoid
25930Sstevel@tonic-gatefuword64_noerr(const void *addr, uint64_t *dst)
25940Sstevel@tonic-gate{}
25950Sstevel@tonic-gate
25960Sstevel@tonic-gate#endif
25970Sstevel@tonic-gate
25980Sstevel@tonic-gate/*ARGSUSED*/
25990Sstevel@tonic-gatevoid
26000Sstevel@tonic-gatefuword32_noerr(const void *addr, uint32_t *dst)
26010Sstevel@tonic-gate{}
26020Sstevel@tonic-gate
26030Sstevel@tonic-gate/*ARGSUSED*/
26040Sstevel@tonic-gatevoid
26050Sstevel@tonic-gatefuword8_noerr(const void *addr, uint8_t *dst)
26060Sstevel@tonic-gate{}
26070Sstevel@tonic-gate
26080Sstevel@tonic-gate/*ARGSUSED*/
26090Sstevel@tonic-gatevoid
26100Sstevel@tonic-gatefuword16_noerr(const void *addr, uint16_t *dst)
26110Sstevel@tonic-gate{}
26120Sstevel@tonic-gate
26130Sstevel@tonic-gate#else   /* __lint */
26140Sstevel@tonic-gate
26150Sstevel@tonic-gate#if defined(__amd64)
26160Sstevel@tonic-gate
/*
 * FUWORD_NOERR -- amd64 fuwordN_noerr(addr, dst) generator: like
 * fuwordN but with no error return; intended to run under a caller's
 * on_fault() handler.  A kernel-range addr is clamped to kernelbase
 * (cmovnb = cmov if addr >= kernelbase) so the access faults there
 * instead of reading kernel memory.
 */
26170Sstevel@tonic-gate#define	FUWORD_NOERR(NAME, INSTR, REG)		\
26180Sstevel@tonic-gate	ENTRY(NAME)				\
26190Sstevel@tonic-gate	cmpq	kernelbase(%rip), %rdi;		\
26200Sstevel@tonic-gate	cmovnbq	kernelbase(%rip), %rdi;		\
26210Sstevel@tonic-gate	INSTR	(%rdi), REG;			\
26220Sstevel@tonic-gate	INSTR	REG, (%rsi);			\
26230Sstevel@tonic-gate	ret;					\
26240Sstevel@tonic-gate	SET_SIZE(NAME)
26250Sstevel@tonic-gate
26260Sstevel@tonic-gate	FUWORD_NOERR(fuword64_noerr, movq, %rax)
26270Sstevel@tonic-gate	FUWORD_NOERR(fuword32_noerr, movl, %eax)
26280Sstevel@tonic-gate	FUWORD_NOERR(fuword16_noerr, movw, %ax)
26290Sstevel@tonic-gate	FUWORD_NOERR(fuword8_noerr, movb, %al)
26300Sstevel@tonic-gate
26310Sstevel@tonic-gate#elif defined(__i386)
26320Sstevel@tonic-gate
/*
 * FUWORD_NOERR -- i386 fuwordN_noerr(addr, dst) generator: no error
 * return; meant to run under a caller's on_fault() handler.  An addr at
 * or above kernelbase is replaced with kernelbase so the load faults
 * there rather than reading kernel memory.
 */
26330Sstevel@tonic-gate#define	FUWORD_NOERR(NAME, INSTR, REG)		\
26340Sstevel@tonic-gate	ENTRY(NAME)				\
26350Sstevel@tonic-gate	movl	4(%esp), %eax;			\
26360Sstevel@tonic-gate	cmpl	kernelbase, %eax;		\
26370Sstevel@tonic-gate	jb	1f;				\
26380Sstevel@tonic-gate	movl	kernelbase, %eax;		\
26390Sstevel@tonic-gate1:	movl	8(%esp), %edx;			\
26400Sstevel@tonic-gate	INSTR	(%eax), REG;			\
26410Sstevel@tonic-gate	INSTR	REG, (%edx);			\
26420Sstevel@tonic-gate	ret;					\
26430Sstevel@tonic-gate	SET_SIZE(NAME)
26440Sstevel@tonic-gate
26450Sstevel@tonic-gate	FUWORD_NOERR(fuword32_noerr, movl, %ecx)
26460Sstevel@tonic-gate	FUWORD_NOERR(fuword16_noerr, movw, %cx)
26470Sstevel@tonic-gate	FUWORD_NOERR(fuword8_noerr, movb, %cl)
26480Sstevel@tonic-gate
26490Sstevel@tonic-gate#endif	/* __i386 */
26500Sstevel@tonic-gate
26510Sstevel@tonic-gate#undef	FUWORD_NOERR
26520Sstevel@tonic-gate
26530Sstevel@tonic-gate#endif	/* __lint */
26540Sstevel@tonic-gate
26550Sstevel@tonic-gate#if defined(__lint)
26560Sstevel@tonic-gate
26570Sstevel@tonic-gate#if defined(__amd64)
26580Sstevel@tonic-gate
26590Sstevel@tonic-gate/*ARGSUSED*/
26600Sstevel@tonic-gatevoid
26610Sstevel@tonic-gatesuword64_noerr(void *addr, uint64_t value)
26620Sstevel@tonic-gate{}
26630Sstevel@tonic-gate
26640Sstevel@tonic-gate#endif
26650Sstevel@tonic-gate
26660Sstevel@tonic-gate/*ARGSUSED*/
26670Sstevel@tonic-gatevoid
26680Sstevel@tonic-gatesuword32_noerr(void *addr, uint32_t value)
26690Sstevel@tonic-gate{}
26700Sstevel@tonic-gate
26710Sstevel@tonic-gate/*ARGSUSED*/
26720Sstevel@tonic-gatevoid
26730Sstevel@tonic-gatesuword16_noerr(void *addr, uint16_t value)
26740Sstevel@tonic-gate{}
26750Sstevel@tonic-gate
26760Sstevel@tonic-gate/*ARGSUSED*/
26770Sstevel@tonic-gatevoid
26780Sstevel@tonic-gatesuword8_noerr(void *addr, uint8_t value)
26790Sstevel@tonic-gate{}
26800Sstevel@tonic-gate
26810Sstevel@tonic-gate#else	/* lint */
26820Sstevel@tonic-gate
26830Sstevel@tonic-gate#if defined(__amd64)
26840Sstevel@tonic-gate
/*
 * SUWORD_NOERR -- amd64 suwordN_noerr(addr, value) generator: store
 * with no error return; meant to run under a caller's on_fault()
 * handler.  Kernel-range addr is clamped to kernelbase so the store
 * faults there instead of scribbling on kernel memory.
 */
26850Sstevel@tonic-gate#define	SUWORD_NOERR(NAME, INSTR, REG)		\
26860Sstevel@tonic-gate	ENTRY(NAME)				\
26870Sstevel@tonic-gate	cmpq	kernelbase(%rip), %rdi;		\
26880Sstevel@tonic-gate	cmovnbq	kernelbase(%rip), %rdi;		\
26890Sstevel@tonic-gate	INSTR	REG, (%rdi);			\
26900Sstevel@tonic-gate	ret;					\
26910Sstevel@tonic-gate	SET_SIZE(NAME)
26920Sstevel@tonic-gate
26930Sstevel@tonic-gate	SUWORD_NOERR(suword64_noerr, movq, %rsi)
26940Sstevel@tonic-gate	SUWORD_NOERR(suword32_noerr, movl, %esi)
26950Sstevel@tonic-gate	SUWORD_NOERR(suword16_noerr, movw, %si)
26960Sstevel@tonic-gate	SUWORD_NOERR(suword8_noerr, movb, %sil)
26970Sstevel@tonic-gate
26980Sstevel@tonic-gate#elif defined(__i386)
26990Sstevel@tonic-gate
/*
 * SUWORD_NOERR -- i386 suwordN_noerr(addr, value) generator: store with
 * no error return; meant to run under a caller's on_fault() handler.
 * An addr at or above kernelbase is replaced with kernelbase so the
 * store faults there instead of writing kernel memory.
 */
27000Sstevel@tonic-gate#define	SUWORD_NOERR(NAME, INSTR, REG)		\
27010Sstevel@tonic-gate	ENTRY(NAME)				\
27020Sstevel@tonic-gate	movl	4(%esp), %eax;			\
27030Sstevel@tonic-gate	cmpl	kernelbase, %eax;		\
27040Sstevel@tonic-gate	jb	1f;				\
27050Sstevel@tonic-gate	movl	kernelbase, %eax;		\
27060Sstevel@tonic-gate1:						\
27070Sstevel@tonic-gate	movl	8(%esp), %edx;			\
27080Sstevel@tonic-gate	INSTR	REG, (%eax);			\
27090Sstevel@tonic-gate	ret;					\
27100Sstevel@tonic-gate	SET_SIZE(NAME)
27110Sstevel@tonic-gate
27120Sstevel@tonic-gate	SUWORD_NOERR(suword32_noerr, movl, %edx)
27130Sstevel@tonic-gate	SUWORD_NOERR(suword16_noerr, movw, %dx)
27140Sstevel@tonic-gate	SUWORD_NOERR(suword8_noerr, movb, %dl)
27150Sstevel@tonic-gate
27160Sstevel@tonic-gate#endif	/* __i386 */
27170Sstevel@tonic-gate
27180Sstevel@tonic-gate#undef	SUWORD_NOERR
27190Sstevel@tonic-gate
27200Sstevel@tonic-gate#endif	/* lint */
27210Sstevel@tonic-gate
27220Sstevel@tonic-gate
27230Sstevel@tonic-gate#if defined(__lint)
27240Sstevel@tonic-gate
27250Sstevel@tonic-gate/*ARGSUSED*/
27260Sstevel@tonic-gateint
27270Sstevel@tonic-gatesubyte(void *addr, uchar_t value)
27280Sstevel@tonic-gate{ return (0); }
27290Sstevel@tonic-gate
27300Sstevel@tonic-gate/*ARGSUSED*/
27310Sstevel@tonic-gatevoid
27320Sstevel@tonic-gatesubyte_noerr(void *addr, uchar_t value)
27330Sstevel@tonic-gate{}
27340Sstevel@tonic-gate
27350Sstevel@tonic-gate/*ARGSUSED*/
27360Sstevel@tonic-gateint
27370Sstevel@tonic-gatefulword(const void *addr, ulong_t *valuep)
27380Sstevel@tonic-gate{ return (0); }
27390Sstevel@tonic-gate
27400Sstevel@tonic-gate/*ARGSUSED*/
27410Sstevel@tonic-gatevoid
27420Sstevel@tonic-gatefulword_noerr(const void *addr, ulong_t *valuep)
27430Sstevel@tonic-gate{}
27440Sstevel@tonic-gate
27450Sstevel@tonic-gate/*ARGSUSED*/
27460Sstevel@tonic-gateint
27470Sstevel@tonic-gatesulword(void *addr, ulong_t valuep)
27480Sstevel@tonic-gate{ return (0); }
27490Sstevel@tonic-gate
27500Sstevel@tonic-gate/*ARGSUSED*/
27510Sstevel@tonic-gatevoid
27520Sstevel@tonic-gatesulword_noerr(void *addr, ulong_t valuep)
27530Sstevel@tonic-gate{}
27540Sstevel@tonic-gate
27550Sstevel@tonic-gate#else
27560Sstevel@tonic-gate
27570Sstevel@tonic-gate	.weak	subyte
27580Sstevel@tonic-gate	subyte=suword8
27590Sstevel@tonic-gate	.weak	subyte_noerr
27600Sstevel@tonic-gate	subyte_noerr=suword8_noerr
27610Sstevel@tonic-gate
27620Sstevel@tonic-gate#if defined(__amd64)
27630Sstevel@tonic-gate
27640Sstevel@tonic-gate	.weak	fulword
27650Sstevel@tonic-gate	fulword=fuword64
27660Sstevel@tonic-gate	.weak	fulword_noerr
27670Sstevel@tonic-gate	fulword_noerr=fuword64_noerr
27680Sstevel@tonic-gate	.weak	sulword
27690Sstevel@tonic-gate	sulword=suword64
27700Sstevel@tonic-gate	.weak	sulword_noerr
27710Sstevel@tonic-gate	sulword_noerr=suword64_noerr
27720Sstevel@tonic-gate
27730Sstevel@tonic-gate#elif defined(__i386)
27740Sstevel@tonic-gate
27750Sstevel@tonic-gate	.weak	fulword
27760Sstevel@tonic-gate	fulword=fuword32
27770Sstevel@tonic-gate	.weak	fulword_noerr
27780Sstevel@tonic-gate	fulword_noerr=fuword32_noerr
27790Sstevel@tonic-gate	.weak	sulword
27800Sstevel@tonic-gate	sulword=suword32
27810Sstevel@tonic-gate	.weak	sulword_noerr
27820Sstevel@tonic-gate	sulword_noerr=suword32_noerr
27830Sstevel@tonic-gate
27840Sstevel@tonic-gate#endif /* __i386 */
27850Sstevel@tonic-gate
27860Sstevel@tonic-gate#endif /* __lint */
27870Sstevel@tonic-gate
27880Sstevel@tonic-gate#if defined(__lint)
27890Sstevel@tonic-gate
27900Sstevel@tonic-gate/*
27910Sstevel@tonic-gate * Copy a block of storage - must not overlap (from + len <= to).
27920Sstevel@tonic-gate * No fault handler installed (to be called under on_fault())
27930Sstevel@tonic-gate */
27940Sstevel@tonic-gate
27950Sstevel@tonic-gate/* ARGSUSED */
27960Sstevel@tonic-gatevoid
27970Sstevel@tonic-gatecopyout_noerr(const void *kfrom, void *uto, size_t count)
27980Sstevel@tonic-gate{}
27990Sstevel@tonic-gate
28000Sstevel@tonic-gate/* ARGSUSED */
28010Sstevel@tonic-gatevoid
28020Sstevel@tonic-gatecopyin_noerr(const void *ufrom, void *kto, size_t count)
28030Sstevel@tonic-gate{}
28040Sstevel@tonic-gate
28050Sstevel@tonic-gate/*
28060Sstevel@tonic-gate * Zero a block of storage in user space
28070Sstevel@tonic-gate */
28080Sstevel@tonic-gate
28090Sstevel@tonic-gate/* ARGSUSED */
28100Sstevel@tonic-gatevoid
28110Sstevel@tonic-gateuzero(void *addr, size_t count)
28120Sstevel@tonic-gate{}
28130Sstevel@tonic-gate
28140Sstevel@tonic-gate/*
28150Sstevel@tonic-gate * copy a block of storage in user space
28160Sstevel@tonic-gate */
28170Sstevel@tonic-gate
28180Sstevel@tonic-gate/* ARGSUSED */
28190Sstevel@tonic-gatevoid
28200Sstevel@tonic-gateucopy(const void *ufrom, void *uto, size_t ulength)
28210Sstevel@tonic-gate{}
28220Sstevel@tonic-gate
28232712Snn35248/*
28242712Snn35248 * copy a string in user space
28252712Snn35248 */
28262712Snn35248
28272712Snn35248/* ARGSUSED */
28282712Snn35248void
28292712Snn35248ucopystr(const char *ufrom, char *uto, size_t umaxlength, size_t *lencopied)
28302712Snn35248{}
28312712Snn35248
28320Sstevel@tonic-gate#else /* __lint */
28330Sstevel@tonic-gate
28340Sstevel@tonic-gate#if defined(__amd64)
28350Sstevel@tonic-gate
/*
 * copyin_noerr -- amd64: copy %rdx bytes from user %rdi to kernel %rsi
 * with no fault handler of its own (caller runs under on_fault()).
 * A kernel-range source is redirected to kernelbase so the shared
 * do_copy body faults there instead of copying kernel memory.
 */
28360Sstevel@tonic-gate	ENTRY(copyin_noerr)
28370Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
28380Sstevel@tonic-gate#ifdef DEBUG
28390Sstevel@tonic-gate	cmpq	%rax, %rsi		/* %rsi = kto */
28400Sstevel@tonic-gate	jae	1f
28410Sstevel@tonic-gate	leaq	.cpyin_ne_pmsg(%rip), %rdi
28420Sstevel@tonic-gate	jmp	call_panic		/* setup stack and call panic */
28430Sstevel@tonic-gate1:
28440Sstevel@tonic-gate#endif
28450Sstevel@tonic-gate	cmpq	%rax, %rdi		/* ufrom < kernelbase */
28460Sstevel@tonic-gate	jb	do_copy
28470Sstevel@tonic-gate	movq	%rax, %rdi		/* force fault at kernelbase */
28480Sstevel@tonic-gate	jmp	do_copy
28490Sstevel@tonic-gate	SET_SIZE(copyin_noerr)
28500Sstevel@tonic-gate
/*
 * copyout_noerr -- amd64: copy %rdx bytes from kernel %rdi to user %rsi
 * with no fault handler of its own (caller runs under on_fault()).
 * A kernel-range destination is redirected to kernelbase so do_copy
 * faults there instead of writing kernel memory.
 */
28510Sstevel@tonic-gate	ENTRY(copyout_noerr)
28520Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
28530Sstevel@tonic-gate#ifdef DEBUG
28540Sstevel@tonic-gate	cmpq	%rax, %rdi		/* %rdi = kfrom */
28550Sstevel@tonic-gate	jae	1f
28560Sstevel@tonic-gate	leaq	.cpyout_ne_pmsg(%rip), %rdi
28570Sstevel@tonic-gate	jmp	call_panic		/* setup stack and call panic */
28580Sstevel@tonic-gate1:
28590Sstevel@tonic-gate#endif
28600Sstevel@tonic-gate	cmpq	%rax, %rsi		/* uto < kernelbase */
28610Sstevel@tonic-gate	jb	do_copy
28620Sstevel@tonic-gate	movq	%rax, %rsi		/* force fault at kernelbase */
28630Sstevel@tonic-gate	jmp	do_copy
28640Sstevel@tonic-gate	SET_SIZE(copyout_noerr)
28650Sstevel@tonic-gate
/*
 * uzero -- amd64: zero a block of user memory (addr in %rdi) via the
 * shared do_zero body; a kernel-range addr is redirected to kernelbase
 * so the access faults there.
 */
28660Sstevel@tonic-gate	ENTRY(uzero)
2867151Sahl	movq	kernelbase(%rip), %rax
2868151Sahl	cmpq	%rax, %rdi
2869151Sahl	jb	do_zero
2870151Sahl	movq	%rax, %rdi	/* force fault at kernelbase */
28710Sstevel@tonic-gate	jmp	do_zero
28720Sstevel@tonic-gate	SET_SIZE(uzero)
28730Sstevel@tonic-gate
/*
 * ucopy -- amd64: copy between two user addresses (%rdi -> %rsi) via
 * the shared do_copy body.  Either address at/above kernelbase is
 * clamped to kernelbase (branch-free cmov) to force a fault there.
 */
28740Sstevel@tonic-gate	ENTRY(ucopy)
28750Sstevel@tonic-gate	movq	kernelbase(%rip), %rax
2876151Sahl	cmpq	%rax, %rdi
28772712Snn35248	cmovaeq	%rax, %rdi	/* force fault at kernelbase */
2878151Sahl	cmpq	%rax, %rsi
28792712Snn35248	cmovaeq	%rax, %rsi	/* force fault at kernelbase */
28800Sstevel@tonic-gate	jmp	do_copy
28810Sstevel@tonic-gate	SET_SIZE(ucopy)
28820Sstevel@tonic-gate
/*
 * ucopystr -- amd64: copy a null-terminated string between two user
 * addresses via the shared do_copystr body.  Both addresses are clamped
 * to kernelbase to force a fault there; the thread's existing lofault
 * is passed in %r8 so do_copystr re-installs it unchanged (the caller's
 * on_fault() handler stays in effect).
 */
28832712Snn35248	ENTRY(ucopystr)
28842712Snn35248	movq	kernelbase(%rip), %rax
28852712Snn35248	cmpq	%rax, %rdi
28862712Snn35248	cmovaeq	%rax, %rdi	/* force fault at kernelbase */
28872712Snn35248	cmpq	%rax, %rsi
28882712Snn35248	cmovaeq	%rax, %rsi	/* force fault at kernelbase */
28892712Snn35248	/* do_copystr expects lofault address in %r8 */
28902712Snn35248	movq	%gs:CPU_THREAD, %r8
28912712Snn35248	movq	T_LOFAULT(%r8), %r8
28922712Snn35248	jmp	do_copystr
28932712Snn35248	SET_SIZE(ucopystr)
28942712Snn35248
28950Sstevel@tonic-gate#elif defined(__i386)
28960Sstevel@tonic-gate
28970Sstevel@tonic-gate	ENTRY(copyin_noerr)
28980Sstevel@tonic-gate	movl	kernelbase, %eax
28990Sstevel@tonic-gate#ifdef DEBUG
29000Sstevel@tonic-gate	cmpl	%eax, 8(%esp)
29010Sstevel@tonic-gate	jae	1f
29020Sstevel@tonic-gate	pushl	$.cpyin_ne_pmsg
29030Sstevel@tonic-gate	call	panic
29040Sstevel@tonic-gate1:
29050Sstevel@tonic-gate#endif
29060Sstevel@tonic-gate	cmpl	%eax, 4(%esp)
29070Sstevel@tonic-gate	jb	do_copy
29080Sstevel@tonic-gate	movl	%eax, 4(%esp)	/* force fault at kernelbase */
29090Sstevel@tonic-gate	jmp	do_copy
29100Sstevel@tonic-gate	SET_SIZE(copyin_noerr)
29110Sstevel@tonic-gate
29120Sstevel@tonic-gate	ENTRY(copyout_noerr)
29130Sstevel@tonic-gate	movl	kernelbase, %eax
29140Sstevel@tonic-gate#ifdef DEBUG
29150Sstevel@tonic-gate	cmpl	%eax, 4(%esp)
29160Sstevel@tonic-gate	jae	1f
29170Sstevel@tonic-gate	pushl	$.cpyout_ne_pmsg
29180Sstevel@tonic-gate	call	panic
29190Sstevel@tonic-gate1:
29200Sstevel@tonic-gate#endif
29210Sstevel@tonic-gate	cmpl	%eax, 8(%esp)
29220Sstevel@tonic-gate	jb	do_copy
29230Sstevel@tonic-gate	movl	%eax, 8(%esp)	/* force fault at kernelbase */
29240Sstevel@tonic-gate	jmp	do_copy
29250Sstevel@tonic-gate	SET_SIZE(copyout_noerr)
29260Sstevel@tonic-gate
29270Sstevel@tonic-gate	ENTRY(uzero)
29280Sstevel@tonic-gate	movl	kernelbase, %eax
29290Sstevel@tonic-gate	cmpl	%eax, 4(%esp)
2930151Sahl	jb	do_zero
2931151Sahl	movl	%eax, 4(%esp)	/* force fault at kernelbase */
29320Sstevel@tonic-gate	jmp	do_zero
29330Sstevel@tonic-gate	SET_SIZE(uzero)
29340Sstevel@tonic-gate
29350Sstevel@tonic-gate	ENTRY(ucopy)
29360Sstevel@tonic-gate	movl	kernelbase, %eax
2937151Sahl	cmpl	%eax, 4(%esp)
29380Sstevel@tonic-gate	jb	1f
2939151Sahl	movl	%eax, 4(%esp)	/* force fault at kernelbase */
29400Sstevel@tonic-gate1:
2941151Sahl	cmpl	%eax, 8(%esp)
2942151Sahl	jb	do_copy
2943151Sahl	movl	%eax, 8(%esp)	/* force fault at kernelbase */
29440Sstevel@tonic-gate	jmp	do_copy
29450Sstevel@tonic-gate	SET_SIZE(ucopy)
29460Sstevel@tonic-gate
29472712Snn35248	ENTRY(ucopystr)
29482712Snn35248	movl	kernelbase, %eax
29492712Snn35248	cmpl	%eax, 4(%esp)
29502712Snn35248	jb	1f
29512712Snn35248	movl	%eax, 4(%esp)	/* force fault at kernelbase */
29522712Snn352481:
29532712Snn35248	cmpl	%eax, 8(%esp)
29542712Snn35248	jb	2f
29552712Snn35248	movl	%eax, 8(%esp)	/* force fault at kernelbase */
29562712Snn352482:
29572712Snn35248	/* do_copystr expects the lofault address in %eax */
29582712Snn35248	movl	%gs:CPU_THREAD, %eax
29592712Snn35248	movl	T_LOFAULT(%eax), %eax
29602712Snn35248	jmp	do_copystr
29612712Snn35248	SET_SIZE(ucopystr)
29622712Snn35248
29630Sstevel@tonic-gate#endif	/* __i386 */
29640Sstevel@tonic-gate
29650Sstevel@tonic-gate#ifdef DEBUG
29660Sstevel@tonic-gate	.data
29670Sstevel@tonic-gate.kcopy_panic_msg:
29680Sstevel@tonic-gate	.string "kcopy: arguments below kernelbase"
29690Sstevel@tonic-gate.bcopy_panic_msg:
29700Sstevel@tonic-gate	.string "bcopy: arguments below kernelbase"
29710Sstevel@tonic-gate.kzero_panic_msg:
29720Sstevel@tonic-gate        .string "kzero: arguments below kernelbase"
29730Sstevel@tonic-gate.bzero_panic_msg:
29740Sstevel@tonic-gate	.string	"bzero: arguments below kernelbase"
29750Sstevel@tonic-gate.copyin_panic_msg:
29760Sstevel@tonic-gate	.string "copyin: kaddr argument below kernelbase"
29770Sstevel@tonic-gate.xcopyin_panic_msg:
29780Sstevel@tonic-gate	.string	"xcopyin: kaddr argument below kernelbase"
29790Sstevel@tonic-gate.copyout_panic_msg:
29800Sstevel@tonic-gate	.string "copyout: kaddr argument below kernelbase"
29810Sstevel@tonic-gate.xcopyout_panic_msg:
29820Sstevel@tonic-gate	.string	"xcopyout: kaddr argument below kernelbase"
29830Sstevel@tonic-gate.copystr_panic_msg:
29840Sstevel@tonic-gate	.string	"copystr: arguments in user space"
29850Sstevel@tonic-gate.copyinstr_panic_msg:
29860Sstevel@tonic-gate	.string	"copyinstr: kaddr argument not in kernel address space"
29870Sstevel@tonic-gate.copyoutstr_panic_msg:
29880Sstevel@tonic-gate	.string	"copyoutstr: kaddr argument not in kernel address space"
29890Sstevel@tonic-gate.cpyin_ne_pmsg:
29900Sstevel@tonic-gate	.string "copyin_noerr: argument not in kernel address space"
29910Sstevel@tonic-gate.cpyout_ne_pmsg:
29920Sstevel@tonic-gate	.string "copyout_noerr: argument not in kernel address space"
29930Sstevel@tonic-gate#endif
29940Sstevel@tonic-gate
29950Sstevel@tonic-gate#endif	/* __lint */
2996