/*	$NetBSD: copy.S,v 1.32 2020/06/30 16:20:01 maxv Exp $	*/

/*
 * Copyright (c) 1998, 2000, 2004, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)locore.s	7.3 (Berkeley) 5/13/91
 */

#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: copy.S,v 1.32 2020/06/30 16:20:01 maxv Exp $");

#include "assym.h"

#include <sys/errno.h>

#include <machine/frameasm.h>
#include <machine/cputypes.h>

#define GET_CURPCB(reg)	\
	movl	CPUVAR(CURLWP),reg; \
	movl	L_PCB(reg),reg

/*
 * These are arranged so that the abnormal case is a forwards
 * conditional branch - which will be predicted not-taken by
 * both Intel and AMD processors.
 */
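/*
 * CHECK_DEFERRED_SWITCH comes from <machine/frameasm.h>.  It is expected
 * to reduce to a test of the per-CPU "want pmapload" flag, roughly
 * (illustrative sketch, not the authoritative definition):
 *
 *	cmpl	$0,CPUVAR(WANT_PMAPLOAD)
 *
 * so the copy functions only take the slow path (do_pmap_load) when a
 * pmap switch has been deferred.
 */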
#define DEFERRED_SWITCH_CHECK \
	CHECK_DEFERRED_SWITCH			; \
	jnz	99f				; \
98:

#define DEFERRED_SWITCH_CALL \
99:						; \
	call	_C_LABEL(do_pmap_load)		; \
	jmp	98b

/*
 * The following primitives are used to copy regions of memory.
 * Label must be before all copy functions.
 */
	.text
LABEL(x86_copyfunc_start)

/*
 * Handle deferred pmap switch.  We must re-enable preemption without
 * making a function call, so that the program counter is visible to
 * cpu_kpreempt_exit().  It can then know if it needs to restore the
 * pmap on returning, because a preemption occurred within one of the
 * copy functions.
 */
ENTRY(do_pmap_load)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx
	movl	CPUVAR(CURLWP),%ebx
1:
	incl	L_NOPREEMPT(%ebx)
	call	_C_LABEL(pmap_load)
	decl	L_NOPREEMPT(%ebx)
	jnz	2f
	cmpl	$0,L_DOPREEMPT(%ebx)
	jz	2f
	pushl	$0
	call	_C_LABEL(kpreempt)
	addl	$4,%esp
2:
	cmpl	$0,CPUVAR(WANT_PMAPLOAD)
	jnz	1b
	popl	%ebx
	leave
	ret
END(do_pmap_load)

/*
 * void *return_address(unsigned int level);
 *
 * Return the caller's return address if level == 0, or the return
 * address of the caller `level' levels down the stack if level > 0.
 */
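/*
 * Illustrative use (a sketch, not taken from any particular caller):
 *
 *	void *pc = return_address(0);	-- where our caller will return to
 *
 * The walk follows %ebp-chained frames; if a bogus frame pointer is
 * dereferenced, return_address_fault catches the fault and the result
 * is NULL instead of a crash.
 */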
ENTRY(return_address)
	movl	%ebp,%eax	/* frame pointer -> %eax */
	movl	4(%esp),%ecx	/* level -> %ecx */
	movl	CPUVAR(CURLWP),%edx
	movl	L_PCB(%edx),%edx
	movl	$_C_LABEL(return_address_fault),PCB_ONFAULT(%edx)
	cmpl	$0,%ecx
	je	2f
1:
	movl	(%eax),%eax	/* next frame pointer */
	decl	%ecx
	jnz	1b
2:
	movl	0x4(%eax),%eax
	movl	$0,PCB_ONFAULT(%edx)
	ret
END(return_address)

/*
 * int kcopy(const void *from, void *to, size_t len);
 * Copy len bytes from and to kernel memory, and abort on fault.
 */
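/*
 * Illustrative caller (sketch; see kcopy(9)).  Both pointers are kernel
 * addresses that may nevertheless fault, e.g. pageable kernel memory:
 *
 *	error = kcopy(src, dst, len);
 *	if (error)
 *		return error;		-- EFAULT if either side faulted
 *
 * Unlike memcpy(), a fault is caught through kcopy_fault and the
 * onfault table and reported as an error instead of panicking.
 */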
ENTRY(kcopy)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
.Lkcopy_start:
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax		/* overlapping? */
	movl	%ecx,%edx
	jb	1f
	/* nope, copy forward */
	shrl	$2,%ecx			/* copy by 32-bit words */
	rep
	movsl
	movl	%edx,%ecx
	andl	$3,%ecx			/* any bytes left? */
	jz	0f
	rep
	movsb
0:
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret

	ALIGN_TEXT
1:	addl	%ecx,%edi		/* copy backward */
	addl	%ecx,%esi
	std
	andl	$3,%ecx			/* any fractional bytes? */
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	%edx,%ecx		/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	cld

.Lkcopy_end:
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret
END(kcopy)

/*****************************************************************************/

/*
 * The following primitives are used to copy data in and out of the user's
 * address space.
 */

/*
 * int copyout(const void *from, void *to, size_t len);
 * Copy len bytes into the user's address space.
 * see copyout(9)
 */
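/*
 * Illustrative caller (sketch; 'uap'/'buf' and struct foo are made-up
 * names), e.g. handing a structure back to userland from a syscall:
 *
 *	struct foo f;
 *	...
 *	error = copyout(&f, SCARG(uap, buf), sizeof(f));
 *
 * The range check below rejects any destination that would reach past
 * VM_MAXUSER_ADDRESS before a single byte of user memory is touched.
 */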
ENTRY(copyout)
	DEFERRED_SWITCH_CHECK
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi	/* from */
	movl	16(%esp),%edi	/* to */
	movl	20(%esp),%eax	/* len */

	movl	%edi,%edx
	addl	%eax,%edx
	jc	_C_LABEL(copy_efault)
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	_C_LABEL(copy_efault)

	SMAP_DISABLE
.Lcopyout_start:
	movl	%eax,%ecx
	shrl	$2,%ecx
	rep
	movsl
	andl	$3,%eax
	jz	.Lcopyout_end
	movl	%eax,%ecx
	rep
	movsb
.Lcopyout_end:
	SMAP_ENABLE

	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(copyout)

/*
 * int copyin(const void *from, void *to, size_t len);
 * Copy len bytes from the user's address space.
 * see copyin(9)
 */
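/*
 * Illustrative caller (sketch; names are made up):
 *
 *	struct foo f;
 *	error = copyin(SCARG(uap, buf), &f, sizeof(f));
 *	if (error)
 *		return error;
 *
 * Note that here it is the source (the user pointer) that is range
 * checked, since that is the side being read.
 */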
ENTRY(copyin)
	DEFERRED_SWITCH_CHECK
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi	/* from */
	movl	16(%esp),%edi	/* to */
	movl	20(%esp),%eax	/* len */

	movl	%esi,%edx
	addl	%eax,%edx
	jc	_C_LABEL(copy_efault)
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	_C_LABEL(copy_efault)

	SMAP_DISABLE
.Lcopyin_start:
	movl	%eax,%ecx
	shrl	$2,%ecx
	rep
	movsl
	andl	$3,%eax
	jz	.Lcopyin_end
	movl	%eax,%ecx
	rep
	movsb
.Lcopyin_end:
	SMAP_ENABLE

	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(copyin)

ENTRY(copy_efault)
	movl	$EFAULT,%eax
	popl	%edi
	popl	%esi
	ret
END(copy_efault)

/*
 * kcopy_fault is used by kcopy; copy_fault is used by copyin/copyout.
 *
 * They are kept distinct for lazy pmap switching.  See trap().
 */
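/*
 * On a page fault between one of the .L*_start/.L*_end label pairs,
 * trap() is expected to look the faulting PC up in onfault_table (at
 * the end of this file) and resume at the matching fault handler, with
 * the error code already placed in %eax.
 */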

ENTRY(kcopy_fault)
	cld
	popl	%edi
	popl	%esi
	ret
END(kcopy_fault)

ENTRY(copy_fault)
	SMAP_ENABLE
	popl	%edi
	popl	%esi
	ret
END(copy_fault)

ENTRY(return_address_fault)
	movl	$0,PCB_ONFAULT(%edx)
	movl	$0,%eax
	ret
END(return_address_fault)

/*
 * int copyoutstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, into the
 * user's address space.  Return the number of characters copied (including the
 * NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG; else
 * return 0 or EFAULT.
 * see copyoutstr(9)
 */
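/*
 * Illustrative caller (sketch; 'kstr', 'buflen' and the uap fields are
 * made-up names):
 *
 *	size_t done;
 *	error = copyoutstr(kstr, SCARG(uap, buf), buflen, &done);
 *
 * On return, done holds the number of bytes copied including the NUL;
 * a NULL lencopied pointer is accepted and simply not written.
 */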
ENTRY(copyoutstr)
	DEFERRED_SWITCH_CHECK
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi	/* esi = from */
	movl	16(%esp),%edi	/* edi = to */
	movl	20(%esp),%edx	/* edx = maxlen */

	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%edi,%eax
	jc	_C_LABEL(copystr_efault)
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)
1:	incl	%edx

	SMAP_DISABLE
.Lcopyoutstr_start:
1:	decl	%edx
	jz	2f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b
.Lcopyoutstr_end:
	SMAP_ENABLE

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	SMAP_ENABLE
	cmpl	$VM_MAXUSER_ADDRESS,%edi
	jae	_C_LABEL(copystr_efault)
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return
	DEFERRED_SWITCH_CALL
END(copyoutstr)

/*
 * int copyinstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, from the
 * user's address space.  Return the number of characters copied (including the
 * NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG; else
 * return 0 or EFAULT.
 * see copyinstr(9)
 */
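/*
 * Illustrative caller (sketch), e.g. fetching a pathname from userland:
 *
 *	char path[MAXPATHLEN];
 *	size_t done;
 *	error = copyinstr(SCARG(uap, path), path, sizeof(path), &done);
 *
 * ENAMETOOLONG means the buffer filled up before a NUL was found within
 * the user's address space.
 */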
ENTRY(copyinstr)
	DEFERRED_SWITCH_CHECK
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi		/* %esi = from */
	movl	16(%esp),%edi		/* %edi = to */
	movl	20(%esp),%edx		/* %edx = maxlen */

	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%esi).
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%esi,%eax
	jc	_C_LABEL(copystr_efault)
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)
1:	incl	%edx

	SMAP_DISABLE
.Lcopyinstr_start:
1:	decl	%edx
	jz	2f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b
.Lcopyinstr_end:
	SMAP_ENABLE

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	SMAP_ENABLE
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	_C_LABEL(copystr_efault)
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return
	DEFERRED_SWITCH_CALL
END(copyinstr)

ENTRY(copystr_efault)
	movl	$EFAULT,%eax
	jmp	copystr_return
END(copystr_efault)

ENTRY(copystr_fault)
	SMAP_ENABLE
copystr_return:
	/* Set *lencopied and return %eax. */
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	8f
	movl	%ecx,(%edx)

8:	popl	%edi
	popl	%esi
	ret
END(copystr_fault)

/**************************************************************************/

#define	UFETCHSTORE_PROLOGUE(x)						\
	movl	4(%esp),%edx					;	\
	cmpl	$VM_MAXUSER_ADDRESS-x,%edx			;	\
	ja	_C_LABEL(ufetchstore_efault)

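/*
 * The _ufetch_*()/_ustore_*() primitives below transfer one naturally
 * aligned integer to or from userspace.  Illustrative use through the
 * MI ufetch_32()/ustore_32() interface (sketch; 'uaddr' and 'val' are
 * made-up names):
 *
 *	uint32_t val;
 *	if (ufetch_32(uaddr, &val) != 0)
 *		return EFAULT;
 *	error = ustore_32(uaddr, val + 1);
 *
 * UFETCHSTORE_PROLOGUE rejects any address whose access would extend
 * past VM_MAXUSER_ADDRESS.
 */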
/* LINTSTUB: int _ufetch_8(const uint8_t *uaddr, uint8_t *valp); */
ENTRY(_ufetch_8)
	DEFERRED_SWITCH_CHECK
	UFETCHSTORE_PROLOGUE(1)

	SMAP_DISABLE
.L_ufetch_8_start:
	movb	(%edx),%al
.L_ufetch_8_end:
	SMAP_ENABLE

	movl	8(%esp),%edx
	movb	%al,(%edx)
	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(_ufetch_8)

/* LINTSTUB: int _ufetch_16(const uint16_t *uaddr, uint16_t *valp); */
ENTRY(_ufetch_16)
	DEFERRED_SWITCH_CHECK
	UFETCHSTORE_PROLOGUE(2)

	SMAP_DISABLE
.L_ufetch_16_start:
	movw	(%edx),%ax
.L_ufetch_16_end:
	SMAP_ENABLE

	movl	8(%esp),%edx
	movw	%ax,(%edx)
	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(_ufetch_16)

/* LINTSTUB: int _ufetch_32(const uint32_t *uaddr, uint32_t *valp); */
ENTRY(_ufetch_32)
	DEFERRED_SWITCH_CHECK
	UFETCHSTORE_PROLOGUE(4)

	SMAP_DISABLE
.L_ufetch_32_start:
	movl	(%edx),%eax
.L_ufetch_32_end:
	SMAP_ENABLE

	movl	8(%esp),%edx
	movl	%eax,(%edx)
	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(_ufetch_32)

/* LINTSTUB: int _ustore_8(uint8_t *uaddr, uint8_t val); */
ENTRY(_ustore_8)
	DEFERRED_SWITCH_CHECK
	UFETCHSTORE_PROLOGUE(1)
	movb	8(%esp),%al

	SMAP_DISABLE
.L_ustore_8_start:
	movb	%al,(%edx)
.L_ustore_8_end:
	SMAP_ENABLE

	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(_ustore_8)

/* LINTSTUB: int _ustore_16(uint16_t *uaddr, uint16_t val); */
ENTRY(_ustore_16)
	DEFERRED_SWITCH_CHECK
	UFETCHSTORE_PROLOGUE(2)
	movw	8(%esp),%ax

	SMAP_DISABLE
.L_ustore_16_start:
	movw	%ax,(%edx)
.L_ustore_16_end:
	SMAP_ENABLE

	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(_ustore_16)

/* LINTSTUB: int _ustore_32(uint32_t *uaddr, uint32_t val); */
ENTRY(_ustore_32)
	DEFERRED_SWITCH_CHECK
	UFETCHSTORE_PROLOGUE(4)
	movl	8(%esp),%eax

	SMAP_DISABLE
.L_ustore_32_start:
	movl	%eax,(%edx)
.L_ustore_32_end:
	SMAP_ENABLE

	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(_ustore_32)

ENTRY(ufetchstore_efault)
	movl	$EFAULT,%eax
	ret
END(ufetchstore_efault)

ENTRY(ufetchstore_fault)
	SMAP_ENABLE
	ret
END(ufetchstore_fault)

/**************************************************************************/

/*
 * Compare-and-swap a 32-bit integer in user space.
 *
 * int	_ucas_32(volatile uint32_t *uptr, uint32_t old, uint32_t new,
 *		 uint32_t *ret);
 */
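/*
 * Illustrative use through the MI ucas_32() interface (sketch; variable
 * names are made up):
 *
 *	uint32_t expected, new, actual;
 *	error = ucas_32(uptr, expected, new, &actual);
 *	if (error)
 *		return error;		-- EFAULT
 *	if (actual != expected)
 *		retry, or report contention
 *
 * On success *ret holds the previous value of the word; the swap took
 * effect only if that value equals 'old'.
 */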
ENTRY(_ucas_32)
	DEFERRED_SWITCH_CHECK
	movl	4(%esp),%edx
	movl	8(%esp),%eax
	movl	12(%esp),%ecx
	/* Fail if kernel-space */
	cmpl	$VM_MAXUSER_ADDRESS-4,%edx
	ja	_C_LABEL(ucas_efault)

	SMAP_DISABLE
.Lucas32_start:
	/* Perform the CAS */
	lock
	cmpxchgl %ecx,(%edx)
.Lucas32_end:
	SMAP_ENABLE

	/*
	 * Note: %eax is "old" value.
	 * Set the return values.
	 */
	movl	16(%esp),%edx
	movl	%eax,(%edx)
	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(_ucas_32)

ENTRY(ucas_efault)
	movl	$EFAULT,%eax
	ret
END(ucas_efault)

ENTRY(ucas_fault)
	SMAP_ENABLE
	ret
END(ucas_fault)

/*
 * copyin() optimised for bringing in syscall arguments.
 */
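/*
 * Sketch of the expected call from the MD syscall entry path (argument
 * names are illustrative, not the actual ones):
 *
 *	error = x86_copyargs(params, args, nargs * sizeof(register_t));
 *
 * where args has room for 2 + SYS_MAXSYSARGS words; that is why the
 * bounds check below uses the worst-case size rather than the length
 * argument.
 */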
ENTRY(x86_copyargs)
	DEFERRED_SWITCH_CHECK
	pushl	%esi
	movl	8(%esp),%esi
	movl	12(%esp),%edx
	movl	16(%esp),%ecx

	/*
	 * In this function, we may copy more than the size given in the third
	 * argument. In order to make sure the real end of the source buffer
	 * is not past the end of the user's address space, we don't check the
	 * third argument but rather the largest possible size, which is:
	 * 	(2 + SYS_MAXSYSARGS) * 4 = 10 * 4
	 */
	movl	%esi,%eax
	addl	$(10 * 4),%eax
	jc	_C_LABEL(x86_copyargs_efault)
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	_C_LABEL(x86_copyargs_efault)

	SMAP_DISABLE
.Lx86_copyargs_start:
	/* There are a maximum of 8 args + 2 for syscall indirect */
	cmp	$16,%ecx
	movl	(%esi),%eax
	movl	4(%esi),%ecx
	movl	%eax,(%edx)
	movl	%ecx,4(%edx)
	movl	8(%esi),%eax
	movl	12(%esi),%ecx
	movl	%eax,8(%edx)
	movl	%ecx,12(%edx)

	ja	2f		/* Optimise since most syscalls have <= 4 args */
	jmp	.Lx86_copyargs_end
2:

	movl	16(%esi),%eax
	movl	20(%esi),%ecx
	movl	%eax,16(%edx)
	movl	%ecx,20(%edx)
	movl	24(%esi),%eax
	movl	28(%esi),%ecx
	movl	%eax,24(%edx)
	movl	%ecx,28(%edx)
	movl	32(%esi),%eax
	movl	36(%esi),%ecx
	movl	%eax,32(%edx)
	movl	%ecx,36(%edx)
.Lx86_copyargs_end:
	SMAP_ENABLE

	popl	%esi
	xorl	%eax,%eax
	ret
	DEFERRED_SWITCH_CALL
END(x86_copyargs)

ENTRY(x86_copyargs_efault)
	movl	$EFAULT,%eax
	popl	%esi
	ret
END(x86_copyargs_efault)

ENTRY(x86_copyargs_fault)
	SMAP_ENABLE
	popl	%esi
	ret
END(x86_copyargs_fault)

/*
 * Label must be after all copy functions.
 */
LABEL(x86_copyfunc_end)

/*
 * Fault table of copy functions for trap().
 */
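/*
 * Each entry is three words: the start and the end of a copy window,
 * and the fault handler to resume at when a page fault hits a PC inside
 * that window.  A zero start address terminates the table.
 */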
	.section ".rodata"
	.globl _C_LABEL(onfault_table)

_C_LABEL(onfault_table):
	.long .Lcopyin_start
	.long .Lcopyin_end
	.long _C_LABEL(copy_fault)

	.long .Lcopyout_start
	.long .Lcopyout_end
	.long _C_LABEL(copy_fault)

	.long .Lkcopy_start
	.long .Lkcopy_end
	.long _C_LABEL(kcopy_fault)

	.long .Lcopyoutstr_start
	.long .Lcopyoutstr_end
	.long _C_LABEL(copystr_fault)

	.long .Lcopyinstr_start
	.long .Lcopyinstr_end
	.long _C_LABEL(copystr_fault)

	.long .Lucas32_start
	.long .Lucas32_end
	.long _C_LABEL(ucas_fault)

	.long .L_ufetch_8_start
	.long .L_ufetch_8_end
	.long _C_LABEL(ufetchstore_fault)

	.long .L_ufetch_16_start
	.long .L_ufetch_16_end
	.long _C_LABEL(ufetchstore_fault)

	.long .L_ufetch_32_start
	.long .L_ufetch_32_end
	.long _C_LABEL(ufetchstore_fault)

	.long .L_ustore_8_start
	.long .L_ustore_8_end
	.long _C_LABEL(ufetchstore_fault)

	.long .L_ustore_16_start
	.long .L_ustore_16_end
	.long _C_LABEL(ufetchstore_fault)

	.long .L_ustore_32_start
	.long .L_ustore_32_end
	.long _C_LABEL(ufetchstore_fault)

	.long .Lx86_copyargs_start
	.long .Lx86_copyargs_end
	.long _C_LABEL(x86_copyargs_fault)

	.long 0	/* terminate */

	.text