/* xref: /openbsd-src/sys/arch/sh/sh/locore_subr.S (revision d8417bd7da1c4c9f6786edfd187f5ed8fdaf04fb) */
1/*	$OpenBSD: locore_subr.S,v 1.19 2023/12/12 07:37:21 deraadt Exp $	*/
2/*	$NetBSD: locore_subr.S,v 1.28 2006/01/23 22:52:09 uwe Exp $	*/
3
4/*
5 * Copyright (c) 2007 Miodrag Vallat.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice, this permission notice, and the disclaimer below
10 * appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20/*-
21 * Copyright (c) 2002 The NetBSD Foundation, Inc.
22 * All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 *    notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 *    notice, this list of conditions and the following disclaimer in the
31 *    documentation and/or other materials provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
34 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
35 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
36 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
37 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
40 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
41 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46#include "assym.h"
47
48#include <sys/syscall.h>	/* SYS_sigreturn */
49#include <sh/asm.h>
50#include <sh/locore.h>
51#include <sh/param.h>		/* UPAGES */
52#include <sh/mmu_sh3.h>
53#include <sh/mmu_sh4.h>
54
55/*
56 * LINTSTUB: include <sys/types.h>
57 * LINTSTUB: include <sys/proc.h>
58 * LINTSTUB: include <sh/locore.h>
59 */
60
/*
 * SAVEPCB(reg)
 *	Save the integer context into a struct switchframe.
 *	"reg" points to pcb->pcb_sf on entry; it is first advanced past
 *	the frame (add #SF_SIZE) and the registers are then stored with
 *	pre-decrement, so "reg" ends up back at pcb->pcb_sf.
 *	Saved (in store order): mach, macl, r7_bank, sr, r6_bank, pr,
 *	and r8-r15.  Only "reg" itself is clobbered.
 */
#define	SAVEPCB(reg) \
	add	#SF_SIZE, reg ; \
	sts.l	mach,	@-##reg ; \
	sts.l	macl,	@-##reg ; \
	stc.l	r7_bank,@-##reg ; \
	stc.l	sr,	@-##reg ; \
	stc.l	r6_bank,@-##reg ; \
	sts.l	pr,	@-##reg ; \
	mov.l	r8,	@-##reg ; \
	mov.l	r9,	@-##reg ; \
	mov.l	r10,	@-##reg ; \
	mov.l	r11,	@-##reg ; \
	mov.l	r12,	@-##reg ; \
	mov.l	r13,	@-##reg ; \
	mov.l	r14,	@-##reg ; \
	mov.l	r15,	@-##reg
81
/*
 * SAVEFP(reg, tmp, tmp2)
 *	Save the floating point context to a fpreg structure.
 *	"reg" points to the structure; "tmp" and "tmp2" are scratch
 *	integer registers ("tmp2" holds the saved FPSCR throughout).
 *
 *	"reg" is advanced to the end of the structure in two steps
 *	(#124, then #(FP_SIZE - 124)) because the SH "add #imm"
 *	instruction only takes an 8-bit signed immediate.
 *	FPSCR is saved first and then cleared (lds tmp, fpscr with
 *	tmp = 0) so the fmov.s transfers operate in single-precision
 *	mode; frchg switches register banks so both banks of fr0-fr15
 *	are saved.  FPUL is saved as well.  The caller's FPSCR is
 *	restored from "tmp2" at the end.
 */
#define	SAVEFP(reg, tmp, tmp2) \
	add	#124,	reg ; \
	sts	fpscr,	tmp2 ; \
	add	#(FP_SIZE - 124), reg ; \
	mov	#0,	tmp; \
	mov.l	tmp2,	@-##reg ; \
	lds	tmp,	fpscr; \
	sts.l	fpul,	@-##reg ; \
	frchg; \
	fmov.s	fr15,	@-##reg ; \
	fmov.s	fr14,	@-##reg ; \
	fmov.s	fr13,	@-##reg ; \
	fmov.s	fr12,	@-##reg ; \
	fmov.s	fr11,	@-##reg ; \
	fmov.s	fr10,	@-##reg ; \
	fmov.s	fr9,	@-##reg ; \
	fmov.s	fr8,	@-##reg ; \
	fmov.s	fr7,	@-##reg ; \
	fmov.s	fr6,	@-##reg ; \
	fmov.s	fr5,	@-##reg ; \
	fmov.s	fr4,	@-##reg ; \
	fmov.s	fr3,	@-##reg ; \
	fmov.s	fr2,	@-##reg ; \
	fmov.s	fr1,	@-##reg ; \
	fmov.s	fr0,	@-##reg ; \
	frchg; \
	fmov.s	fr15,	@-##reg ; \
	fmov.s	fr14,	@-##reg ; \
	fmov.s	fr13,	@-##reg ; \
	fmov.s	fr12,	@-##reg ; \
	fmov.s	fr11,	@-##reg ; \
	fmov.s	fr10,	@-##reg ; \
	fmov.s	fr9,	@-##reg ; \
	fmov.s	fr8,	@-##reg ; \
	fmov.s	fr7,	@-##reg ; \
	fmov.s	fr6,	@-##reg ; \
	fmov.s	fr5,	@-##reg ; \
	fmov.s	fr4,	@-##reg ; \
	fmov.s	fr3,	@-##reg ; \
	fmov.s	fr2,	@-##reg ; \
	fmov.s	fr1,	@-##reg ; \
	fmov.s	fr0,	@-##reg ; \
	lds	tmp2,	fpscr
129
/*
 * LOADFP(reg, tmp)
 *	Load the floating point context from a fpreg structure
 *	(the layout written by SAVEFP).
 *	"reg" points to the structure; "tmp" is a scratch integer
 *	register used to clear FPSCR first, so the fmov.s transfers
 *	operate in single-precision mode.  Both banks of fr0-fr15 are
 *	loaded (frchg switches banks), then FPUL, and finally FPSCR
 *	itself from the saved value in the structure.
 *	"reg" is advanced past the structure by the post-increment loads.
 */
#define	LOADFP(reg, tmp) \
	mov	#0,	tmp; \
	lds	tmp,	fpscr; \
	fmov.s	@##reg##+, fr0 ; \
	fmov.s	@##reg##+, fr1 ; \
	fmov.s	@##reg##+, fr2 ; \
	fmov.s	@##reg##+, fr3 ; \
	fmov.s	@##reg##+, fr4 ; \
	fmov.s	@##reg##+, fr5 ; \
	fmov.s	@##reg##+, fr6 ; \
	fmov.s	@##reg##+, fr7 ; \
	fmov.s	@##reg##+, fr8 ; \
	fmov.s	@##reg##+, fr9 ; \
	fmov.s	@##reg##+, fr10 ; \
	fmov.s	@##reg##+, fr11 ; \
	fmov.s	@##reg##+, fr12 ; \
	fmov.s	@##reg##+, fr13 ; \
	fmov.s	@##reg##+, fr14 ; \
	fmov.s	@##reg##+, fr15 ; \
	frchg; \
	fmov.s	@##reg##+, fr0 ; \
	fmov.s	@##reg##+, fr1 ; \
	fmov.s	@##reg##+, fr2 ; \
	fmov.s	@##reg##+, fr3 ; \
	fmov.s	@##reg##+, fr4 ; \
	fmov.s	@##reg##+, fr5 ; \
	fmov.s	@##reg##+, fr6 ; \
	fmov.s	@##reg##+, fr7 ; \
	fmov.s	@##reg##+, fr8 ; \
	fmov.s	@##reg##+, fr9 ; \
	fmov.s	@##reg##+, fr10 ; \
	fmov.s	@##reg##+, fr11 ; \
	fmov.s	@##reg##+, fr12 ; \
	fmov.s	@##reg##+, fr13 ; \
	fmov.s	@##reg##+, fr14 ; \
	fmov.s	@##reg##+, fr15 ; \
	lds.l	@##reg##+, fpul ; \
	lds.l	@##reg##+, fpscr
172
	.text
	.align 5	/* align cache line size (32B) */
/*
 * LINTSTUB: Func: void cpu_switchto(struct proc *old, struct proc *new)
 *	Switch proc contexts.
 *
 *	In:	r4 = old proc (may be NULL, in which case there is
 *		     nothing to save), r5 = new proc.
 *	Note:	on SH, the instruction after jsr/jmp/rts/bra executes in
 *		the branch delay slot; such instructions are written with
 *		one extra leading space throughout this file.
 */
ENTRY(cpu_switchto)
	mov	r4,	r0
	cmp/eq	#0,	r0	/* old == NULL? skip the save */
	bt	1f

	/* Save old proc's context to switchframe */
	mov.l	.L_SF,	r0	/* r0 = offset of p_md.md_pcb in struct proc */
	mov.l	@(r0, r4), r1	/* r1 = old's pcb (switchframe at its start) */
	SAVEPCB(r1)
	add	#PCB_FP, r1	/* advance to the FP save area */
	SAVEFP(r1, r8, r9)	/* r8/r9 scratch; already saved by SAVEPCB */

1:
	mov.l	.L_cpu_switch_prepare, r0
	jsr	@r0		/* cpu_switch_prepare(old, new) */
	 mov	r5,	r8	/* (delay slot) keep new proc in callee-saved r8 */
	mov	r8,	r4	/* r4 = new proc */

	/* Setup kernel stack */
	mov.l	.L_SF,	r0
	mov.l	@(r0, r4), r1		/* switch frame */
	mov.l	@(SF_R7_BANK, r1), r0	/* stack top */
	mov.l	@(SF_R6_BANK, r1), r2	/* current frame */
	mov.l	@(SF_R15, r1), r3	/* current stack */
	/* During kernel stack switching, all interrupts are disabled. */
	__EXCEPTION_BLOCK(r1, r5)
	/* switch to new kernel stack */
	ldc	r0,	r7_bank
	ldc	r2,	r6_bank
	mov	r3,	r15

	/* Wire u-area */
	MOV	(switch_resume, r0)
	jsr	@r0		/* sh3_/sh4_switch_resume(new) */
	 mov	r4,	r8	/* (delay slot) save new proc across the call */
	mov	r8,	r4	/* restore new proc */
	__EXCEPTION_UNBLOCK(r0, r1)
	/* Now OK to use kernel stack. */

	/* Restore new proc's context from switchframe */
	mov.l	.L_SF,	r0
	mov.l	@(r0, r4), r1
	add	#4,	r1		/* r15 already restored */
	mov.l	@r1+,	r14
	mov.l	@r1+,	r13
	mov.l	@r1+,	r12
	mov.l	@r1+,	r11
	mov.l	@r1+,	r10
	mov.l	@r1+,	r9
	mov.l	@r1+,	r8
	lds.l	@r1+,	pr
	add	#4,	r1		/* r6_bank already restored */
	ldc.l	@r1+,	sr
	add	#4,	r1		/* r7_bank already restored */
	lds.l	@r1+,	macl
	lds.l	@r1+,	mach

	mov.l	@(r0, r4), r1
	add	#PCB_FP, r1		/* restore the FP context too */
	LOADFP(r1, r0)
	rts
	 nop
	.align	2
.L_SF:			.long	(P_MD_PCB)
.L_cpu_switch_prepare:	.long	cpu_switch_prepare
FUNC_SYMBOL(switch_resume)
245
246#ifdef SH3
/*
 * LINTSTUB: Func: void sh3_switch_resume(struct proc *p)
 *	Set the current u-area PTE array pointer (curupte).
 *	No TLB entries need to be flushed: this relies on the u-area
 *	mapping being wired, so it never causes modified/reference
 *	faults, and u-area TLB faults are only ever handled by the TLB
 *	miss exception.  If a "VPN match but not Valid" situation were
 *	to occur, SH3 would enter the generic exception handler instead
 *	of the TLB miss handler; OpenBSD/sh does not handle that case,
 *	and the result would be a hard reset (the kernel stack could
 *	never be accessed).
 *
 *	In:	r4 = proc
 */
NENTRY(sh3_switch_resume)
	mov.l	.L_UPTE, r0	/* offset of p_md.md_upte */
	mov.l	.L_curupte, r1
	add	r4,	r0	/* r0 = &p->p_md.md_upte */
	rts
	 mov.l	r0,	@r1	/* (delay slot) curupte = r0 */
	.align	2
.L_UPTE:		.long	P_MD_UPTE
.L_curupte:		.long	curupte
	SET_ENTRY_SIZE(sh3_switch_resume)
268#endif /* SH3 */
269
270
271#ifdef SH4
/*
 * LINTSTUB: Func: void sh4_switch_resume(struct proc *p)
 *	Wire u-area. invalidate TLB entry for kernel stack to prevent
 *	TLB multiple hit.
 *
 *	In:	r4 = proc
 *	Regs:	r2/r3 = loop counter/limit (UPAGES iterations),
 *		r7 = saved PTEH (ASID), r6 = VPN mask, r5 = UTLB
 *		address array (with associative bit).
 *	md_upte is consumed as (addr, data) pairs: one pair for the
 *	UTLB address array, one for the data array, per page.
 */
NENTRY(sh4_switch_resume)
	mov.l	.L_UPTE__sh4, r0
	add	r0,	r4	/* r4 = &p->p_md.md_upte[0] */
	mov	#UPAGES,r3	/* loop limit */
	mov	#1,	r2	/* loop counter */
	mov.l	@r4,	r0	/* if (p->p_md.md_upte[0].addr == 0) return; */
	tst	r0,	r0
	bt	2f

	/* Save old ASID and set ASID to zero */
	xor	r0,	r0
	mov.l	.L_4_PTEH, r1
	mov.l	@r1,	r7	/* r7 = old PTEH */
	mov.l	r0,	@r1	/* PTEH = 0 */

	mov.l	.L_VPN_MASK, r6		/* r6 = 0xfffff000 */
	mov.l	.L_4_UTLB_AA_A, r5	/* address array, associative write */

	/* TLB address array must be accessed via P2. Setup jump address. */
	mova	1f,	r0
	mov.l	.L_P2BASE, r1
	or	r1,	r0	/* r0 = P2 alias of label 1 */
	jmp	@r0		/* run P2 */
	 nop

	/* Probe VPN match TLB entry and invalidate it. */
	.align	2		/* mova target must be 4byte alignment */
1:	mov.l	@(4, r4), r0	/* data word of the first pair */
	and	r6,	r0	/* keep VPN bits only */
	mov.l	r0,	@r5	/* clear D, V */

	/* Wire u-area TLB entry */
	/* Address array */
	mov.l	@r4+,	r0	/* addr */
	mov.l	@r4+,	r1	/* data */
	mov.l	r1,	@r0	/* *addr = data */

	/* Data array */
	mov.l	@r4+,	r0	/* addr */
	mov.l	@r4+,	r1	/* data */
	mov.l	r1,	@r0	/* *addr = data */
	cmp/eq	r2,	r3	/* all UPAGES entries wired? */
	bf/s	1b
	 add	#1,	r2	/* (delay slot) counter++ */

	/* restore ASID */
	mov.l	.L_4_PTEH, r0
	mov.l	r7,	@r0
	mova	2f,	r0	/* back to P1 via a normal (P1) label */
	jmp	@r0		/* run P1 */
	 nop
	.align	2
2:	rts			/* mova target must be 4byte alignment */
	 nop
	.align	2
.L_UPTE__sh4:		.long	P_MD_UPTE
.L_4_PTEH:		.long	SH4_PTEH
.L_4_UTLB_AA_A:		.long	(SH4_UTLB_AA | SH4_UTLB_A)
.L_4_ITLB_AA:		.long	SH4_ITLB_AA	/* NOTE(review): not referenced in this function */
.L_VPN_MASK:		.long	0xfffff000
.L_P2BASE:		.long	0xa0000000
	SET_ENTRY_SIZE(sh4_switch_resume)
339#endif /* SH4 */
340
341
/*
 * LINTSTUB: Func: int _cpu_intr_raise(int s)
 *	raise SR.IMASK to 's'. if current SR.IMASK is greater equal 's',
 *	nothing to do. returns previous SR.IMASK.
 *
 *	In:	r4 = s (IMASK value in its SR bit positions, mask 0xf0)
 *	Out:	r0 = previous SR & 0xf0
 *	Note:	"mov #imm" sign-extends an 8-bit immediate, so 0xf0
 *		cannot be loaded directly; it is built as 0x78 << 1.
 */
NENTRY(_cpu_intr_raise)
	stc	sr,	r2
	mov	#0x78,	r1
	mov	r2,	r0
	shll	r1		/* r1 = 0xf0 */
	and	r1,	r0	/* r0 = SR & 0xf0 */
	cmp/ge	r4,	r0	/* r0 >= r4 ? T = 1 (already high enough) */
	bt/s	1f
	 not	r1,	r1	/* (delay slot) r1 = 0xffffff0f */
	and	r1,	r2	/* r2 = SR & ~0xf0 */
	or	r2,	r4	/* r4 = (SR & ~0xf0) | s */
	ldc	r4,	sr	/* SR = r4 (don't move to delay slot) */
1:	rts
	 nop	/* return (SR & 0xf0), already in r0 */
	SET_ENTRY_SIZE(_cpu_intr_raise)
362
363
/*
 * LINTSTUB: Func: int _cpu_intr_suspend(void)
 *	Mask all external interrupt. Returns previous SR.IMASK.
 *
 *	Out:	r0 = previous SR & 0xf0
 *	Note:	0xf0 is built as 0x78 << 1 ("mov #imm" sign-extends
 *		an 8-bit immediate).
 */
NENTRY(_cpu_intr_suspend)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x78,	r1
	shll	r1		/* r1 = 0x000000f0 */
	mov	r0,	r2	/* r2 = SR */
	or	r1,	r2	/* r2 |= 0x000000f0 (IMASK = max) */
	ldc	r2,	sr	/* SR = r2 */
	rts
	 and	r1,	r0	/* (delay slot) r0 = SR & 0x000000f0 */
	SET_ENTRY_SIZE(_cpu_intr_suspend)
378
379
380
/*
 * LINTSTUB: Func: int _cpu_intr_resume(int s)
 *	Set 's' to SR.IMASK. Returns previous SR.IMASK.
 *
 *	In:	r4 = s (IMASK value in its SR bit positions, mask 0xf0)
 *	Out:	r0 = previous SR & 0xf0
 */
NENTRY(_cpu_intr_resume)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x78,	r2
	shll	r2		/* r2 = 0x000000f0 */
	not	r2,	r1	/* r1 = 0xffffff0f */
	and	r0,	r1	/* r1 = (SR & ~0xf0) */
	or	r1,	r4	/* r4 = (SR & ~0xf0) | level */
	ldc	r4,	sr	/* SR = r4 (don't move to delay slot) */
	rts
	 and	r2,	r0	/* (delay slot) return (SR & 0xf0) */
	SET_ENTRY_SIZE(_cpu_intr_resume)
396
397
/*
 * LINTSTUB: Func: int _cpu_exception_suspend(void)
 *	Block exception (SR.BL). if external interrupt raise, pending interrupt.
 *	if exception occur, jump to 0xa0000000 (hard reset).
 *
 *	Out:	r0 = previous SR & 0x10000000 (old SR.BL bit)
 *	Note:	0x10000000 is built from 0x10 with swap.b/swap.w since
 *		"mov #imm" only takes an 8-bit immediate.
 */
NENTRY(_cpu_exception_suspend)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x10,	r1
	swap.b	r1,	r1	/* r1 = 0x00001000 */
	mov	r0,	r2	/* r2 = r0 */
	swap.w	r1,	r1	/* r1 = 0x10000000 (SR.BL) */
	or	r1,	r2	/* r2 |= 0x10000000 */
	ldc	r2,	sr	/* SR = r2 */
	rts
	 and	r1,	r0	/* (delay slot) r0 &= 0x10000000 */
	SET_ENTRY_SIZE(_cpu_exception_suspend)
414
415
/*
 * LINTSTUB: Func: void _cpu_exception_resume(int s)
 *	restore 's' exception mask. (SR.BL)
 *
 *	In:	r4 = previously saved SR.BL bit (as returned by
 *		     _cpu_exception_suspend)
 */
NENTRY(_cpu_exception_resume)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x10,	r1
	swap.b	r1,	r1	/* r1 = 0x00001000 */
	swap.w	r1,	r1	/* r1 = 0x10000000 (SR.BL) */
	not	r1,	r1	/* r1 = ~0x10000000 */
	and	r1,	r0	/* r0 &= ~0x10000000 */
	or	r4,	r0	/* r0 |= old SR.BL */
	ldc	r0,	sr	/* SR = r0 (don't move to delay slot) */
	rts
	 nop
	SET_ENTRY_SIZE(_cpu_exception_resume)
432
433
/*
 * LINTSTUB: Func: void _cpu_spin(uint32_t count)
 *	Loop for 'count' * 10 cycles.
 *
 *	In:	r4 = count (iterations; decremented to 0)
 *
 *	Pipeline diagram of one iteration (10 cycles per pass):
 * [...]
 * add    IF ID EX MA WB
 * nop       IF ID EX MA WB
 * cmp/pl       IF ID EX MA WB -  -
 * nop             IF ID EX MA -  -  WB
 * bt                 IF ID EX .  .  MA WB
 * nop                   IF ID -  -  EX MA WB
 * nop                      IF -  -  ID EX MA WB
 * nop                      -  -  -  IF ID EX MA WB
 * add                                  IF ID EX MA WB
 * nop                                     IF ID EX MA WB
 * cmp/pl                                     IF ID EX MA WB -  -
 * nop                                           IF ID EX MA -  - WB
 * bt                                               IF ID EX .  . MA
 * [...]
 */
	.align 5	/* align cache line size (32B) */
NENTRY(_cpu_spin)
1:	nop			/* 1 */
	nop			/* 2 */
	nop			/* 3 */
	add	#-1, r4		/* 4 */
	nop			/* 5 */
	cmp/pl	r4		/* 6: T = (r4 > 0) */
	nop			/* 7 */
	bt	1b		/* 8, 9, 10 */
	rts
	 nop
	SET_ENTRY_SIZE(_cpu_spin)
466
467
/*
 * proc_trampoline:
 *	Call proc_trampoline_mi(), then the service function (r12) with
 *	one argument (r11); both registers are set by cpu_fork().
 *	Finally returns to user mode via __EXCEPTION_RETURN.
 */
NENTRY(proc_trampoline)
	mov.l	.L_proc_trampoline_mi, r1
	jsr	@r1			/* proc_trampoline_mi() */
	 nop
	jsr	@r12			/* (*func)(arg) */
	 mov	r11,	r4		/* (delay slot) r4 = arg */
	__EXCEPTION_RETURN
	/* NOTREACHED */
.L_proc_trampoline_mi:
	.long	proc_trampoline_mi
	SET_ENTRY_SIZE(proc_trampoline)
484
485
/*
 * LINTSTUB: Var: char sigcode[1]
 *	Signal trampoline.
 *
 *	The kernel arranges for the signal handler to be invoked directly.
 *	This trampoline is used only to perform the return.
 *
 *	On entry, the stack looks like this:
 *
 *	sp->	sigcontext structure
 */
NENTRY(sigcode)
	mov	r15, r4			/* get pointer to sigcontext */
	mov.l	.L_SYS_sigreturn, r0	/* r0 = syscall number */
	.globl	sigcodecall
sigcodecall:
	trapa	#0x80			/* and call sigreturn() */
	.globl	sigcoderet
sigcoderet:
	/* sigreturn() does not return here on success */
	sleep		/* privileged -> illegal? */
	/* NOTREACHED */

	.align	2
.L_SYS_sigreturn:	.long	SYS_sigreturn

/* LINTSTUB: Var: char esigcode[1] */
.globl	esigcode
esigcode:
	SET_ENTRY_SIZE(sigcode)
515
	/*
	 * sigfill: single-instruction fill pattern (a privileged `sleep',
	 * which should fault if executed from user mode — TODO confirm
	 * intended use against the callers of sigfill/sigfillsiz).
	 * sigfillsiz holds its size in bytes.
	 */
	.globl	sigfill
sigfill:
	sleep		/* privileged -> illegal? */
esigfill:

	.data
	.globl	sigfillsiz
sigfillsiz:
	.word	esigfill - sigfill	/* size of the fill pattern, in bytes */

	.text
527
/*
 * LINTSTUB: Func: void savectx(struct pcb *pcb)
 *	save struct switchframe.
 *
 *	In:	r4 = pcb (switchframe at its start, FP context at
 *		     PCB_FP); integer and FP context are both saved.
 *	Clobbers r0, r1 (SAVEFP scratch).
 */
ENTRY(savectx)
	SAVEPCB(r4)
	add	#PCB_FP, r4	/* advance to the FP save area */
	SAVEFP(r4, r0, r1)
	rts
	 nop
	SET_ENTRY_SIZE(savectx)
539
/*
 * void fpu_save(struct fpreg *fp)
 *
 * Saves fpu context.
 *
 *	In:	r4 = fp.  Clobbers r0, r1 (SAVEFP scratch).
 */
ENTRY(fpu_save)
	SAVEFP(r4, r0, r1)
	rts
	 nop
	SET_ENTRY_SIZE(fpu_save)
550
/*
 * void fpu_restore(struct fpreg *fp)
 *
 * Restores fpu context.
 *
 *	In:	r4 = fp.  Clobbers r0 (LOADFP scratch).
 */
ENTRY(fpu_restore)
	LOADFP(r4, r0)
	rts
	 nop
	SET_ENTRY_SIZE(fpu_restore)
561
/*
 * LINTSTUB: Func: int copyout(const void *ksrc, void *udst, size_t len)
 *	Copy len bytes into the user address space.
 *
 *	In:	r4 = ksrc, r5 = udst, r6 = len
 *	Out:	r0 = 0 on success, EFAULT on a bad user range or a fault.
 *	Faults during the copy are caught via curpcb->pcb_onfault: the
 *	trap handler resumes at the stored label (3 below).
 */
ENTRY(copyout)
	mov.l	r14,	@-r15
	sts.l	pr,	@-r15
	mov	r15,	r14		/* frame pointer */

	mov	#EFAULT, r0		/* assume there was a problem */
	mov	r4,	r3		/* r3 = ksrc */
	mov	r5,	r2
	mov	r5,	r4		/* r4 = udst (memcpy dst argument) */
	add	r6,	r2		/* r2 = udst + len */
	cmp/hs	r5,	r2		/* bomb if uaddr+len wraps */
	bf	2f
	mov.l	.L_copyout_VM_MAXUSER_ADDRESS, r1
	cmp/hi	r1,	r2		/* bomb if uaddr isn't in user space */
	bt	2f

	mov.l	.L_copyout_curpcb, r1	/* set fault handler */
	mov.l	@r1,	r2
	mov.l	.L_copyout_onfault, r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
	mov.l	.L_copyout_memcpy, r1
	jsr	@r1			/* memcpy(uaddr, kaddr, len) */
	 mov	r3,	r5		/* (delay slot) r5 = ksrc */

	mov	#0,	r0		/* success */
1:
	mov.l	.L_copyout_curpcb, r1	/* clear fault handler */
	mov.l	@r1,	r2
	mov	#0,	r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
2:
	mov	r14,	r15
	lds.l	@r15+,	pr
	rts
	 mov.l	@r15+,	r14

3:
	/* fault during memcpy; reached via pcb_onfault */
	bra	1b
	 mov	#EFAULT, r0

	.align 2
.L_copyout_onfault:
	.long	3b
.L_copyout_VM_MAXUSER_ADDRESS:
	.long	VM_MAXUSER_ADDRESS
.L_copyout_curpcb:
	.long	curpcb
.L_copyout_memcpy:
	.long	memcpy
	SET_ENTRY_SIZE(copyout)
616
617
/*
 * LINTSTUB: Func: int _copyin(const void *usrc, void *kdst, size_t len)
 *	Copy len bytes from the user address space.
 *
 *	In:	r4 = usrc, r5 = kdst, r6 = len
 *	Out:	r0 = 0 on success, EFAULT on a bad user range or a fault.
 *	Faults during the copy are caught via curpcb->pcb_onfault.
 */
ENTRY(_copyin)
	mov.l	r14,	@-r15
	sts.l	pr,	@-r15
	mov	r15,	r14		/* frame pointer */

	mov	#EFAULT, r0		/* assume there was a problem */
	mov	r4,	r3		/* r3 = usrc */
	mov	r5,	r4		/* r4 = kdst (memcpy dst argument) */
	mov	r3,	r2
	add	r6,	r2		/* r2 = usrc + len */
	cmp/hs	r3,	r2		/* bomb if uaddr+len wraps */
	bf	2f
	mov.l	.L_copyin_VM_MAXUSER_ADDRESS, r1
	cmp/hi	r1,	r2		/* bomb if uaddr isn't in user space */
	bt	2f

	mov.l	.L_copyin_curpcb, r1	/* set fault handler */
	mov.l	@r1,	r2
	mov.l	.L_copyin_onfault, r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
	mov.l	.L_copyin_memcpy, r1
	jsr	@r1			/* memcpy(kaddr, uaddr, len) */
	 mov	r3,	r5		/* (delay slot) r5 = usrc */

	mov	#0,	r0		/* success */
1:
	mov.l	.L_copyin_curpcb, r1	/* clear fault handler */
	mov.l	@r1,	r2
	mov	#0,	r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
2:
	mov	r14,	r15
	lds.l	@r15+,	pr
	rts
	 mov.l	@r15+,	r14

3:
	/* fault during memcpy; reached via pcb_onfault */
	bra	1b
	 mov	#EFAULT, r0

	.align 2
.L_copyin_onfault:
	.long	3b
.L_copyin_VM_MAXUSER_ADDRESS:
	.long	VM_MAXUSER_ADDRESS
.L_copyin_curpcb:
	.long	curpcb
.L_copyin_memcpy:
	.long	memcpy
	SET_ENTRY_SIZE(_copyin)
672
/*
 * int copyin32(const void *usrc, void *kdst)
 *	Fetch one aligned 32-bit word from user space.
 *
 *	In:	r4 = usrc (must be 4-byte aligned), r5 = kdst
 *	Out:	r0 = 0 on success, EFAULT on misalignment, bad user
 *		     address, or fault.
 *	The user-space bound is VM_MAXUSER_ADDRESS - 4 so the whole
 *	word lies in user space.
 */
ENTRY(copyin32)
	mov.l	r14,	@-r15
	sts.l	pr,	@-r15
	mov	r15,	r14		/* frame pointer */

	mov	#3,	r3
	mov	#EFAULT, r0		/* assume there was a problem */
	and	r4,	r3		/* low two bits of usrc */
	tst	r3,	r3
	bf	2f			/* punt if not aligned */

	mov.l	.L_copyin32_VM_MAXUSER_ADDRESS, r1
	cmp/hi	r1,	r4		/* bomb if uaddr isn't in user space */
	bt	2f

	mov.l	.L_copyin32_curpcb, r1	/* set fault handler */
	mov.l	@r1,	r2
	mov.l	.L_copyin32_onfault, r3
	mov.l	r3,	@(PCB_ONFAULT,r2)

	mov.l	@r4,	r1		/* fetch the word (may fault) */
	mov	#0,	r0		/* success */
	mov.l	r1,	@r5		/* *kdst = word */
	mov.l	r0,	@(PCB_ONFAULT,r2)	/* clear fault handler */
2:
	mov	r14,	r15
	lds.l	@r15+,	pr
	rts
	 mov.l	@r15+,	r14

3:
	/* fault during the user load; reached via pcb_onfault */
	mov.l	.L_copyin32_curpcb, r1	/* clear fault handler */
	mov.l	@r1,	r2
	mov	#0,	r1
	mov.l	r1,	@(PCB_ONFAULT,r2)

	bra	2b
	 mov	#EFAULT, r0

	.align 2
.L_copyin32_onfault:
	.long	3b
.L_copyin32_VM_MAXUSER_ADDRESS:
	.long	VM_MAXUSER_ADDRESS - 4	/* sizeof(uint32_t) */
.L_copyin32_curpcb:
	.long	curpcb
	SET_ENTRY_SIZE(copyin32)
723
/*
 * LINTSTUB: Func: int copyoutstr(const void *ksrc, void *udst, size_t maxlen, size_t *lencopied)
 *	Copy a NUL-terminated string, at most maxlen characters long,
 *	into the user address space.  Return the number of characters
 *	copied (including the NUL) in *lencopied.  If the string is
 *	too long, return ENAMETOOLONG; else return 0 or EFAULT.
 *
 *	In:	r4 = ksrc, r5 = udst, r6 = maxlen, r7 = lencopied (or 0)
 *	Regs:	r0 = characters remaining, r3 = error code,
 *		r8 = saved ksrc (for the lencopied computation).
 *	Faults while storing are caught via curpcb->pcb_onfault.
 */
ENTRY(copyoutstr)
	mov.l	r8,	@-r15

	mov	#EFAULT, r3		/* assume there was a problem */
	mov	r4,	r8		/* save ksrc */
	mov.l	.L_copyoutstr_curpcb, r1	/* set fault handler */
	mov.l	@r1,	r2
	mov.l	.L_copyoutstr_onfault, r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
	mov.l	.L_copyoutstr_VM_MAXUSER_ADDRESS, r1
	cmp/hi	r1,	r5		/* bomb if udst isn't in user space */
	bt	4f
	mov	r1,	r0
	sub	r5,	r0		/* r0 = space to end of user VA */
	cmp/hi	r6,	r0		/* don't beyond user space */
	bf	2f
	bra	2f
	 mov	r6,	r0		/* (delay slot) r0 = min(maxlen, space) */

	.align 2
1:
	mov.b	@r4+,	r1		/* copy str */
	mov.b	r1,	@r5
	extu.b	r1,	r1
	add	#1,	r5
	tst	r1,	r1		/* NUL terminator reached? */
	bf	2f
	bra	3f
	 mov	#0,	r3		/* (delay slot) success */
	.align 2
2:
	add	#-1,	r0		/* consume one character of budget */
	cmp/eq	#-1,	r0		/* budget exhausted? */
	bf	1b
	mov.l	.L_copyoutstr_VM_MAXUSER_ADDRESS, r1
	cmp/hs	r1,	r5		/* stopped at end of user VA? */
	bt	3f			/* yes: keep EFAULT */
	mov	#ENAMETOOLONG, r3	/* no: ran out of maxlen */

3:
	tst	r7,	r7		/* set lencopied if needed */
	bt	4f
	mov	r4,	r1
	sub	r8,	r1		/* chars consumed (incl. NUL) */
	mov.l	r1,	@r7
4:
	mov.l	.L_copyoutstr_curpcb, r1	/* clear fault handler */
	mov.l	@r1,	r2
	mov	#0,	r1
	mov.l	r1,	@(PCB_ONFAULT,r2)

	mov	r3,	r0		/* return error code */
	rts
	 mov.l	@r15+,	r8

5:
	/* fault during the copy; reached via pcb_onfault */
	bra	4b
	 mov	#EFAULT, r3

	.align 2
.L_copyoutstr_onfault:
	.long	5b
.L_copyoutstr_VM_MAXUSER_ADDRESS:
	.long	VM_MAXUSER_ADDRESS
.L_copyoutstr_curpcb:
	.long	curpcb
	SET_ENTRY_SIZE(copyoutstr)
798
799
800/*
801 * LINTSTUB: Func: int _copyinstr(const void *src, void *dst, size_t maxlen, size_t *lencopied)
802 *	Copy a NUL-terminated string, at most maxlen characters long,
803 *	from the user address space.  Return the number of characters
804 *	copied (including the NUL) in *lencopied.  If the string is
805 *	too long, return ENAMETOOLONG; else return 0 or EFAULT.
806 */
807ENTRY(_copyinstr)
808	mov.l	r8,	@-r15
809	mov	#EFAULT, r3		/* assume there was a problem */
810	mov	r4,	r8
811	mov.l	.L_copyinstr_curpcb, r1	/* set fault handler */
812	mov.l	@r1,	r2
813	mov.l	.L_copyinstr_onfault, r1
814	mov.l	r1,	@(PCB_ONFAULT,r2)
815
816	mov.l	.L_copyinstr_VM_MAXUSER_ADDRESS, r1
817	cmp/hi	r1,	r4		/* bomb if src isn't in user space */
818	bt	4f
819	mov	r1,	r0
820	sub	r4,	r0
821	cmp/hi	r6,	r0		/* don't beyond user space */
822	bf	2f
823	bra	2f
824	 mov	r6,	r0
825
826	.align 2
8271:
828	mov.b	@r4+,	r1		/* copy str */
829	mov.b	r1,	@r5
830	extu.b	r1,	r1
831	add	#1,	r5
832	tst	r1,	r1
833	bf	2f
834	bra	3f
835	 mov	#0,	r3
836	.align 2
8372:
838	add	#-1,	r0
839	cmp/eq	#-1,	r0
840	bf	1b
841	mov.l	.L_copyinstr_VM_MAXUSER_ADDRESS, r1
842	cmp/hs	r1,	r4
843	bt	3f
844	mov	#ENAMETOOLONG, r3
845
8463:
847	tst	r7,	r7		/* set lencopied if needed */
848	bt	4f
849	mov	r4,	r1
850	sub	r8,	r1
851	mov.l	r1,	@r7
8524:
853	mov.l	.L_copyinstr_curpcb, r1	/* clear fault handler */
854	mov.l	@r1,	r2
855	mov	#0,	r1
856	mov.l	r1,	@(PCB_ONFAULT,r2)
857
858	mov	r3,	r0
859	rts
860	 mov.l	@r15+,	r8
861
8625:
863	bra	4b
864	 mov	#EFAULT, r3
865
866	.align 2
867.L_copyinstr_onfault:
868	.long	5b
869.L_copyinstr_VM_MAXUSER_ADDRESS:
870	.long	VM_MAXUSER_ADDRESS
871.L_copyinstr_curpcb:
872	.long	curpcb
873	SET_ENTRY_SIZE(_copyinstr)
874
/*
 * LINTSTUB: Func: int kcopy(const void *src, void *dst, size_t len)
 *	memcpy() between kernel addresses that may fault.
 *
 *	In:	r4 = src, r5 = dst, r6 = len
 *	Out:	r0 = 0 on success, EFAULT on fault.
 *	Unlike copyin/copyout, the previous pcb_onfault value is saved
 *	(in r8) and restored, so nested use is safe.
 */
ENTRY(kcopy)
	mov.l	r8,	@-r15
	mov.l	r14,	@-r15
	sts.l	pr,	@-r15
	mov	r15,	r14		/* frame pointer */

	mov	r4,	r3		/* r3 = src */
	mov.l	.L_kcopy_curpcb, r1
	mov.l	@r1,	r2
	mov.l	@(PCB_ONFAULT,r2) ,r8	/* save old fault handler */
	mov.l	.L_kcopy_onfault, r1
	mov.l	r1,	@(PCB_ONFAULT,r2) /* set fault handler */
	mov.l	.L_kcopy_memcpy, r1
	mov	r5,	r4		/* r4 = dst (memcpy dst argument) */
	jsr	@r1			/* memcpy(dst, src, len) */
	 mov	r3,	r5		/* (delay slot) r5 = src */
	mov	#0,	r0		/* success */
1:
	mov.l	.L_kcopy_curpcb, r1	/* restore fault handler */
	mov.l	@r1,	r2
	mov.l	r8,	@(PCB_ONFAULT,r2)

	mov	r14,	r15
	lds.l	@r15+,	pr
	mov.l	@r15+,	r14
	rts
	 mov.l	@r15+,	r8

2:
	/* fault during memcpy; reached via pcb_onfault */
	bra	1b
	 mov	#EFAULT, r0

	.align 2
.L_kcopy_onfault:
	.long	2b
.L_kcopy_curpcb:
	.long	curpcb
.L_kcopy_memcpy:
	.long	memcpy
	SET_ENTRY_SIZE(kcopy)
918
919
920#if defined(DDB)
921
/*
 * LINTSTUB: Func: int setjmp(label_t *jmpbuf)
 *	DDB context save: store pr, r15-r8 (nine words) into jmpbuf
 *	with pre-decrement stores after advancing past the buffer.
 *	Returns 0; a later longjmp() on the same buffer makes this
 *	call site "return" 1.
 *
 *	In:	r4 = jmpbuf
 */
ENTRY(setjmp)
	add	#4*9,	r4	/* point past the 9-word buffer */
	mov.l	r8,	@-r4
	mov.l	r9,	@-r4
	mov.l	r10,	@-r4
	mov.l	r11,	@-r4
	mov.l	r12,	@-r4
	mov.l	r13,	@-r4
	mov.l	r14,	@-r4
	mov.l	r15,	@-r4
	sts.l	pr,	@-r4
	rts
	 xor	r0, r0		/* (delay slot) return 0 */
	SET_ENTRY_SIZE(setjmp)
939
/*
 * LINTSTUB: Func: void longjmp(label_t *jmpbuf)
 *	DDB context restore: reload pr, r15-r8 in the order setjmp()
 *	stored them, then return to the saved pr as if setjmp()
 *	returned 1.
 *
 *	In:	r4 = jmpbuf (filled by setjmp)
 */
ENTRY(longjmp)
	lds.l	@r4+,	pr
	mov.l	@r4+,	r15
	mov.l	@r4+,	r14
	mov.l	@r4+,	r13
	mov.l	@r4+,	r12
	mov.l	@r4+,	r11
	mov.l	@r4+,	r10
	mov.l	@r4+,	r9
	mov.l	@r4+,	r8
	rts
	 mov	#1, r0		/* return 1 from setjmp */
	SET_ENTRY_SIZE(longjmp)
956
957#endif /* DDB */
958