/*	$NetBSD: locore_subr.S,v 1.61 2021/07/15 04:58:33 rin Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat_netbsd.h"
#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_modular.h"
#include "assym.h"

#include <sys/syscall.h>	/* SYS___sigreturn14, SYS_exit */
#include <sh3/asm.h>
#include <sh3/locore.h>
#include <sh3/param.h>		/* UPAGES */
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

__KERNEL_RCSID(0, "$NetBSD: locore_subr.S,v 1.61 2021/07/15 04:58:33 rin Exp $")


/*
 * LINTSTUB: include <sys/types.h>
 * LINTSTUB: include <sys/lwp.h>
 * LINTSTUB: include <sh3/locore.h>
 */


/*
 * Save processor state to pcb->pcb_sf switchframe.
 * Note that offsetof(struct pcb, pcb_sf) is zero.
 */
#define SAVEPCB_AND_JUMP(pcb, jump)			  \
	add	#SF_SIZE, pcb				; \
	stc.l	r7_bank, @-pcb				; \
	stc.l	r6_bank, @-pcb				; \
	mov.l	r15, @-pcb				; \
	mov.l	r14, @-pcb				; \
	mov.l	r13, @-pcb				; \
	mov.l	r12, @-pcb				; \
	mov.l	r11, @-pcb				; \
	mov.l	r10, @-pcb				; \
	mov.l	r9, @-pcb				; \
	mov.l	r8, @-pcb				; \
	sts.l	pr, @-pcb				; \
	stc.l	sr, @-pcb				; \
	jump						; \
	 stc.l	gbr, @-pcb

/* Hide ugly empty argument if we don't need the jump */
#define SAVEPCB(pcb) \
	SAVEPCB_AND_JUMP(pcb, /* no jump */)
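
/*
 * Resulting switchframe layout (derived from the pre-decrement stores
 * above; SF_* offsets come from assym.h, and the frame is assumed to be
 * exactly these 13 words):
 *	+0  gbr    +4  sr     +8  pr
 *	+12 r8 ... +36 r14    +40 r15
 *	+44 r6_bank (current trapframe)
 *	+48 r7_bank (kernel stack bottom)
 */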


	.text
	.align 5	/* align cache line size (32B) */
/*
 * LINTSTUB: Func: lwp_t *cpu_switchto(lwp_t *olwp, lwp_t *nlwp, bool returning)
 *	Switch from olwp to nlwp.
 *	If returning is true, we do the fast softint dance
 *	 and can skip user-space related activities (pmap, ras, etc...)
 *	Return olwp (in the nlwp context).
 */
ENTRY(cpu_switchto)
	!! save old lwp's context to switchframe
	mov.l	@(L_MD_PCB, r4), r1	! olwp->l_md.md_pcb
	SAVEPCB(r1)

	!! free to use callee-save registers now

	mov.l	.L_curlwp, r2
	mov.l	.L_curpcb, r3
	mov.l	@(L_MD_PCB, r5), r10	! nlwp->l_md.md_pcb
	!tst	r6, r6			! "full" switch?
	mov.l	r5, @r2			! curlwp = nlwp;
	!bt/s	.L_prepare_switch
	 mov.l	r10, @r3		! curpcb = nlwp->l_md.md_pcb;

	!mov	r5, r8		! preserve nlwp
	!bra	.L_restore_nlwp
	! mov	r4, r9		! preserve olwp

.L_prepare_switch:
	!! arguments to cpu_switch_prepare are already in the right registers
	mov.l	.L_cpu_switch_prepare, r0
	mov	r5, r8		! preserve nlwp
	jsr	@r0
	 mov	r4, r9		! preserve olwp

.L_restore_nlwp:
	!! restore new lwp's context from switchframe
	!! r10 is nlwp->l_md.md_pcb == &nlwp->l_md.md_pcb->pcb_sf

	!! setup nlwp's kernel stack first
	mov.l	@(SF_R7_BANK, r10), r0	! kernel stack bottom
	mov.l	@(SF_R6_BANK, r10), r2	! current trapframe
	mov.l	@(SF_R15, r10), r3	! current kernel sp

	!! while switching kernel stack, all exceptions must be disabled
	__EXCEPTION_BLOCK(r1, r11)	! saves SR in r11
	ldc	r0, r7_bank
	ldc	r2, r6_bank
	mov	r3, r15

#if !defined(P1_STACK) && defined(SH4)
	!! wire u-area in TLB
	MOV	(switch_resume, r0)
	jsr	@r0
	 mov	r8, r4		! nlwp
#endif
	!! safe to use nlwp's kernel stack now
	ldc	r11, sr		! __EXCEPTION_UNBLOCK

	!! finish restoring new lwp's context from switchframe
	!! sf_r15, sf_r6_bank, sf_r7_bank are already restored
	mov	r10, r1		! &nlwp->l_md.md_pcb->pcb_sf
	mov	r9, r0		! return olwp (we are about to clobber r9)
	ldc.l	@r1+, gbr

	/*
	 * We cannot simply pop SR here; the PSL_IMASK field must be
	 * inherited by nlwp.  Otherwise the IPL is lost across the
	 * context switch, which allows unwanted interrupts while,
	 * e.g., spin mutexes are held.
	 */
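	/*
	 * (0x78 << 1) == 0xf0 == PSL_IMASK; the constant is built with
	 * mov #imm8 + shll because mov #imm8 sign-extends, so 0xf0
	 * cannot be loaded directly as an immediate.
	 */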
	mov.l	@r1+, r8	! r8  = new SR
	mov	#0x78, r9
	shll	r9		! r9  =  PSL_IMASK
	not	r9, r10		! r10 = ~PSL_IMASK
	and	r9, r11		! r11 = old SR & PSL_IMASK
	and	r10, r8
	or	r11, r8
	ldc	r8, sr

	lds.l	@r1+, pr
	mov.l	@r1+, r8
	mov.l	@r1+, r9
	mov.l	@r1+, r10
	mov.l	@r1+, r11
	mov.l	@r1+, r12
	mov.l	@r1+, r13
	rts
	 mov.l	@r1+, r14

	.align	2
.L_curlwp:		.long	_C_LABEL(curlwp)
.L_curpcb:		.long	_C_LABEL(curpcb)
.L_cpu_switch_prepare:	.long	_C_LABEL(cpu_switch_prepare)
#ifdef SH4
FUNC_SYMBOL(switch_resume)
#endif
	SET_ENTRY_SIZE(cpu_switchto)


#ifdef SH3
/*
 * LINTSTUB: Func: void sh3_switch_resume(struct lwp *l)
 *     We only need this dummy sh3 version if both SH3 and SH4 are defined.
 */
NENTRY(sh3_switch_resume)
	rts
	 nop
	SET_ENTRY_SIZE(sh3_switch_resume)
#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Func: void sh4_switch_resume(struct lwp *l)
 *	Wire the u-area.  Invalidate the TLB entry for the kernel stack
 *	first to prevent a TLB multiple hit.
 */
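/*
 * Register usage below (derived from the code; the contents of
 * l->l_md.md_upte[] are assumed to be precomputed by the sh4 u-area
 * setup code):
 *	r4 walks md_upte[], which holds UPAGES entries of two
 *	    (address, data) pairs, one for the UTLB address array and
 *	    one for the UTLB data array;
 *	r3 counts UPAGES; r7 saves PTEH (ASID); r6 is the VPN mask;
 *	r5 is the UTLB address array address with the associative bit.
 */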
NENTRY(sh4_switch_resume)
	add	#L_MD_UPTE, r4	! l->l_md.md_upte
	mov	#UPAGES, r3
	mov.l	@r4, r0		! if (l->l_md.md_upte[0].addr == 0) return;
	tst	r0, r0
	bt	2f

	/* Save old ASID and set ASID to zero */
	mov	#0, r0
	mov.l	.L_4_PTEH, r2
	mov.l	@r2, r7
	mov.l	r0, @r2

	mov.l	.L_VPN_MASK, r6
	mov.l	.L_4_UTLB_AA_A, r5

	/* The TLB address array must be accessed via P2.  Set up the jump address. */
	mova	1f, r0
	mov.l	.L_P2BASE, r1
	or	r1, r0
	jmp	@r0		! run on P2
	 nop

	/* Probe for a VPN-matching TLB entry and invalidate it. */
	.align	2		! mova target must be 4-byte aligned
1:	mov.l	@(4, r4), r0
	and	r6, r0
	mov.l	r0, @r5		! clear D, V

	/* Wire u-area TLB entry */
	/* Address array */
	mov.l	@r4+, r0	! addr
	mov.l	@r4+, r1	! data
	mov.l	r1, @r0		! *addr = data

	/* Data array */
	mov.l	@r4+, r0	! addr
	mov.l	@r4+, r1	! data
	mov.l	r1, @r0		! *addr = data
	dt	r3
	bf	1b

	/* restore ASID */
	mov.l	r7, @r2

2:	rts			! to the caller in P1
	 nop

	.align	2
.L_4_PTEH:		.long	SH4_PTEH
.L_4_UTLB_AA_A:		.long	(SH4_UTLB_AA | SH4_UTLB_A)
.L_VPN_MASK:		.long	0xfffff000
.L_P2BASE:		.long	0xa0000000
	SET_ENTRY_SIZE(sh4_switch_resume)
#endif /* SH4 */


/*
 * LINTSTUB: Func: int _cpu_intr_raise(int s)
 *	Raise SR.IMASK to 's'.  If the current SR.IMASK is already greater
 *	than or equal to 's', nothing is done.  Returns the previous SR.IMASK.
 */
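/*
 * 's' is assumed to be an IMASK value already positioned in bits 4..7
 * (0x00 .. 0xf0): the comparison and OR below operate on the raw SR
 * bits, and the splraise()-style callers are expected to pass it so.
 */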
NENTRY(_cpu_intr_raise)
	stc	sr,	r2
	mov	#0x78,	r1
	mov	r2,	r0
	shll	r1		/* r1 = 0xf0 */
	and	r1,	r0	/* r0 = SR & 0xf0 */
	cmp/ge	r4,	r0	/* r0 >= r4 ? T = 1 */
	bt/s	1f
	 not	r1,	r1	/* r1 = 0xffffff0f */
	and	r1,	r2	/* r2 = SR & ~0xf0 */
	or	r2,	r4	/* r4 = (SR & ~0xf0) | s */
	ldc	r4,	sr	/* SR = r4 (don't move to delay slot) */
1:	rts
	 nop	/* return (SR & 0xf0) */
	SET_ENTRY_SIZE(_cpu_intr_raise)


/*
 * LINTSTUB: Func: int _cpu_intr_suspend(void)
 *	Mask all external interrupts.  Returns the previous SR.IMASK.
 */
NENTRY(_cpu_intr_suspend)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x78,	r1
	shll	r1		/* r1 = 0x000000f0 */
	mov	r0,	r2	/* r2 = SR */
	or	r1,	r2	/* r2 |= 0x000000f0 */
	ldc	r2,	sr	/* SR = r2 */
	rts
	 and	r1,	r0	/* r0 = SR & 0x000000f0 */
	SET_ENTRY_SIZE(_cpu_intr_suspend)
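
/*
 * Typical usage of the suspend/resume pair (a sketch; the real spl*()
 * glue lives in the MD interrupt code, not here):
 *	s = _cpu_intr_suspend();
 *	... critical section with all external interrupts masked ...
 *	_cpu_intr_resume(s);
 */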



/*
 * LINTSTUB: Func: int _cpu_intr_resume(int s)
 *	Set SR.IMASK to 's'.  Returns the previous SR.IMASK.
 */
NENTRY(_cpu_intr_resume)
	stc	sr,	r0	/* r0 = SR */
	mov	#0x78,	r2
	shll	r2		/* r2 = 0x000000f0 */
	not	r2,	r1	/* r1 = 0xffffff0f */
	and	r0,	r1	/* r1 = (SR & ~0xf0) */
	or	r1,	r4	/* r4 = (SR & ~0xf0) | level */
	ldc	r4,	sr	/* SR = r4 (don't move to delay slot) */
	rts
	 and	r2,	r0	/* return (SR & 0xf0) */
	SET_ENTRY_SIZE(_cpu_intr_resume)


/*
 * LINTSTUB: Func: int _cpu_exception_suspend(void)
 *	Block exceptions (set SR.BL).  While blocked, external interrupts
 *	are kept pending.  If an exception occurs anyway, the CPU branches
 *	to 0xa0000000 (hard reset).  Returns the previous SR.
 */
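/*
 * PSL_BL is 0x10000000 (SR bit 28).  It is synthesized below as
 * 0x10 -> swap.b -> 0x00001000 -> swap.w -> 0x10000000, which avoids
 * a PC-relative constant load.
 */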
NENTRY(_cpu_exception_suspend)
	stc	sr, r0		/* r0 = SR */
	mov	#0x10, r1	/* bswap32(PSL_BL) - fits in an immediate */
	swap.b	r1, r1
	mov	r0, r2		/* r2 = r0 */
	swap.w	r1, r1		/* r1 = PSL_BL */
	or	r1, r2		/* r2 |= PSL_BL */
	ldc	r2, sr		/* SR = r2 */
	rts			/* return old SR */
	 nop
	SET_ENTRY_SIZE(_cpu_exception_suspend)


/*
 * LINTSTUB: Func: void _cpu_exception_resume(int s)
 *	Restore the exception-block state (SR.BL) from 's'.
 */
NENTRY(_cpu_exception_resume)
	stc	sr, r0		/* r0 = SR */
	mov	#0x10, r1	/* bswap32(PSL_BL) - fits in an immediate */
	swap.b	r1, r1
	swap.w	r1, r1
	not	r1, r1		/* r1 = ~PSL_BL */
	and	r1, r0		/* r0 &= ~PSL_BL */
	or	r4, r0		/* r0 |= old SR.BL */
	ldc	r0, sr		/* SR = r0 (don't move to delay slot) */
	rts
	 nop
	SET_ENTRY_SIZE(_cpu_exception_resume)


/*
 * LINTSTUB: Func: void _cpu_spin(uint32_t count)
 *	Loop for 'count' * 10 cycles.
 * [...]
 * add    IF ID EX MA WB
 * nop       IF ID EX MA WB
 * cmp/pl       IF ID EX MA WB -  -
 * nop             IF ID EX MA -  -  WB
 * bt                 IF ID EX .  .  MA WB
 * nop                   IF ID -  -  EX MA WB
 * nop                      IF -  -  ID EX MA WB
 * nop                      -  -  -  IF ID EX MA WB
 * add                                  IF ID EX MA WB
 * nop                                     IF ID EX MA WB
 * cmp/pl                                     IF ID EX MA WB -  -
 * nop                                           IF ID EX MA -  - WB
 * bt                                               IF ID EX .  . MA
 * [...]
 */
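/*
 * With each iteration below costing 10 cycles, busy-waiting for t
 * microseconds on a CPU clocked at f Hz needs roughly
 * count = f * t / 10000000; the MD delay() glue is assumed to do that
 * conversion before calling here.
 */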
	.align 5	/* align cache line size (32B) */
NENTRY(_cpu_spin)
1:	nop			/* 1 */
	nop			/* 2 */
	nop			/* 3 */
	add	#-1, r4		/* 4 */
	nop			/* 5 */
	cmp/pl	r4		/* 6 */
	nop			/* 7 */
	bt	1b		/* 8, 9, 10 */
	rts
	 nop
	SET_ENTRY_SIZE(_cpu_spin)


/*
 * lwp_trampoline:
 *
 * cpu_lwp_fork() arranges for lwp_trampoline() to run when that
 * nascent lwp is selected by cpu_switchto().
 *
 * The switch frame will contain a pointer to this lwp's struct lwp in
 * r10, a pointer to the function to call in r12, and an argument to
 * pass to it in r11 (we abuse the callee-saved registers).
 *
 * We enter lwp_trampoline as if we are "returning" from
 * cpu_switchto(), so r0 contains the previous lwp (the one we are
 * switching from), which we pass to lwp_startup().
 *
 * After that the trampoline calls the function that is intended
 * to do some additional setup.  When that function returns, the
 * trampoline returns to user mode.
 */
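/*
 * Rough sketch of the switchframe setup cpu_lwp_fork() is expected to
 * do for this protocol (an illustration, not a copy of the MD code;
 * field names follow struct switchframe):
 *	sf->sf_r10 = (int)l;			the nascent lwp itself
 *	sf->sf_r11 = (int)arg;			argument for func
 *	sf->sf_r12 = (int)func;			function to call
 *	sf->sf_pr  = (int)lwp_trampoline;	"return" here first
 */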
NENTRY(lwp_trampoline)
	mov.l	.L_lwp_startup, r1
	mov	r0, r4		/* previous lwp returned by cpu_switchto */
	jsr	@r1
	 mov	r10, r5		/* my struct lwp */
	jsr	@r12
	 mov	r11, r4
	__EXCEPTION_RETURN
	/* NOTREACHED */

	.align	2
.L_lwp_startup:		.long	_C_LABEL(lwp_startup)

	SET_ENTRY_SIZE(lwp_trampoline)


#if defined(COMPAT_16) || defined(MODULAR)
/*
 * LINTSTUB: Var: char sigcode[1]
 *	Signal trampoline, copied to the top of the user stack.
 *
 *	The kernel arranges for the signal handler to be invoked directly.
 *	This trampoline is used only to perform the return.
 *
 *	On entry, the stack looks like this:
 *
 *	sp->	sigcontext structure
 */
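/*
 * The code below relies on the sh3 syscall convention: the syscall
 * number is loaded into r0, the first argument is passed in r4, and
 * the kernel is entered with trapa #0x80.
 */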
NENTRY(sigcode)
	mov	r15, r4			/* get pointer to sigcontext */
	mov.l	.L_SYS___sigreturn14, r0
	trapa	#0x80			/* and call sigreturn() */
	mov.l	.L_SYS_exit, r0
	trapa	#0x80			/* exit if sigreturn fails */
	/* NOTREACHED */

	.align	2
.L_SYS___sigreturn14:	.long	SYS_compat_16___sigreturn14
.L_SYS_exit:		.long	SYS_exit

/* LINTSTUB: Var: char esigcode[1] */
.globl	_C_LABEL(esigcode)
_C_LABEL(esigcode):
	SET_ENTRY_SIZE(sigcode)
#endif /* COMPAT_16 || MODULAR */


/*
 * LINTSTUB: Func: void savectx(struct pcb *pcb)
 *	Save CPU state in pcb->pcb_sf
 */
ENTRY(savectx)
	SAVEPCB_AND_JUMP(r4, rts)
	SET_ENTRY_SIZE(savectx)


/*
 * LINTSTUB: Func: int copyout(const void *ksrc, void *udst, size_t len)
 *	Copy len bytes into the user address space.
 */
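/*
 * Rough C equivalent of the pcb_onfault pattern used by the copy
 * routines below (an illustration, not the generated code):
 *
 *	if (uaddr + len < uaddr || uaddr + len > VM_MAXUSER_ADDRESS)
 *		return EFAULT;
 *	curpcb->pcb_onfault = <local label 1>;
 *	memcpy(dst, src, len);
 *	curpcb->pcb_onfault = NULL;
 *	return 0;
 *
 * On a fault the trap code is expected to resume at pcb_onfault, where
 * the handler is cleared and EFAULT is returned instead.
 */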
ENTRY(copyout)
	mov.l	r14,	@-r15
	sts.l	pr,	@-r15
	mov	r15,	r14

	mov	r4,	r3
	mov	r5,	r2
	mov	r5,	r4
	add	r6,	r2
	cmp/hs	r5,	r2		/* bomb if uaddr+len wraps */
	bf	3f
	mov.l	.L_copyout_VM_MAXUSER_ADDRESS, r1
	cmp/hi	r1,	r2		/* bomb if uaddr isn't in user space */
	bt	3f

	mov.l	.L_copyout_curpcb, r1	/* set fault handler */
	mov.l	@r1,	r2
	mov.l	.L_copyout_onfault, r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
	mov.l	.L_copyout_memcpy, r1
	jsr	@r1			/* memcpy(uaddr, kaddr, len) */
	 mov	r3,	r5

	mov	#0,	r0
1:
	mov.l	.L_copyout_curpcb, r1	/* clear fault handler */
	mov.l	@r1,	r2
	mov	#0,	r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
2:
	mov	r14,	r15
	lds.l	@r15+,	pr
	rts
	 mov.l	@r15+,	r14

3:
	bra	2b
	 mov	#EFAULT, r0

	.align 2
.L_copyout_onfault:
	.long	1b
.L_copyout_VM_MAXUSER_ADDRESS:
	.long	VM_MAXUSER_ADDRESS
.L_copyout_curpcb:
	.long	_C_LABEL(curpcb)
.L_copyout_memcpy:
	.long	_C_LABEL(memcpy)
	SET_ENTRY_SIZE(copyout)


/*
 * LINTSTUB: Func: int copyin(const void *usrc, void *kdst, size_t len)
 *	Copy len bytes from the user address space.
 */
ENTRY(copyin)
	mov.l	r14,	@-r15
	sts.l	pr,	@-r15
	mov	r15,	r14

	mov	r4,	r3
	mov	r5,	r4
	mov	r3,	r2
	add	r6,	r2
	cmp/hs	r3,	r2		/* bomb if uaddr+len wraps */
	bf	3f
	mov.l	.L_copyin_VM_MAXUSER_ADDRESS, r1
	cmp/hi	r1,	r2		/* bomb if uaddr isn't in user space */
	bt	3f

	mov.l	.L_copyin_curpcb, r1	/* set fault handler */
	mov.l	@r1,	r2
	mov.l	.L_copyin_onfault, r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
	mov.l	.L_copyin_memcpy, r1
	jsr	@r1			/* memcpy(kaddr, uaddr, len) */
	 mov	r3,	r5

	mov	#0,	r0
1:
	mov.l	.L_copyin_curpcb, r1	/* clear fault handler */
	mov.l	@r1,	r2
	mov	#0,	r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
2:
	mov	r14,	r15
	lds.l	@r15+,	pr
	rts
	 mov.l	@r15+,	r14

3:
	bra	2b
	 mov	#EFAULT, r0

	.align 2
.L_copyin_onfault:
	.long	1b
.L_copyin_VM_MAXUSER_ADDRESS:
	.long	VM_MAXUSER_ADDRESS
.L_copyin_curpcb:
	.long	_C_LABEL(curpcb)
.L_copyin_memcpy:
	.long	_C_LABEL(memcpy)
	SET_ENTRY_SIZE(copyin)


/*
 * LINTSTUB: Func: int copyoutstr(const void *ksrc, void *udst, size_t maxlen, size_t *lencopied)
 *	Copy a NUL-terminated string, at most maxlen characters long,
 *	into the user address space.  Return the number of characters
 *	copied (including the NUL) in *lencopied.  If the string is
 *	too long, return ENAMETOOLONG; else return 0 or EFAULT.
 */
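/*
 * Register roles in the loop below (derived from the code): r0 holds
 * the remaining byte budget, min(maxlen, VM_MAXUSER_ADDRESS - udst);
 * r8 keeps the original ksrc so that *lencopied can be computed as
 * r4 - r8 when the copy stops.
 */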
ENTRY(copyoutstr)
	mov.l	r8,	@-r15

	mov	r4,	r8
	mov.l	.L_copyoutstr_curpcb, r1	/* set fault handler */
	mov.l	@r1,	r2
	mov.l	.L_copyoutstr_onfault, r1
	mov.l	r1,	@(PCB_ONFAULT,r2)
	mov.l	.L_copyoutstr_VM_MAXUSER_ADDRESS, r3
	cmp/hi	r3,	r5		/* bomb if udst isn't in user space */
	bt	5f
	mov	r3,	r0
	sub	r5,	r0
	cmp/hi	r6,	r0		/* don't go beyond user space */
	bf	2f
	bra	2f
	 mov	r6,	r0

	.align 2
1:
	mov.b	@r4+,	r1		/* copy str */
	mov.b	r1,	@r5
	extu.b	r1,	r1
	add	#1,	r5
	tst	r1,	r1
	bf	2f
	bra	3f
	 mov	#0,	r0

	.align 2
2:
	add	#-1,	r0
	cmp/eq	#-1,	r0
	bf	1b
	cmp/hi	r3,	r5
	bf	6f
	mov	#0,	r0

3:
	tst	r7,	r7		/* set lencopied if needed */
	bt	4f
	mov	r4,	r1
	sub	r8,	r1
	mov.l	r1,	@r7

4:
	mov.l	.L_copyoutstr_curpcb, r1	/* clear fault handler */
	mov.l	@r1,	r2
	mov	#0,	r1
	mov.l	r1,	@(PCB_ONFAULT,r2)

	rts
	 mov.l	@r15+,	r8

5:
	bra	4b
	 mov	#EFAULT, r0

6:	bra	3b
	 mov	#ENAMETOOLONG, r0

	.align 2
.L_copyoutstr_onfault:
	.long	4b
.L_copyoutstr_VM_MAXUSER_ADDRESS:
	.long	VM_MAXUSER_ADDRESS
.L_copyoutstr_curpcb:
	.long	_C_LABEL(curpcb)
	SET_ENTRY_SIZE(copyoutstr)


/*
 * LINTSTUB: Func: int copyinstr(const void *usrc, void *kdst, size_t maxlen, size_t *lencopied)
 *	Copy a NUL-terminated string, at most maxlen characters long,
 *	from the user address space.  Return the number of characters
 *	copied (including the NUL) in *lencopied.  If the string is
 *	too long, return ENAMETOOLONG; else return 0 or EFAULT.
 */
ENTRY(copyinstr)
	mov.l	r8,	@-r15

	mov	r4,	r8
	mov.l	.L_copyinstr_curpcb, r1	/* set fault handler */
	mov.l	@r1,	r2
	mov.l	.L_copyinstr_onfault, r1
	mov.l	r1,	@(PCB_ONFAULT,r2)

	mov.l	.L_copyinstr_VM_MAXUSER_ADDRESS, r3
	cmp/hi	r3,	r4		/* bomb if usrc isn't in user space */
	bt	5f
	mov	r3,	r0
	sub	r4,	r0
	cmp/hi	r6,	r0		/* don't go beyond user space */
	bf	2f
	bra	2f
	 mov	r6,	r0

	.align 2
1:
	mov.b	@r4+,	r1		/* copy str */
	mov.b	r1,	@r5
	extu.b	r1,	r1
	add	#1,	r5
	tst	r1,	r1
	bf	2f
	bra	3f
	 mov	#0,	r0

	.align 2
2:
	add	#-1,	r0
	cmp/eq	#-1,	r0
	bf	1b
	cmp/hi	r3,	r4
	bf	6f
	mov	#0,	r0

3:
	tst	r7,	r7		/* set lencopied if needed */
	bt	4f
	mov	r4,	r1
	sub	r8,	r1
	mov.l	r1,	@r7

4:
	mov.l	.L_copyinstr_curpcb, r1	/* clear fault handler */
	mov.l	@r1,	r2
	mov	#0,	r1
	mov.l	r1,	@(PCB_ONFAULT,r2)

	rts
	 mov.l	@r15+,	r8

5:
	bra	4b
	 mov	#EFAULT, r0

6:
	bra	3b
	 mov	#ENAMETOOLONG, r0

	.align 2
.L_copyinstr_onfault:
	.long	4b
.L_copyinstr_VM_MAXUSER_ADDRESS:
	.long	VM_MAXUSER_ADDRESS
.L_copyinstr_curpcb:
	.long	_C_LABEL(curpcb)
	SET_ENTRY_SIZE(copyinstr)

/**************************************************************************/

#define	UFETCHSTORE_PROLOGUE(func)					 \
	mov.l	.L ## func ## _VM_MAXUSER_ADDRESS, r1			;\
	cmp/hi	r1,	r4	/* bomb if uaddr isn't in user space */	;\
	bt/s	2f							;\
	 mov	#EFAULT,r0						;\
	mov.l	.L ## func ## _curpcb, r1				;\
	mov.l	@r1,	r2	/* r2 = curpcb */			;\
	mov.l	.L ## func ## _onfault, r1				;\
	mov.l	r1,	@(PCB_ONFAULT,r2)	/* set pcb_onfault */

#define	UFETCHSTORE_EPILOGUE(func)					 \
	mov	#0,	r0	/* return "success!" */			;\
1:									;\
	mov.l	.L ## func ## _curpcb, r1				;\
	mov.l	@r1,	r2	/* r2 = curpcb */			;\
	mov	#0,	r1						;\
	mov.l	r1,	@(PCB_ONFAULT,r2)				;\
2:									;\
	rts								;\
	 nop								;\
									 \
	.align 2							;\
.L ## func ## _onfault:							;\
	.long	1b							;\
.L ## func ## _VM_MAXUSER_ADDRESS:					;\
	.long	VM_MAXUSER_ADDRESS - 4	/* sizeof(long) */		;\
.L ## func ## _curpcb:							;\
	.long	_C_LABEL(curpcb)

/* LINTSTUB: int _ufetch_8(const uint8_t *uaddr, uint8_t *valp); */
ENTRY(_ufetch_8)
	UFETCHSTORE_PROLOGUE(_ufetch_8)
	mov.b	@r4,	r1	/* r1 = *uaddr */
	mov.b	r1,	@r5	/* *valp = r1 */
	UFETCHSTORE_EPILOGUE(_ufetch_8)
	SET_ENTRY_SIZE(_ufetch_8)

/* LINTSTUB: int _ufetch_16(const uint16_t *uaddr, uint16_t *valp); */
ENTRY(_ufetch_16)
	UFETCHSTORE_PROLOGUE(_ufetch_16)
	mov.w	@r4,	r1	/* r1 = *uaddr */
	mov.w	r1,	@r5	/* *valp = r1 */
	UFETCHSTORE_EPILOGUE(_ufetch_16)
	SET_ENTRY_SIZE(_ufetch_16)

/* LINTSTUB: int _ufetch_32(const uint32_t *uaddr, uint32_t *valp); */
ENTRY(_ufetch_32)
	UFETCHSTORE_PROLOGUE(_ufetch_32)
	mov.l	@r4,	r1	/* r1 = *uaddr */
	mov.l	r1,	@r5	/* *valp = r1 */
	UFETCHSTORE_EPILOGUE(_ufetch_32)
	SET_ENTRY_SIZE(_ufetch_32)

/* LINTSTUB: int _ustore_8(uint8_t *uaddr, uint8_t val); */
ENTRY(_ustore_8)
	UFETCHSTORE_PROLOGUE(_ustore_8)
	mov.b	r5,	@r4	/* *uaddr = val */
	UFETCHSTORE_EPILOGUE(_ustore_8)
	SET_ENTRY_SIZE(_ustore_8)

/* LINTSTUB: int _ustore_16(uint16_t *uaddr, uint16_t val); */
ENTRY(_ustore_16)
	UFETCHSTORE_PROLOGUE(_ustore_16)
	mov.w	r5,	@r4	/* *uaddr = val */
	UFETCHSTORE_EPILOGUE(_ustore_16)
	SET_ENTRY_SIZE(_ustore_16)

/* LINTSTUB: int _ustore_32(uint32_t *uaddr, uint32_t val); */
ENTRY(_ustore_32)
	UFETCHSTORE_PROLOGUE(_ustore_32)
	mov.l	r5,	@r4	/* *uaddr = val */
	UFETCHSTORE_EPILOGUE(_ustore_32)
	SET_ENTRY_SIZE(_ustore_32)

/**************************************************************************/

/*
 * LINTSTUB: Func: int kcopy(const void *src, void *dst, size_t len)
 */
ENTRY(kcopy)
	mov.l	r8,	@-r15
	mov.l	r14,	@-r15
	sts.l	pr,	@-r15
	mov	r15,	r14

	mov	r4,	r3
	mov.l	.L_kcopy_curpcb, r1
	mov.l	@r1,	r2
	mov.l	@(PCB_ONFAULT,r2), r8	/* save old fault handler */
	mov.l	.L_kcopy_onfault, r1
	mov.l	r1,	@(PCB_ONFAULT,r2) /* set fault handler */
	mov.l	.L_kcopy_memcpy, r1
	mov	r5,	r4
	jsr	@r1			/* memcpy(dst, src, len) */
	 mov	r3,	r5
	mov	#0,	r0
1:
	mov.l	.L_kcopy_curpcb, r1	/* restore fault handler */
	mov.l	@r1,	r2
	mov.l	r8,	@(PCB_ONFAULT,r2)

	mov	r14,	r15
	lds.l	@r15+,	pr
	mov.l	@r15+,	r14
	rts
	 mov.l	@r15+,	r8

	.align 2
.L_kcopy_onfault:
	.long	1b
.L_kcopy_curpcb:
	.long	_C_LABEL(curpcb)
.L_kcopy_memcpy:
	.long	_C_LABEL(memcpy)
	SET_ENTRY_SIZE(kcopy)


#if defined(DDB) || defined(KGDB)

/*
 * LINTSTUB: Func: int setjmp(label_t *jmpbuf)
 */
ENTRY(setjmp)
	add	#4*9,	r4
	mov.l	r8,	@-r4
	mov.l	r9,	@-r4
	mov.l	r10,	@-r4
	mov.l	r11,	@-r4
	mov.l	r12,	@-r4
	mov.l	r13,	@-r4
	mov.l	r14,	@-r4
	mov.l	r15,	@-r4
	sts.l	pr,	@-r4
	rts
	 xor	r0, r0
	SET_ENTRY_SIZE(setjmp)

/*
 * LINTSTUB: Func: void longjmp(label_t *jmpbuf)
 */
ENTRY(longjmp)
	lds.l	@r4+,	pr
	mov.l	@r4+,	r15
	mov.l	@r4+,	r14
	mov.l	@r4+,	r13
	mov.l	@r4+,	r12
	mov.l	@r4+,	r11
	mov.l	@r4+,	r10
	mov.l	@r4+,	r9
	mov.l	@r4+,	r8
	rts
	 mov	#1, r0		/* return 1 from setjmp */
	SET_ENTRY_SIZE(longjmp)

#endif /* DDB || KGDB */
