/*	$NetBSD: subr.S,v 1.43 2023/12/18 22:40:01 kalvisd Exp $	   */

/*
 * Copyright (c) 1994 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

#define JSBENTRY(x)	.globl x ; .align 2 ; x :
#define SCBENTRY(name) \
	.text			; \
	.align 2		; \
	.globl __CONCAT(X,name)	; \
__CONCAT(X,name):

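/*
 * Note on the entry macros: JSBENTRY() declares a routine entered
 * with jsb rather than calls (no entry mask; it must return with
 * rsb or rei), while SCBENTRY() emits the "X"-prefixed label form
 * used when wiring vector stubs into the System Control Block.
 */
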
		.text

#ifdef	KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky. The kernel is not loaded at its correct
 * address, so it must relocate itself: the copy routine below is
 * first moved out of the way, then it copies the kernel down to the
 * correct address and jumps back into it there.
 */
/* Copy routine */
cps:
2:	movb	(%r0)+,(%r1)+
	cmpl	%r0,%r7
	bneq	2b

3:	clrb	(%r1)+
	incl	%r0
	cmpl	%r0,%r6
	bneq	3b
	clrl	-(%sp)
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)
	movpsl	-(%sp)
	pushl	%r2
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0
	movab	cpe,%r1
	movl	$0x300000,%sp
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7
	movab	_end,%r6
	movl	$0x80000000,%r1
	movl	$0x80000200,%r0
	subl3	$0x200,%r6,%r9
	movab	2f,%r2
	subl2	$0x200,%r2
	movpsl	-(%sp)
	pushab	4(%sp)
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b				# Netbooted starts here
#else
ASENTRY(start, 0)
#endif
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Save all regs in panic
	cmpb	(%ap),$3			# symbols info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						#   save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	#   save number of symbols
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						#   save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,%r1			#
	movl	%r1,_C_LABEL(lwp0)+L_PCB	# lwp0 pcb, XXXuvm_lwp_getuarea
	bicl3	$0x80000000,%r1,%r0		# get phys lwp0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,%r1,%r0			# Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(lwp0)+L_PCB,%r0	# get PCB virtual address
	mfpr	$PR_PCBB,PCB_PADDR(%r0)		# save PCB physical address
	movab	PCB_ONFAULT(%r0),ESP(%r0)	# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state
	movl	%r1,%r0				# get lwp0 pcb
	clrl	P0LR(%r0)
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	PCB_ONFAULT(%r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)	# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */

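/*
 * Note on the hand-off above: an old-style boot passes no argument
 * list and leaves its data in registers (%r11 = boothowto, %r8 =
 * top of usable memory from VMB), while a newer boot passes the
 * address of the old RPB as its first argument; _start() therefore
 * receives either that address or 0.
 */
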

/*
 * Signal handler code.
 */

	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS_compat_16___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(esigcode):

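/*
 * The trampoline above runs in user mode: pushr $0x3f saves
 * %r0-%r5, the handler address is fetched from the sigframe at
 * 0x24(%sp) and called with three arguments, then the registers
 * are restored and the kernel is re-entered via chmk to perform
 * sigreturn.  The trailing chmk $SYS_exit and halt can only be
 * reached if sigreturn itself fails.
 */
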
#ifdef COMPAT_ULTRIX
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$ULTRIX_SYS_sigreturn
	chmk	$SYS_exit
	halt
_C_LABEL(ultrix_esigcode):
#endif

	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):
	pushr	$0x3f
	.word	0x9f16		# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0		# the callback interrupt routine
	.long	0		# its argument
	.long	0		# ptr to corresponding evcnt struct
_C_LABEL(eidsptch):

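/*
 * idsptch..eidsptch is a template that is copied out once per
 * allocated device vector, with the three zero .long slots filled
 * in for that vector.  The copied code does "jsb" to cmn_idsptch,
 * and the return address that jsb pushes points straight at those
 * slots; that is how cmn_idsptch below locates the handler, its
 * argument and the evcnt pointer.
 */
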
_C_LABEL(cmn_idsptch):
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnlock)
#endif
	movl	(%sp)+,%r0	# get pointer to idspvec
	mtpr	$IPL_VM,$PR_IPL	# Make sure we are at IPL_VM
	movl	8(%r0),%r1	# get evcnt pointer
	beql	1f		# no ptr, skip increment
	incl	EV_COUNT(%r1)	# increment low longword
	adwc	$0,EV_COUNT+4(%r1) # add any carry to hi longword
1:	mfpr	$PR_SSP, %r2	# get curlwp
	movl	L_CPU(%r2), %r2 # get curcpu
	incl	CI_NINTR(%r2)	# increment ci_data.cpu_nintr
	adwc	$0,(CI_NINTR+4)(%r2)
#if 0
	pushl	%r0
	movq	(%r0),-(%sp)
	pushab	2f
	calls	$3,_C_LABEL(printf)
	movl	(%sp)+,%r0
#endif
	pushl	4(%r0)		# push argument
	calls	$1,*(%r0)	# call interrupt routine
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	calls	$0,_C_LABEL(krnunlock)
#endif
	popr	$0x3f		# pop registers
	rei			# return from interrupt
#if 0
2:	.asciz	"intr %p(%p)\n"
#endif

ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0	# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2	# First argument, the address
	movl	8(%ap),%r1	# Sec arg, b,w,l
	pushl	%r0		# Save old IPL
	clrl	%r3
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4	# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b		# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3		# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL
	movl	%r3,%r0
	ret

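/*
 * badaddr(addr, size) above probes addr with a 1-, 2- or 4-byte
 * read at IPL_HIGH.  A machine check taken during the access is
 * routed through the memtest hook and resumes at 4f, so the
 * return value is 0 if the access succeeded and nonzero if the
 * address is bad.
 */
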
#ifdef DDB
/*
 * DDB is the only user of setjmp/longjmp.
 */
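/*
 * The jmp_buf layout used here is four longwords:
 *	[0] = %ap, [1] = %fp, [2] = return %pc, [3] = %sp to restore
 * setjmp captures them from its own call frame, so longjmp simply
 * reloads them and jumps; no other register state is preserved.
 */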
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word	0
	movl	4(%ap), %r0
	movl	8(%fp), (%r0)
	movl	12(%fp), 4(%r0)
	movl	16(%fp), 8(%r0)
	moval	28(%fp),12(%r0)
	clrl	%r0
	ret

_C_LABEL(longjmp):.word	0
	movl	4(%ap), %r1
	movl	8(%ap), %r0
	movl	(%r1), %ap
	movl	4(%r1), %fp
	movl	12(%r1), %sp
	jmp	*8(%r1)
#endif

#if defined(MULTIPROCESSOR)
	.align 2
	.globl	_C_LABEL(vax_mp_tramp)	# used to kick off multiprocessor systems.
_C_LABEL(vax_mp_tramp):
	ldpctx
	rei
#endif

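/*
 * Fast soft interrupts.  The SCBENTRY stubs further down select a
 * softint level in %r0 and branch to softint_common, which saves
 * the current (pinned) LWP's context with svpctx after arranging
 * for it to resume at softint_cleanup, then hand-crafts the softint
 * LWP's PCB so that ldpctx starts it in softint_process at
 * IPL_HIGH.  softint_process calls softint_dispatch(pinned_lwp,
 * ipl) and then switches straight back to the pinned LWP.
 */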
	.globl	softint_cleanup,softint_exit,softint_process
	.type	softint_cleanup,@function
	.type	softint_exit,@function
	.type	softint_process,@function
softint_cleanup:
	movl    L_CPU(%r0),%r1		/* get cpu_info */
	incl    CI_MTX_COUNT(%r1)	/* increment mutex count */
	movl	L_PCB(%r0),%r1		/* get PCB of softint LWP */
softint_exit:
	popr	$0x3			/* restore r0 and r1 */
	rei				/* return from interrupt */

softint_process:
	/*
	 * R6 contains pinned LWP
	 * R7 contains ipl to dispatch with
	 */
	movq	%r6,-(%sp)		/* push old lwp and ipl onto stack */
	calls	$2,_C_LABEL(softint_dispatch) /* dispatch it */

	/* We can use any registers here because ldpctx will overwrite them */
	movl	L_PCB(%r6),%r3		/* get pcb */
	movab	softint_exit,PCB_PC(%r3)/* do a quick exit */
#ifdef MULTIPROCESSOR
	movl	L_CPU(%r6),%r8
	/* XXX store-before-store barrier -- see cpu_switchto */
	movl	%r6,CI_CURLWP(%r8)
	/* XXX store-before-load barrier -- see cpu_switchto */
#endif
	/* copy AST level from current LWP to pinned LWP, reset
	   current AST level */
	mfpr	$PR_SSP,%r4		/* current LWP */
	movl	L_PCB(%r4),%r4		/* PCB address */
	movl	P0LR(%r4),%r0		/* LR and ASTLVL field, current PCB */
	movl	P0LR(%r3),%r1		/* same, pinned LWP */
	cmpl	%r0,%r1
	bgtru	1f			/* AST(current) >= AST(pinned) */
	extv	$24,$3,%r0,%r0		/* ASTLVL field for current LWP */
	insv	%r0,$24,$3,P0LR(%r3)	/* copy to pinned LWP */
	insv	$4,$24,$3,P0LR(%r4)	/* reset AST for current LWP */
1:
	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* restore PA of interrupted pcb */
	ldpctx				/* implicitly updates curlwp */
	rei


softint_common:
	mfpr	$PR_IPL,%r1
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	movpsl	-(%sp)			/* add cleanup hook */
	pushab	softint_cleanup
	svpctx

	/* We can use any registers here because ldpctx will overwrite them */
	mfpr	$PR_SSP,%r6		/* Get curlwp */
	movl	L_CPU(%r6),%r8		/* get cpu_info */
	movl	CI_SOFTLWPS(%r8)[%r0],%r2 /* get softlwp to switch to */
	movl	L_PCB(%r2),%r3		/* Get pointer to its pcb. */
	movl	%r6,PCB_R6(%r3)		/* move old lwp into new pcb */
	movl	%r1,PCB_R7(%r3)		/* move IPL into new pcb */
#ifdef MULTIPROCESSOR
	/* XXX store-before-store barrier -- see cpu_switchto */
	movl	%r2,CI_CURLWP(%r8)	/* update ci_curlwp */
	/* XXX store-before-load barrier -- see cpu_switchto */
#endif

	/*
	 * Now reset the PCB, since we have no idea what state it was
	 * last left in.
	 */
	movab	(USPACE-TRAPFRAMELEN-CALLSFRAMELEN)(%r3),%r0
					/* calculate where KSP should be */
	movl	%r0,KSP(%r3)		/* save it as SP */
	movl	%r0,PCB_FP(%r3)		/* and as the FP too */
	movab	CA_ARGNO(%r0),PCB_AP(%r3) /* update the AP as well */
	movab	softint_process,PCB_PC(%r3) /* and where we will start */
	movl	$PSL_HIGHIPL,PCB_PSL(%r3) /* Needs to be running at IPL_HIGH */

	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* set PA of new pcb */
	ldpctx				/* load it */
	rei				/* get off interrupt stack */

SCBENTRY(softclock)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_CLOCK,%r0
	brb	softint_common

SCBENTRY(softbio)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_BIO,%r0
	brb	softint_common

SCBENTRY(softnet)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_NET,%r0
	brb	softint_common

SCBENTRY(softserial)
	pushr	$0x3			/* save r0 and r1 */
	movl	$SOFTINT_SERIAL,%r0
	brb	softint_common

/*
 * Helper routine for cpu_lwp_fork.  It gets invoked by Swtchto.
 * It lets the kernel know the lwp is alive and then calls func(arg)
 * and possibly returns to sret.
 */
ENTRY(cpu_lwp_bootstrap, 0)
	movq	%r2,-(%sp)			/* save func & arg */
	movq	%r0,-(%sp)			/* push oldl/newl */
	calls	$2,_C_LABEL(lwp_startup)	/* startup the lwp */
	movl	(%sp)+,%r0			/* grab func */
	calls	$1,(%r0)			/* call it with arg */
	ret

/*
 * r1 = newlwp
 * r0 = oldlwp
 */
JSBENTRY(Swtchto)
	/* this pops the pc and psw from the stack and puts them in the pcb. */
	svpctx				# Now on interrupt stack

	/* We can now use any registers because ldpctx will overwrite them */
	/* New LWP already in %r1 */
	movl	L_PCB(%r1),%r3		# Get pointer to new pcb.
	movl	%r0,PCB_R0(%r3)		# move r0 into new pcb (return value)
#ifdef MULTIPROCESSOR
	movl	L_CPU(%r0), %r8		/* get cpu_info of old lwp */
	movl	%r8, L_CPU(%r1)		/* update cpu_info of new lwp */
	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 *
	 * XXX I'm fuzzy on the memory model of VAX.  I would guess
	 * it's TSO like x86 but I can't find a store-before-load
	 * barrier, which is the only one TSO requires explicitly.
	 */
	/* XXX store-before-store barrier */
	movl	%r1,CI_CURLWP(%r8)	/* update ci_curlwp */
	/* XXX store-before-load barrier */
#endif

	mtpr	PCB_PADDR(%r3),$PR_PCBB	# set PA of new pcb
	mtpr	$IPL_HIGH,$PR_IPL	/* we need to be at IPL_HIGH */
	ldpctx				# load it
	/* r0 already has previous lwp */
	/* r1 already has this lwp */
	/* r2/r3 and r4/r5 restored */
	rei				/* get off interrupt stack */

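/*
 * Swtchto is entered with jsb: the caller pushes a psl and the jsb
 * pushes the pc, which svpctx then files into the old pcb (see the
 * comment above).  On entry %r0 = oldlwp and %r1 = newlwp; oldlwp
 * is also stored into the new pcb's %r0 slot so the switched-to
 * context sees it as the return value.
 */
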
#
# copy/fetch/store routines.
#

ENTRY(copyout, 0)
	movl	8(%ap),%r3
	blss	3f		# kernel space
	movl	4(%ap),%r1
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1
	blss	3f		# kernel space
	movl	8(%ap),%r3
2:	mfpr	$PR_ESP,%r2
	movab	1f,(%r2)	# set pcb_onfault
4:	tstw	14(%ap)		# check if >= 64K
	bneq	5f
	movc3	12(%ap),(%r1),(%r3)
	clrl	%r0
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)		# clear pcb_onfault
	ret
5:	movc3	$0xfffc,(%r1),(%r3)
	subl2	$0xfffc,12(%ap)
	brb	4b

3:	movl	$EFAULT,%r0
	ret

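/*
 * In copyin/copyout above the sign bit doubles as the user/kernel
 * test (user space lies below 0x80000000), hence the blss to the
 * EFAULT exit.  movc3 only takes a 16-bit length, so anything of
 * 64K or more is moved in 0xfffc-byte chunks.  A fault while
 * copying unwinds through pcb_onfault and resumes at 1f.
 */
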
ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)	# save current pcb_onfault
	movab	1f,(%r3)	# set pcb_onfault
	movl	4(%ap),%r1
	movl	8(%ap),%r2
	movc3	12(%ap),(%r1),(%r2)
	clrl	%r0
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)	# restore pcb_onfault
	ret

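/*
 * kcopy() is the kernel-to-kernel variant: no address-space check,
 * but the copy still runs with pcb_onfault set so a bad address
 * returns an error instead of panicking.  The previous onfault
 * value is saved and restored, so kcopy() also works when called
 * from code that is itself running under an onfault handler.
 */
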
/*
 * copy{in,out}str() copies a string between user space and kernel space.
 * Security checks:
 *	1) user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
ENTRY(copyinstr, 0)
	tstl	4(%ap)		# kernel address?
	bgeq	8f		# no, continue
6:	movl	$EFAULT,%r0
	movl	16(%ap),%r2
	beql	7f
	clrl	(%r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)		# kernel address?
	bgeq	8f		# no, continue
	brb	6b		# yes, return EFAULT

8:	movl	4(%ap),%r5	# from
	movl	8(%ap),%r4	# to
	movl	12(%ap),%r3	# len
	movl	16(%ap),%r2	# copied
	clrl	%r0
	mfpr	$PR_ESP,%r1
	movab	2f,(%r1)	# set pcb_onfault

	tstl	%r3		# any chars to copy?
	bneq	1f		# yes, jump for more
0:	tstl	%r2		# save copied len?
	beql	2f		# no
	subl3	4(%ap),%r5,(%r2)	# save copied len
2:	mfpr	$PR_ESP,%r1
	clrl	(%r1)		# clear pcb_onfault
	ret

1:	movb	(%r5)+,(%r4)+	# copy one char
	beql	0b		# jmp if last char
	sobgtr	%r3,1b		# copy one more
	movl	$ENAMETOOLONG,%r0 # inform about too long string
	brb	0b		# out of chars

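/*
 * On success %r0 is 0 above and, when the "copied" pointer is
 * non-NULL, it receives the number of bytes moved including the
 * terminating NUL (computed as the final source pointer minus the
 * starting one).  Running out of len before the NUL yields
 * ENAMETOOLONG; a kernel-space user address yields EFAULT up
 * front, and a fault during the copy exits through pcb_onfault.
 */
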
/**************************************************************************/

	.align	2

#define	UFETCHSTORE_PROLOGUE						 \
	tstl	4(%ap)			/* uaddr in userspace? */	;\
	blss	1f			/* nope, fault */		;\
	mfpr	$PR_ESP,%r1		/* &pcb_onfault is in ESP */	;\
	movab	2f,(%r1)		/* set pcb_onfault */

#define	UFETCHSTORE_EPILOGUE						 \
	mfpr	$PR_ESP,%r1		/* &pcb_onfault is in ESP */	;\
	clrl	(%r1)			/* pcb_onfault = NULL */

#define	UFETCHSTORE_RETURN						 \
	clrl	%r0			/* return success */		;\
	ret								;\
1:	movl	$EFAULT,%r0						;\
	ret				/* return EFAULT */		;\
2:	UFETCHSTORE_EPILOGUE						;\
	ret				/* error already in %r0 */

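/*
 * Each _ufetch_*() / _ustore_*() body below is a single move
 * through the user pointer bracketed by the macros above: the
 * prologue rejects kernel addresses and points pcb_onfault at the
 * error exit, the epilogue clears it again.  The "2:" path is
 * taken when a fault unwinds through pcb_onfault with the error
 * code already in %r0.
 */
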
/* LINTSTUB: int _ufetch_8(const uint8_t *uaddr, uint8_t *valp); */
ENTRY(_ufetch_8,0)
	UFETCHSTORE_PROLOGUE
	movb	*4(%ap),*8(%ap)		# *valp = *uaddr
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ufetch_16(const uint16_t *uaddr, uint16_t *valp); */
ENTRY(_ufetch_16,0)
	UFETCHSTORE_PROLOGUE
	movw	*4(%ap),*8(%ap)		# *valp = *uaddr
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ufetch_32(const uint32_t *uaddr, uint32_t *valp); */
ENTRY(_ufetch_32,0)
	UFETCHSTORE_PROLOGUE
	movl	*4(%ap),*8(%ap)		# *valp = *uaddr
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ustore_8(uint8_t *uaddr, uint8_t val); */
ENTRY(_ustore_8,0)
	UFETCHSTORE_PROLOGUE
	movb	8(%ap),*4(%ap)		# *uaddr = val
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ustore_16(uint16_t *uaddr, uint16_t val); */
ENTRY(_ustore_16,0)
	UFETCHSTORE_PROLOGUE
	movw	8(%ap),*4(%ap)		# *uaddr = val
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/* LINTSTUB: int _ustore_32(uint32_t *uaddr, uint32_t val); */
ENTRY(_ustore_32,0)
	UFETCHSTORE_PROLOGUE
	movl	8(%ap),*4(%ap)		# *uaddr = val
	UFETCHSTORE_EPILOGUE
	UFETCHSTORE_RETURN

/**************************************************************************/

	.align	2

JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b
	rsb

JSBENTRY(Slocktry)
	clrl	%r0
	bbssi	$0,(%r1),1f
	incl	%r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f
1:	rsb

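/*
 * Simple spin locks on bit 0 of the longword at (%r1), built on
 * the interlocked branch instructions: Slock spins with bbssi
 * until it wins the bit, Slocktry is the non-blocking variant
 * returning 1 in %r0 on success and 0 if the bit was already set,
 * and Sunlock releases the bit with bbcci.
 */
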
#
# data department
#
	.data

	.globl _C_LABEL(memtest)
_C_LABEL(memtest):		# memory test in progress
	.long 0