xref: /netbsd-src/sys/arch/sun3/sun3/locore.s (revision abb0f93cd77b67f080613360c65701f85e5f5cfe)
1/*	$NetBSD: locore.s,v 1.91 2009/12/10 05:10:04 rmind Exp $	*/
2
3/*
4 * Copyright (c) 1980, 1990, 1993
5 *	The Regents of the University of California.  All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 *    may be used to endorse or promote products derived from this software
21 *    without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
36 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
37 */
38
39/*
40 * Copyright (c) 1994, 1995 Gordon W. Ross
41 * Copyright (c) 1993 Adam Glass
42 * Copyright (c) 1988 University of Utah.
43 *
44 * This code is derived from software contributed to Berkeley by
45 * the Systems Programming Group of the University of Utah Computer
46 * Science Department.
47 *
48 * Redistribution and use in source and binary forms, with or without
49 * modification, are permitted provided that the following conditions
50 * are met:
51 * 1. Redistributions of source code must retain the above copyright
52 *    notice, this list of conditions and the following disclaimer.
53 * 2. Redistributions in binary form must reproduce the above copyright
54 *    notice, this list of conditions and the following disclaimer in the
55 *    documentation and/or other materials provided with the distribution.
56 * 3. All advertising materials mentioning features or use of this software
57 *    must display the following acknowledgement:
58 *	This product includes software developed by the University of
59 *	California, Berkeley and its contributors.
60 * 4. Neither the name of the University nor the names of its contributors
61 *    may be used to endorse or promote products derived from this software
62 *    without specific prior written permission.
63 *
64 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
67 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
74 * SUCH DAMAGE.
75 *
76 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
77 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
78 */
79
80#include "opt_compat_netbsd.h"
81#include "opt_compat_svr4.h"
82#include "opt_compat_sunos.h"
83#include "opt_kgdb.h"
84#include "opt_lockdebug.h"
85
86#include "assym.h"
87#include <machine/asm.h>
88#include <machine/trap.h>
89
| The remainder of this file: startup code, trap and interrupt entry
| points, and small assembly support primitives for the sun3 port.
91
| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| First we need to set it up so we can access the sun MMU, and be otherwise
| undisturbed.  Until otherwise noted, all code must be position independent
| as the boot loader put us low in memory, but we are linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	moveq	#FC_CONTROL,%d0		| make movs access "control"
	movc	%d0,%sfc		| space where the sun3 designers
	movc	%d0,%dfc		| put all the "useful" stuff

| Set context zero and stay there until pmap_bootstrap.
	moveq	#0,%d0
	movsb	%d0,CONTEXT_REG		| movs to control space hits the MMU regs

| In order to "move" the kernel to high memory, we are going to copy the
| first 4 Mb of pmegs such that we will be mapped at the linked address.
| This is all done by copying in the segment map (top-level MMU table).
| We will unscramble which PMEGs we actually need later.

	movl	#(SEGMAP_BASE+0),%a0		| src
	movl	#(SEGMAP_BASE+KERNBASE),%a1	| dst
	movl	#(0x400000/NBSG),%d0		| count

L_per_pmeg:
	movsb	%a0@,%d1		| copy segmap entry
	movsb	%d1,%a1@
	addl	#NBSG,%a0		| increment pointers
	addl	#NBSG,%a1
	subql	#1,%d0			| decrement count
	bgt	L_per_pmeg

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump (absolute, not PC-relative)

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.

| Do bootstrap stuff needed before main() gets called.
| Make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk),%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the %sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	lea	_C_LABEL(lwp0),%a0	| lwp0
	movl	%a0@(L_PCB),%a1		| XXXuvm_lwp_getuarea
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%a1,%a0@(L_MD_REGS)	| lwp0.p_md.md_regs = trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")
180
181| That is all the assembly startup code we need on the sun3!
182| The rest of this is like the hp300/locore.s where possible.
183
184/*
185 * Trap/interrupt vector routines
186 */
187#include <m68k/m68k/trap_subr.s>
188
| Bus-error entry point.  If a device probe is in progress (nofault
| is set), longjmp back to the prober; otherwise fall into addrerr,
| which builds a full fault frame and decodes the 68020 special
| status word (SSW) to find the fault address and rerun bits.
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| thats it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	| %d1 = fault VA, %d0 = (zero-extended) SSW from here on.
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it
	| otherwise fall through into the sun3-specific MMU check below
241
/*
 * the sun3 specific code
 *
 * our mission: figure out whether what we are looking at is
 *              bus error in the UNIX sense, or
 *	        a memory error i.e a page fault
 *
 * [this code replaces similarly mmu specific code in the hp300 code]
 */
sun3_mmu_specific:
	clrl %d0			| make sure top bits are cleared too
	movl %d1,%sp@-			| save %d1
	movc %sfc,%d1			| save %sfc to %d1
	moveq #FC_CONTROL,%d0		| %sfc = FC_CONTROL
	movc %d0,%sfc
	movsb BUSERR_REG,%d0		| get value of bus error register
	movc %d1,%sfc			| restore %sfc
	movl %sp@+,%d1			| restore %d1
	andb #BUSERR_MMU,%d0 		| is this an MMU fault?
	jeq Lisberr			| non-MMU bus error
	| MMU fault: fall through into Lismerr (page fault)
/* End of sun3 specific code. */
263
| Common tails for the fault handlers above: push the trap type
| expected by trap() and join the shared fault-stack-adjust path.
Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
273
/*
 * FP exceptions.
 * These entries hand unimplemented-FP-instruction and unsupported-
 * data-type exceptions to the common fault path as emulation traps.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
288
/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-		| stack adjust count
	moveml	#0xFFFF,%sp@-	| save user registers
	movl	%usp,%a0	| and save
	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
	clrl	%sp@-		| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
	fsave	%a0@		| save state
	tstb	%a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	%d0		| no, need to tweak BIU
	movb	%a0@(1),%d0	| get frame size
	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-	| push fpsr as code argument
	frestore %a0@		| restore state
	movl	#T_FPERR,%sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
314
/*
 * Other exceptions only cause four and six word stack frame and require
 * no post-trap stack adjustment.
 * Unexpected traps land here; straytrap() logs them and we return.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done
326
/*
 * Trap 0 is for system calls
 * On entry %d0 holds the syscall number; syscall() gets it as its arg.
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done
343
/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in %d0, addr in %a1, length in %d1
 * Args are pushed right-to-left for cachectl1(cmd, addr, len, proc).
 */
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
358
/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happen.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
379
/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
394
ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2 		| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1	| %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1		| %d1 = bytes remaining to copy
Lbrkpt1:
	movl	%a0@+,%a1@+		| copy one long at a time
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done
438
439/* Use common m68k sigreturn */
440#include <m68k/m68k/sigreturn.s>
441
/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

| Save/restore only the volatile registers %d0,%d1,%a0,%a1
| (mask bits reversed for the predecrement form); the C handlers
| preserve the remaining registers per the m68k calling convention.
#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE
497
/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	| NOTE(review): 10 counter slots below but only 8 names above;
	| verify against vmstat's pairing of intrnames/intrcnt entries.
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text
515	.text
516
517/*
518 * Emulation of VAX REI instruction.
519 *
520 * This code is (mostly) un-altered from the hp300 code,
521 * except that sun machines do not need a simulated SIR
522 * because they have a real software interrupt register.
523 *
524 * This code deals with checking for and servicing ASTs
525 * (profiling, scheduling) and software interrupts (network, softclock).
526 * We check for ASTs first, just like the VAX.  To avoid excess overhead
527 * the T_ASTFLT handling code will also check for software interrupts so we
528 * do not have to do it here.  After identifying that we need an AST we
529 * drop the IPL to allow device interrupts.
530 *
531 * This code is complicated by the fact that sendsig may have been called
532 * necessitating a stack cleanup.
533 */
534
535ASGLOBAL(rei)
536#ifdef	DIAGNOSTIC
537	tstl	_C_LABEL(panicstr)	| have we paniced?
538	jne	Ldorte			| yes, do not make matters worse
539#endif
540	tstl	_C_LABEL(astpending)	| AST pending?
541	jeq	Ldorte			| no, done
542Lrei1:
543	btst	#5,%sp@			| yes, are we returning to user mode?
544	jne	Ldorte			| no, done
545	movw	#PSL_LOWIPL,%sr		| lower SPL
546	clrl	%sp@-			| stack adjust
547	moveml	#0xFFFF,%sp@-		| save all registers
548	movl	%usp,%a1		| including
549	movl	%a1,%sp@(FR_SP)		|    the users SP
550	clrl	%sp@-			| VA == none
551	clrl	%sp@-			| code == none
552	movl	#T_ASTFLT,%sp@-		| type == async system trap
553	pea	%sp@(12)		| fp == address of trap frame
554	jbsr	_C_LABEL(trap)		| go handle it
555	lea	%sp@(16),%sp		| pop value args
556	movl	%sp@(FR_SP),%a0		| restore user SP
557	movl	%a0,%usp		|   from save area
558	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
559	jne	Laststkadj		| yes, go to it
560	moveml	%sp@+,#0x7FFF		| no, restore most user regs
561	addql	#8,%sp			| toss SP and stack adjust
562	rte				| and do real RTE
563Laststkadj:
564	lea	%sp@(FR_HW),%a1		| pointer to HW frame
565	addql	#8,%a1			| source pointer
566	movl	%a1,%a0			| source
567	addw	%d0,%a0			|  + hole size = dest pointer
568	movl	%a1@-,%a0@-		| copy
569	movl	%a1@-,%a0@-		|  8 bytes
570	movl	%a0,%sp@(FR_SP)		| new SSP
571	moveml	%sp@+,#0x7FFF		| restore user registers
572	movl	%sp@,%sp		| and our SP
573Ldorte:
574	rte				| real return
575
576/*
577 * Initialization is at the beginning of this file, because the
578 * kernel entry point needs to be at zero for compatibility with
579 * the Sun boot loader.  This works on Sun machines because the
580 * interrupt vector table for reset is NOT at address zero.
581 * (The MMU has a "boot" bit that forces access to the PROM)
582 */
583
584/*
585 * Use common m68k sigcode.
586 */
587#include <m68k/m68k/sigcode.s>
588#ifdef COMPAT_SUNOS
589#include <m68k/m68k/sunos_sigcode.s>
590#endif
591#ifdef COMPAT_SVR4
592#include <m68k/m68k/svr4_sigcode.s>
593#endif
594
595	.text
596
597/*
598 * Primitives
599 */
600
601/*
602 * Use common m68k support routines.
603 */
604#include <m68k/m68k/support.s>
605
606/*
607 * Use common m68k process/lwp switch and context save subroutines.
608 */
609#define FPCOPROC	/* XXX: Temp. Reqd. */
610#include <m68k/m68k/switch_subr.s>
611
612
613/* suline() */
614/* TBIA, TBIS, TBIAS, TBIAU */
615
/*
 * Invalidate instruction cache
 * (writes IC_CLEAR to %cacr; no arguments, no return value)
 */
ENTRY(ICIA)
	movl	#IC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate i-cache
	rts
623
624/* DCIA, DCIS */
625
/*
 * Invalidate data cache.
 * No-op here: this routine does nothing on this configuration.
 */
ENTRY(DCIU)
	rts
631
632/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
633/* PCIA, ecacheon, ecacheoff */
634
/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 * Result is returned in both %d0 and %a0 (pointer-return convention).
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0
	rts
648
| Accessors for the source/destination function-code registers and
| the vector base register.  Results come back in %d0/%a0.
ENTRY(getsfc)
	movc	%sfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getdfc)
	movc	%dfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getvbr)
	movc	%vbr,%a0
	rts

| setvbr(value): install a new vector base register.
ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts
667
668/* loadustp, ptest_addr */
669
/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

| _getsr(): return the current status register in %d0/%a0.
ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%a0
	rts

| _spl(newsr): set %sr to the argument, return the previous %sr.
ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

| _splraise(newsr): raise %sr to the argument only if it is higher
| than the current IPL; always return the previous %sr in %d0.
ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1 	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts
701
/*
 * Save and restore 68881 state.
 * Both take a pointer to the FP save area as their single argument.
 * A null (idle) fsave frame means there is no live FP state to move.
 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts

ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lm68881rdone:
	frestore %a0@			| restore state
	rts
724
/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0			| count down by delay_divisor
	jgt	L_delay			| until the count goes non-positive
	rts
756
/*
 * void set_segmap_allctx(vaddr_t va, int sme)
 * Set the segment map entry for va to sme in every MMU context.
 * Register roles are noted inline; %d2/%d3 (callee-saved) are
 * preserved via the moveml pair (mask #0x3000 / #0x000c).
 */
ENTRY(set_segmap_allctx)
	linkw	%fp,#0
	moveml	#0x3000,%sp@-		| save %d2/%d3
	movl	8(%fp),%d3		| d3 = va
	andl	#0xffffffc,%d3
	bset	#29,%d3
	movl	%d3,%a1			| a1 = ctrladdr, d3 avail
	movl	12(%fp),%d1		| d1 = sme
	moveq	#FC_CONTROL,%d0
	movl	#CONTEXT_REG,%a0	| a0 = ctxreg
	movc	%sfc,%d3		| d3 = oldsfc
	movc	%d0,%sfc
	movsb	%a0@,%d2
	andi	#7,%d2			| d2 = oldctx
	movc	%d3,%sfc		| restore sfc, d3 avail
	movc	%dfc,%d3		| d3 = olddfc
	movc	%d0,%dfc
	movl	#(CONTEXT_NUM - 1),%d0	| d0 = ctx number
1:
	movsb	%d0,%a0@		| change to ctx
	movsb	%d1,%a1@		| set segmap
	dbf	%d0,1b			| loop setting each ctx
	movsb	%d2,%a0@		| restore ctx
	movc	%d3,%dfc		| restore dfc
	moveml	%sp@+,#0x000c		| restore %d2/%d3
	unlk	%fp
	rts
787
| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3_MONSTART
	.set	_PROM_BASE,SUN3_PROM_BASE
	.set	_MONEND,SUN3_MONEND
796
| End of locore.s
798