/*	$NetBSD: locore.s,v 1.15 2024/01/19 05:46:36 thorpej Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: locore.s 1.66 92/12/22$
 *
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_sunos.h"
#include "opt_fpsp.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"
#include "opt_m68k_arch.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

#include "ksyms.h"

/*
 * Memory starts at 0x0000.0000, and we have linked the kernel
 * at 0x0000.2000 to ensure that VA==0 is unmapped once we turn
 * on the MMU.  We arrive here running VA==PA and with the MMU
 * disabled.
 *
 * This first 8KB of RAM at PA==0 won't go to waste, though; we
 * will use it for the kernel message buffer.
 */

/*
 * Temporary stack for a variety of purposes.
 * Try to make this the first thing in the data segment so it
 * is page aligned.  Note that if we overflow here, we run into
 * our text segment.
 */
	.data
	.space	PAGE_SIZE
ASLOCAL(tmpstk)

/*
 * Macro to relocate a symbol, used before MMU is enabled.
 */
#define	_RELOC(var, ar)		\
	lea	var,ar

#define	RELOC(var, ar)		_RELOC(_C_LABEL(var), ar)
#define	ASRELOC(var, ar)	_RELOC(_ASM_LABEL(var), ar)

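/* esym: end of the kernel symbol table, if the bootloader provides one
 * (Qemu does not pass symbols, so this stays 0; see start below). */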
BSS(esym,4)

	.globl	_C_LABEL(edata)
	.globl	_C_LABEL(etext),_C_LABEL(end)

/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
GLOBAL(kernel_text)

/*
 * start of kernel and .text!
 */
ASENTRY_NOPROFILE(start)
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#0,%a5			| RAM starts at 0 (a5)

	ASRELOC(tmpstk, %a0)
	movl	%a0,%sp			| give ourselves a temporary stack

	RELOC(edata,%a0)		| clear out BSS
	movl	#_C_LABEL(end) - 4, %d0	| (must be <= 256 kB)
	subl	#_C_LABEL(edata), %d0
	lsrl	#2,%d0
1:	clrl	%a0@+
	dbra	%d0,1b

	/*
	 * Qemu does not pass us the symbols, so leave esym alone.
	 * The bootinfo immediately follows the kernel.  Go parse
	 * it to get CPU/FPU/MMU information and figure out where
	 * the end of the loaded image really is.
	 */
	RELOC(bootinfo_start,%a0)
	movl	#_C_LABEL(end),%sp@-
	jbsr	%a0@			| bootinfo_start(end)
	addql	#4,%sp

	/* XXX XXX XXX */
	movl	#CACHE_OFF,%d0
	movc	%d0,%cacr		| clear and disable on-chip cache(s)
	/* XXX XXX XXX */

/* initialize source/destination control registers for movs */
	moveq	#FC_USERD,%d0		| user space
	movc	%d0,%sfc		|   as source
	movc	%d0,%dfc		|   and destination of transfers

	/*
	 * bootinfo_start() recorded the first PA following the
	 * bootinfo in bootinfo_end.  That represents the end of
	 * the loaded image.  Rounding that to a page gives us
	 * the first free physical page.
	 */
	RELOC(bootinfo_end,%a0)
	movl	%a0@,%d2
	addl	#PAGE_SIZE-1,%d2
	andl	#PG_FRAME,%d2		| round to a page
	movl	%d2,%a4
	addl	%a5,%a4			| convert to PA
	pea	%a5@			| firstpa
	pea	%a4@			| nextpa
	RELOC(pmap_bootstrap,%a0)
	jbsr	%a0@			| pmap_bootstrap(firstpa, nextpa)
	addql	#8,%sp

/*
 * Enable the MMU.
 * Since the kernel is mapped logical == physical, we just turn it on.
 */
	RELOC(Sysseg_pa, %a0)		| system segment table addr
	movl	%a0@,%d1		| read value (a PA)
	RELOC(mmutype, %a0)
	cmpl	#MMU_68040,%a0@		| 68040?
	jne	Lmotommu1		| no, skip
	.long	0x4e7b1807		| movc d1,srp
	jra	Lstploaddone
Lmotommu1:
#ifdef M68030
	RELOC(protorp, %a0)
	movl	%d1,%a0@(4)		| segtable address
	pmove	%a0@,%srp		| load the supervisor root pointer
#endif /* M68030 */
Lstploaddone:
	RELOC(mmutype, %a0)
	cmpl	#MMU_68040,%a0@		| 68040?
	jne	Lmotommu2		| no, skip

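	/*
	 * Note: the 68040/68060-only control register moves below are
	 * hand-assembled as .word/.long constants (the intended
	 * instruction is noted beside each one), presumably so the file
	 * assembles even when the assembler lacks support for those
	 * registers.
	 */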
	movl	#VIRT68K_TT40_IO,%d0	| DTT0 maps the I/O space
	.long	0x4e7b0006		| movc d0,dtt0

	moveq	#0,%d0			| ensure the other TT regs are disabled
	.long	0x4e7b0004		| movc d0,itt0
	.long	0x4e7b0005		| movc d0,itt1
	.long	0x4e7b0007		| movc d0,dtt1

	.word	0xf4d8			| cinva bc
	.word	0xf518			| pflusha
	movl	#MMU40_TCR_BITS,%d0
	.long	0x4e7b0003		| movc d0,tc
#ifdef M68060
	RELOC(cputype, %a0)
	cmpl	#CPU_68060,%a0@		| 68060?
	jne	Lnot060cache
	movl	#1,%d0
	.long	0x4e7b0808		| movcl d0,pcr
	movl	#0xa0808000,%d0
	movc	%d0,%cacr		| enable store buffer, both caches
	jmp	Lenab1
Lnot060cache:
#endif
	movl	#0x80008000,%d0
	movc	%d0,%cacr		| turn on both caches
	jmp	Lenab1

Lmotommu2:
	movl	#VIRT68K_TT30_IO,%sp@-	| TT0 maps the I/O space
	.long	0xf0170800		| pmove %sp@,%tt0
	clrl	%sp@			| ensure TT1 is disabled
	.long	0xf0170c00		| pmove %sp@,%tt1

	pflusha
	movl	#MMU51_TCR_BITS,%sp@	| value to load TC with
	pmove	%sp@,%tc		| load it

/*
 * Should be running mapped from this point on
 */
Lenab1:
	lea	_ASM_LABEL(tmpstk),%sp	| re-load the temporary stack
	jbsr	_C_LABEL(vec_init)	| initialize the vector table
/* call final pmap setup */
	jbsr	_C_LABEL(pmap_bootstrap_finalize)
/* set kernel stack, user SP */
	movl	_C_LABEL(lwp0uarea),%a1	| get lwp0 uarea
	lea	%a1@(USPACE-4),%sp	| set kernel stack to end of area
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP
	tstl	_C_LABEL(fputype)	| Have an FPU?
	jeq	Lenab2			| No, skip.
	clrl	%a1@(PCB_FPCTX)		| ensure null FP context
	movl	%a1,%sp@-
	jbsr	_C_LABEL(m68881_restore) | restore it (does not kill a1)
	addql	#4,%sp
Lenab2:
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jeq	Ltbia040		| yes, cache already on
	pflusha
	movl	#CACHE_ON,%d0
	movc	%d0,%cacr		| clear cache(s)
	jra	Lenab3
Ltbia040:
	.word	0xf518			| pflusha
Lenab3:
/*
 * final setup for C code:
 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	jbsr	_C_LABEL(virt68k_init)	| additional pre-main initialization
#if 0
	/*
	 * XXX Don't do the spl0() here; when Qemu performs a reboot request,
	 * XXX it seems to not clear pending interrupts, and so we blow up
	 * XXX early when the new kernel starts up.
	 */
	movw	#PSL_LOWIPL,%sr		| lower SPL
#endif
	clrw	%sp@-			| vector offset/frame type
	clrl	%sp@-			| PC - filled in by "execve"
	movw	#PSL_USER,%sp@-		| in user mode
	clrl	%sp@-			| stack adjust count and padding
	lea	%sp@(-64),%sp		| construct space for D0-D7/A0-A7
	lea	_C_LABEL(lwp0),%a0	| save pointer to frame
	movl	%sp,%a0@(L_MD_REGS)	|   in lwp0.l_md.md_regs

	jra	_C_LABEL(main)		| main()

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

/*
 * Use common m68k bus error and address error handlers.
 */
#include <m68k/m68k/busaddrerr.s>

/*
 * FP exceptions.
 */
ENTRY_NOPROFILE(fpfline)
#if defined(M68040)
	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
	jne	Lfp_unimp		| no, skip FPSP
	cmpw	#0x202c,%sp@(6)		| format type 2?
	jne	_C_LABEL(illinst)	| no, not an FP emulation
#ifdef FPSP
	jmp	_ASM_LABEL(fpsp_unimp)	| yes, go handle it
#else
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
#endif
Lfp_unimp:
#endif /* M68040 */
	jra	_C_LABEL(illinst)

ENTRY_NOPROFILE(fpunsupp)
#if defined(M68040)
	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
	jne	Lfp_unsupp		| No, skip FPSP
#ifdef FPSP
	jmp	_ASM_LABEL(fpsp_unsupp)	| yes, go handle it
#else
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
#endif
Lfp_unsupp:
#endif /* M68040 */
	jra	_C_LABEL(illinst)

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
ENTRY_NOPROFILE(fpfault)
	clrl	%sp@-		| stack adjust count
	moveml	#0xFFFF,%sp@-	| save user registers
	movl	%usp,%a0	| and save
	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
	clrl	%sp@-		| no VA arg
	movl	_C_LABEL(curpcb),%a0 | current pcb
	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
	fsave	%a0@		| save state
#if defined(M68040) || defined(M68060)
	/* always null state frame on 68040, 68060 */
	cmpl	#FPU_68040,_C_LABEL(fputype)
	jge	Lfptnull
#endif
	tstb	%a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	%d0		| no, need to tweak BIU
	movb	%a0@(1),%d0	| get frame size
	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-	| push fpsr as code argument
	frestore %a0@		| restore state
	movl	#T_FPERR,%sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup


/*
 * Other exceptions only cause four- and six-word stack frames and require
 * no post-trap stack adjustment.
 */

ENTRY_NOPROFILE(badtrap)
	moveml	#0xC0C0,%sp@-		| save scratch regs
	movw	%sp@(22),%sp@-		| push exception vector info
	clrw	%sp@-
	movl	%sp@(22),%sp@-		| and PC
	jbsr	_C_LABEL(straytrap)	| report
	addql	#8,%sp			| pop args
	moveml	%sp@+,#0x0303		| restore regs
	jra	_ASM_LABEL(rei)		| all done

ENTRY_NOPROFILE(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	tstl	_C_LABEL(astpending)	| AST pending?
	jne	Lrei1			| Yup, go deal with it.
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	rte

/*
 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
ENTRY_NOPROFILE(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push current proc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
ENTRY_NOPROFILE(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	Lkbrkpt			| yes, kernel breakpoint
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
ENTRY_NOPROFILE(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	movw	%sp@(FR_HW),%d1		| get PSW
	andw	#PSL_S,%d1		| from system mode?
	jne	Lkbrkpt			| yes, kernel breakpoint
	jra	_ASM_LABEL(fault)	| no, user-mode fault

Lkbrkpt: | Kernel-mode breakpoint or trace trap. (d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk, switch to it
	| (so the debugger can change the stack pointer).
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | a1=dst
	movl	%a1,%sp			| sp=new frame
	movql	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	jbgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to do it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	| If we have both DDB and KGDB, let KGDB see it first,
	| because KGDB will just return 0 if not connected.
	| Save args in d2, a2
	movl	%d0,%d2			| trap type
	movl	%sp,%a2			| frame ptr
#ifdef KGDB
	| Let KGDB handle it (if connected)
	movl	%a2,%sp@-		| push frame ptr
	movl	%d2,%sp@-		| push trap type
	jbsr	_C_LABEL(kgdb_trap)	| handle the trap
	addql	#8,%sp			| pop args
	cmpl	#0,%d0			| did kgdb handle it?
	jne	Lbrkpt3			| yes, done
#endif
#ifdef DDB
	| Let DDB handle it
	movl	%a2,%sp@-		| push frame ptr
	movl	%d2,%sp@-		| push trap type
	jbsr	_C_LABEL(kdb_trap)	| handle the trap
	addql	#8,%sp			| pop args
#endif
	/* Sun 3 drops into PROM here. */
Lbrkpt3:
	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.

	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/*
 * Interrupt handlers.
 *
 * For auto-vectored interrupts, the CPU provides the
 * vector 0x18+level.
 *
 * intrhand_autovec is the entry point for auto-vectored
 * interrupts.
 */

ENTRY_NOPROFILE(intrhand_autovec)
	addql	#1,_C_LABEL(intr_depth)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(intr_dispatch)	| call dispatcher
	INTERRUPT_RESTOREREG
	subql	#1,_C_LABEL(intr_depth)

	/* FALLTHROUGH to rei */

/*
 * Emulation of VAX REI instruction.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling).
 * After identifying that we need an AST, we drop the IPL to allow device
 * interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called,
 * necessitating a stack cleanup.
 */
ASENTRY_NOPROFILE(rei)
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| Nope. Just return.
	btst	#5,%sp@			| Returning to kernel mode?
	jne	Ldorte			| Yup. Can't do ASTs
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the user's SP
Lrei1:	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	pea	%sp@(12)		| fp == address of trap frame
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
Ldorte:	rte				| and do real RTE

Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
	rte				| and do real RTE

/*
 * Primitives
 */

/*
 * Use common m68k process/lwp switch and context save subroutines.
 */
#define	FPCOPROC	/* XXX: Temp. Reqd. */
#include <m68k/m68k/switch_subr.s>

#if defined(M68040) || defined(M68060)
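/*
 * suline(dst, src): copy one 16-byte line from kernel space (src, arg 2)
 * to user space (dst, arg 1) using movs, with the fault handler set up
 * through PCB_ONFAULT.  Returns 0 on success, -1 if a fault occurs.
 */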
ENTRY(suline)
	movl	%sp@(4),%a0		| address to write
	movl	_C_LABEL(curpcb),%a1	| current pcb
	movl	#Lslerr,%a1@(PCB_ONFAULT) | where to return to on a fault
	movl	%sp@(8),%a1		| address of line
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	moveq	#0,%d0			| indicate no fault
	jra	Lsldone
Lslerr:
	moveq	#-1,%d0
Lsldone:
	movl	_C_LABEL(curpcb),%a1	| current pcb
	clrl	%a1@(PCB_ONFAULT)	| clear fault address
	rts
#endif


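/*
 * External cache control entry points; these are no-ops here.
 */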
ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Misc. global variables.
 */
	.data

GLOBAL(mmutype)
	.long	MMU_68040	| default to MMU_68040

GLOBAL(cputype)
	.long	CPU_68040	| default to CPU_68040

GLOBAL(fputype)
	.long	FPU_68040	| default to FPU_68040
614