/* virtex5 ppc440x5 machine assist */

#include	"mem.h"

#define CPU0ONLY		/* if defined, put cpu1 to sleep for now */


/*
 * Special Purpose Registers of interest here (440 versions)
 */
#define SPR_CCR0	0x3b3		/* Core Configuration Register 0 */
#define SPR_CCR1	0x378	/* core configuration register 1 */
#define SPR_DAC1	0x13c		/* Data Address Compare 1 */
#define SPR_DAC2	0x13d		/* Data Address Compare 2 */
#define SPR_DBCR0	0x134		/* Debug Control Register 0 */
#define SPR_DBCR1	0x135		/* Debug Control Register 1 */
#define SPR_DBCR2	0x136		/* Debug Control Register 2 */
#define SPR_DBSR	0x130		/* Debug Status Register */
#define SPR_DVC1	0x13e		/* Data Value Compare 1 */
#define SPR_DVC2	0x13f		/* Data Value Compare 2 */
#define SPR_DEAR	0x3D		/* Data Exception Address Register */
#define SPR_ESR		0x3E		/* Exception Syndrome Register */
#define SPR_IAC1	0x138		/* Instruction Address Compare 1 */
#define SPR_IAC2	0x139		/* Instruction Address Compare 2 */
#define SPR_IAC3	0x13a		/* Instruction Address Compare 3 */
#define SPR_IAC4	0x13b		/* Instruction Address Compare 4 */
#define SPR_PID		0x30		/* Process ID (not the same as 405) */
#define SPR_PVR		0x11f		/* Processor Version Register */

#define SPR_SPRG0	0x110		/* SPR General 0 */
#define SPR_SPRG1	0x111		/* SPR General 1 */
#define SPR_SPRG2	0x112		/* SPR General 2 */
#define SPR_SPRG3	0x113		/* SPR General 3 */

/* beware that these registers differ in R/W ability on 440 compared to 405 */
#define SPR_SPRG4R		0x104	/* SPR general 4; user/supervisor R */
#define SPR_SPRG5R		0x105	/* SPR general 5; user/supervisor R */
#define SPR_SPRG6R		0x106	/* SPR general 6; user/supervisor R */
#define SPR_SPRG7R		0x107	/* SPR general 7; user/supervisor R */
#define SPR_SPRG4W	0x114		/* SPR General 4; supervisor W */
#define SPR_SPRG5W	0x115		/* SPR General 5; supervisor W */
#define SPR_SPRG6W	0x116		/* SPR General 6; supervisor W */
#define SPR_SPRG7W	0x117		/* SPR General 7; supervisor W */

#define SPR_SRR0	0x01a		/* Save/Restore Register 0 */
#define SPR_SRR1	0x01b		/* Save/Restore Register 1 */
#define SPR_CSRR0	0x03a		/* Critical Save/Restore Register 0 */
#define SPR_CSRR1	0x03b		/* Critical Save/Restore Register 1 */
#define SPR_TBL		0x11c		/* Time Base Lower */
#define SPR_TBU		0x11d		/* Time Base Upper */
#define	SPR_PIR		0x11e		/* Processor Identity Register */

#define SPR_TCR	0x154	/* timer control */
#define SPR_TSR	0x150	/* timer status */
#define SPR_MMUCR	0x3B2	/* mmu control */
#define SPR_DNV0	0x390	/* data cache normal victim 0-3 */
#define SPR_DNV1	0x391
#define SPR_DNV2	0x392
#define SPR_DNV3	0x393
#define SPR_DTV0	0x394	/* data cache transient victim 0-3 */
#define SPR_DTV1	0x395
#define SPR_DTV2	0x396
#define SPR_DTV3	0x397
#define SPR_DVLIM	0x398	/* data cache victim limit */
#define SPR_INV0	0x370	/* instruction cache normal victim 0-3 */
#define SPR_INV1	0x371
#define SPR_INV2	0x372
#define SPR_INV3	0x373
#define SPR_ITV0	0x374	/* instruction cache transient victim 0-3 */
#define SPR_ITV1	0x375
#define SPR_ITV2	0x376
#define SPR_ITV3	0x377
#define SPR_IVOR(n)	(0x190+(n))	/* interrupt vector offset registers 0-15 */
#define SPR_IVPR	0x03F	/* interrupt vector prefix register */
#define SPR_IVLIM	0x399	/* instruction cache victim limit */

#define SPR_MCSRR0	0x23A	/* 440GX only */
#define SPR_MCSRR1	0x23B
#define SPR_MCSR	0x23C

#define SPR_DEC		0x16	/* on 440 they've gone back to using DEC instead of PIT ... */
#define SPR_DECAR	0x36	/* ... with the auto-reload register now visible */

/* 440 */

/* use of SPRG registers in save/restore */
#define	SAVER0	SPR_SPRG0
#define	SAVER1	SPR_SPRG1
#define	SAVELR	SPR_SPRG2
#define	SAVEXX	SPR_SPRG3

/* special instruction definitions */
#define	BDNZ	BC	16,0,
#define	BDNE	BC	0,2,
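/*
 * in BC, BO=16 decrements CTR and branches if the decremented CTR is
 * non-zero; BO=0 decrements CTR and branches if CTR is non-zero and the
 * CR bit selected by BI (here 2, CR0's EQ) is clear.
 */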

#define	TBRL	268	/* read time base lower in MFTB */
#define	TBRU	269	/* read time base upper in MFTB */
#define	MFTB(tbr,d)	WORD	$((31<<26)|((d)<<21)|((tbr&0x1f)<<16)|(((tbr>>5)&0x1f)<<11)|(371<<1))
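/* the 10-bit TBR field is encoded with its halves swapped, hence the two 5-bit insertions above */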

#define	TLBIA		WORD	$((31<<26)|(370<<1))
#define	TLBSYNC		WORD	$((31<<26)|(566<<1))

/* 400 models; perhaps others */
#define	ICCCI(a,b)	WORD	$((31<<26)|((a)<<16)|((b)<<11)|(966<<1))
#define	DCCCI(a,b)	WORD	$((31<<26)|((a)<<16)|((b)<<11)|(454<<1))
/* these follow the source -> dest ordering */
#define	DCREAD(s,t)	WORD	$((31<<26)|((t)<<21)|((s)<<11)|(486<<1))
#define	DCRF(n)			((((n)>>5)&0x1F)|(((n)&0x1F)<<5))
#define	MTDCR(s,n)	WORD	$((31<<26)|((s)<<21)|(DCRF(n)<<11)|(451<<1))
#define	MFDCR(n,t)	WORD	$((31<<26)|((t)<<21)|(DCRF(n)<<11)|(323<<1))
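/* DCRF likewise swaps the halves of the 10-bit DCR number, as the mtdcr/mfdcr encodings require */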
#define	MSYNC		WORD	$((31<<26)|(598<<1))
#define	TLBRELO(a,t)	WORD	$((31<<26)|((t)<<21)|((a)<<16)|(2<<11)|(946<<1))
#define	TLBREMD(a,t)	WORD	$((31<<26)|((t)<<21)|((a)<<16)|(1<<11)|(946<<1))
#define	TLBREHI(a,t)	WORD	$((31<<26)|((t)<<21)|((a)<<16)|(0<<11)|(946<<1))
#define	TLBWELO(s,a)	WORD	$((31<<26)|((s)<<21)|((a)<<16)|(2<<11)|(978<<1))
#define	TLBWEMD(s,a)	WORD	$((31<<26)|((s)<<21)|((a)<<16)|(1<<11)|(978<<1))
#define	TLBWEHI(s,a)	WORD	$((31<<26)|((s)<<21)|((a)<<16)|(0<<11)|(978<<1))
#define	TLBSXF(a,b,t)	WORD	$((31<<26)|((t)<<21)|((a)<<16)|((b)<<11)|(914<<1))
#define	TLBSXCC(a,b,t)	WORD	$((31<<26)|((t)<<21)|((a)<<16)|((b)<<11)|(914<<1)|1)
/*
 * these are useless because there aren't CE/CEI equivalents for
 * critical interrupts, which are caused by critical input, debug exceptions
 * or watchdog time-outs.
 */
//#define WRTMSR_EE(s)	WORD	$((31<<26)|((s)<<21)|(131<<1))
//#define WRTMSR_EEI(e)	WORD	$((31<<26)|((e)<<15)|(163<<1))

/*
 * there are three flavours of barrier: MBAR, MSYNC and ISYNC.
 * ISYNC is a context sync, a strong instruction barrier.
 * MSYNC is an execution sync (weak instruction barrier) + data storage barrier.
 * MBAR is a memory (data storage) barrier.
 */
#define MBAR	EIEIO

/*
 * on some models mtmsr doesn't synchronise enough (eg, 603e).
 * the 440 does, however.
 */
#define MSRSYNC			/* MSYNC; ISYNC */

/* on the 405 series, the prefetcher madly fetches across RFI, sys call, and others; use BR 0(PC) to stop */
#define RFI	WORD	$((19<<26)|(50<<1)); BR 0(PC)
#define	RFCI	WORD	$((19<<26)|(51<<1)); BR 0(PC)

#define STEP(c)	MSYNC; MOVW $(Uartlite+4), R7; MOVW c, R8; MOVW R8, 0(R7); MSYNC
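/*
 * Uartlite+4 (presumably from mem.h) is the Xilinx uartlite transmit
 * FIFO; STEP writes one progress character there, bracketed by MSYNCs,
 * without waiting for FIFO space.
 */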

/*
 * print progress character iff on cpu0 (PIR 0, or the alternative id 017).
 * steps on R7 and R8; needs SB set and a TLB entry for the i/o registers.
 */
#define STEP0(c, zero, notzero) \
	MOVW	SPR(SPR_PIR), R7; \
	CMP	R7, $0; \
	BEQ	zero; \
	CMP	R7, $017; \
	BNE	notzero; \
zero:	STEP(c); \
notzero: MSYNC

#define	UREGSPACE	(UREGSIZE+8)
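/* a Ureg plus the 8 bytes below it: saveureg builds the Ureg* at R1+8 */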

	NOSCHED

	TEXT start(SB), 1, $-4
	/*
	 * utterly clobber any outstanding machine checks before anything else
	 */
	MOVW	$0, R0
	MSRSYNC
	MOVW	MSR, R3
	RLWNM	$0, R3, $~MSR_ME, R3	/* disable machine check traps */
	ISYNC
	MOVW	R3, MSR
	MSRSYNC
	MOVW	$-1, R3
	MOVW	R3, SPR(SPR_MCSR)	/* clear machine check causes */
	MSRSYNC
	MOVW	R0, SPR(SPR_ESR)	/* clears machine check */
	MSRSYNC
	MSYNC				/* instead of above MSRSYNC */

	/*
	 * setup MSR
	 * turn off interrupts, FPU
	 * use 0x000 as exception prefix
	 * enable machine check
	 */
	MOVW	MSR, R3
	RLWNM	$0, R3, $~MSR_EE, R3
	RLWNM	$0, R3, $~MSR_CE, R3
	RLWNM	$0, R3, $~MSR_FP, R3
	RLWNM	$0, R3, $~(MSR_IS|MSR_DS), R3
	OR	$(MSR_ME|MSR_DE), R3
	ISYNC
	MOVW	R3, MSR
	MSRSYNC

	/* except during trap handling, R0 is zero from now on */
	MOVW	$0, R0
	MOVW	R0, CR

	/* we may be running at 0 or KZERO */
	/* setup SB for pre mmu */
	BL	1(PC)
	MOVW	LR, R9
	MOVW	R9, R10
	ANDCC	$KZERO, R10			/* R10 gets 0 or KZERO */
	MOVW	$setSB-KZERO(SB), R2		/* SB until tlb established */
	OR	R10, R2				/* adapt to address space */

	MOVW	$18, R18
	MOVW	$19, R19
	MOVW	$20, R20
	MOVW	$21, R21
	MOVW	$22, R22
	MOVW	$23, R23
	MOVW	$24, R24

STEP0($'\n', zeronl, notzeronl)
STEP0($'P',  zeroP,  notzeroP)

/*
 * invalidate the caches
 */
	ICCCI(0, 2)	/* this flushes the icache of a 440; the errata reveals that EA is used; we'll use SB */
	ISYNC
	DCCCI(0, 2)	/* this flash invalidates the dcache of a 440 (must not be in use) */
	MSRSYNC

	MOVW	R0, SPR(SPR_DBCR0)
	MOVW	R0, SPR(SPR_DBCR1)
	MOVW	R0, SPR(SPR_DBCR2)
	ISYNC
	MOVW	$~0, R3
	MOVW	R3, SPR(SPR_DBSR)

STEP0($'l', zerol, notzerol)

	/*
	 * CCR0:
	 *	recover from data parity = 1
	 *	disable gathering = 0
	 *	disable trace broadcast = 1
	 *	disable apu instruction broadcast = 1
	 *	force load/store alignment = 1
	 *	apu/fpu use = 0
	 *	fill one speculative line on icache miss (errata #38, #40) = 0
	 * CCR1:
	 *	normal parity, normal cache operation
	 *	cpu timer advances with tick of CPU input clock
	 *		(not timer clock) TCS=0
	 */
	MOVW	$((1<<30)|(0<<21)|(1<<20)|(1<<15)|(1<<8)|(0<<5)|(0<<2)), R3
	MOVW	R3, SPR(SPR_CCR0)
	MOVW	$(0<<7), R3		/* TCS=0; 1<<11 is full-line flush */
	MOVW	R3, SPR(SPR_CCR1)

	/* clear i/d cache regions */
	MOVW	R0, SPR(SPR_INV0)
	MOVW	R0, SPR(SPR_INV1)
	MOVW	R0, SPR(SPR_INV2)
	MOVW	R0, SPR(SPR_INV3)
	MOVW	R0, SPR(SPR_DNV0)
	MOVW	R0, SPR(SPR_DNV1)
	MOVW	R0, SPR(SPR_DNV2)
	MOVW	R0, SPR(SPR_DNV3)

	/* set i/d cache limits (all normal) */
	MOVW	$((0<<22)|(63<<11)|(0<<0)), R3	/* TFLOOR=0, TCEILING=63 ways, NFLOOR = 0 */
	MOVW	R3, SPR(SPR_DVLIM)
	MOVW	R3, SPR(SPR_IVLIM)

	/*
	 * set other system configuration values
	 */
	MOVW	R0, SPR(SPR_DEC)
	MOVW	$~0, R3
	MOVW	R3, SPR(SPR_TSR)
	MOVW	R0, SPR(SPR_TCR)

#ifdef CPU0ONLY
	/* put cpus other than first to sleep */
	MOVW	SPR(SPR_PIR), R3
	CMP	R3, R0
	BEQ	okcpu
	CMP	R3, $017
	BEQ	okcpu
STEP($'Z')
STEP($'Z')
	ADD	$'0', R3
STEP(R3)
sleep:
	MOVW	MSR, R3
	RLWNM	$0, R3, $~MSR_EE, R3
	RLWNM	$0, R3, $~MSR_CE, R3
	OR	$MSR_WE, R3
	ISYNC
	MOVW	R3, MSR
	MSRSYNC
	BR	sleep
okcpu:
#endif

	MOVW	$KZERO, R3
	CMPU	R9, R3
	BGT	highaddr
	MOVW	$'L', R11			/* R11 gets 'L' or 'H' */
	BR	done
highaddr:
	MOVW	$'H', R11
done:

STEP0($'a', zeroa, notzeroa)
	BL	kernelmmu(SB)

	/*
	 * now running with MMU on, in kernel address space (at KZERO).
	 */

STEP0($'n', zeron, notzeron)
	/* set SB to match new address space */
	MOVW	$setSB(SB), R2

STEP0($' ', zerosp, notzerosp)
	/* make the vectors match the old values */
	MOVW	$(KZERO | VECBASE), R3
	MOVW	R3, SPR(SPR_IVPR)	/* vector prefix at KZERO */
	MOVW	$INT_CI, R3
	MOVW	R3, SPR(SPR_IVOR(0))
	MOVW	$INT_MCHECK, R3
	MOVW	R3, SPR(SPR_IVOR(1))
	MOVW	$INT_DSI, R3
	MOVW	R3, SPR(SPR_IVOR(2))
	MOVW	$INT_ISI, R3
	MOVW	R3, SPR(SPR_IVOR(3))
	MOVW	$INT_EI, R3
	MOVW	R3, SPR(SPR_IVOR(4))
	MOVW	$INT_ALIGN, R3
	MOVW	R3, SPR(SPR_IVOR(5))
	MOVW	$INT_PROG, R3
	MOVW	R3, SPR(SPR_IVOR(6))
	MOVW	$INT_FPU, R3
	MOVW	R3, SPR(SPR_IVOR(7))	/* reserved (FPU?) */
	MOVW	$INT_SYSCALL, R3
	MOVW	R3, SPR(SPR_IVOR(8))	/* system call */
	MOVW	$INT_APU, R3
	MOVW	R3, SPR(SPR_IVOR(9))	/* aux. proc. unavail. */
	MOVW	$INT_PIT, R3
	MOVW	R3, SPR(SPR_IVOR(10))	/* decrementer */
	MOVW	$INT_FIT, R3
	MOVW	R3, SPR(SPR_IVOR(11))	/* fixed interval */
	MOVW	$INT_WDT, R3
	MOVW	R3, SPR(SPR_IVOR(12))	/* watchdog */
	MOVW	$INT_DMISS,	R3
	MOVW	R3, SPR(SPR_IVOR(13))	/* data TLB */
	MOVW	$INT_IMISS,	R3
	MOVW	R3, SPR(SPR_IVOR(14))	/* instruction TLB */
	MOVW	$INT_DEBUG,  R3
	MOVW	R3, SPR(SPR_IVOR(15))	/* debug */

	ISYNC
	/* invalidate the caches again to flush any addresses below KZERO */
	ICCCI(0, 2)	/* this flushes the icache of a 440; the errata reveals that EA is used; we'll use SB */
	ISYNC

/*	BL	kfpinit(SB)	*/

STEP0($'9', zero9, notzero9)
	/* set up Mach */
	MOVW	$mach0(SB), R(MACH)
	MOVW	SPR(SPR_PIR), R3
	CMP	R3, R0
	BEQ	bootcpub
	CMP	R3, $017
	BEQ	bootcpub
	ADD	$MACHSIZE, R(MACH)	/* use second Mach struct on cpu1 */
bootcpub:
	ADD	$(MACHSIZE-8), R(MACH), R1	/* set stack */
	SUB	$4, R(MACH), R3
	ADD	$4, R1, R4
// STEP0($'z', zero90, notzero90)
clrmach:
	MOVWU	R0, 4(R3)
	CMP	R3, R4
	BNE	clrmach

	MOVW	R0, R(USER)
	MOVW	R0, 0(R(MACH))

	MSRSYNC
	MSYNC				/* instead of above MSRSYNC */

STEP0($' ', zerosp2, notzerosp2)
	MOVW	SPR(SPR_PIR), R3
	CMP	R3, R0
	BEQ	bootcpu
	CMP	R3, $017
	BNE	appcpu
bootcpu:
	/* only clear bss on cpu0 */
	MOVW	$edata(SB), R3
	MOVW	$end(SB), R4
	ADD	$4, R4
	SUB	$4, R3
clrbss:
	MOVWU	R0, 4(R3)
	CMP	R3, R4
	BNE	clrbss

appcpu:
	MSYNC

	/* print H or L */
STEP0(R11, zerodig, notzerodig)
STEP0($'\r', zerocr, notzerocr)
STEP0($'\n', zeronl2, notzeronl2)

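	/*
	 * (200*1000*1000)/300 is 666,666 iterations of the 3-instruction
	 * loop below; the "3 or 4 ms" figure presumably assumes a cpu
	 * clock of a few hundred MHz.
	 */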
	MOVW	$((200*1000*1000)/300), R1	/* 3 or 4 ms */
delay:
	SUB	$1, R1
	CMP	R1, R0
	BNE	delay

	BL	main(SB)
	BR	0(PC)   /* paranoia -- not reached */


/* R10 is or-ed into addresses of tlbtab & tlbtabe */
TEXT	kernelmmu(SB), 1, $-4
	/* make following TLB entries shared, TID=PID=0 */
	MOVW	R0, SPR(SPR_PID)

	/*
	 * allocate cache on store miss, disable U1 as transient,
	 * disable U2 as SWOA, no dcbf or icbi exception, tlbsx search 0.
	 */
	MOVW	R0, SPR(SPR_MMUCR)

	/* map various things 1:1 */
	MOVW	$tlbtab-KZERO(SB), R4
	OR	R10, R4				/* adapt to address space */
	MOVW	$tlbtabe-KZERO(SB), R5
	OR	R10, R5				/* adapt to address space */
	SUB	R4, R5
	MOVW	$(3*4), R6
	DIVW	R6, R5
	SUB	$4, R4
	MOVW	R5, CTR
	MOVW	$63, R3
ltlb:
	MOVWU	4(R4), R5	/* TLBHI */
	TLBWEHI(5,3)
	MOVWU	4(R4), R5	/* TLBMD */
	TLBWEMD(5,3)
	MOVWU	4(R4), R5	/* TLBLO */
	TLBWELO(5,3)
	SUB	$1, R3
	BDNZ	ltlb

	/* clear all remaining entries to remove any aliasing from boot */
	CMP	R3, R0
	BEQ	cltlbe
cltlb:
	TLBWEHI(0,3)
	TLBWEMD(0,3)
	TLBWELO(0,3)
	SUB	$1, R3
	CMP	R3, R0
	BGE	cltlb
cltlbe:

	/*
	 * we're currently relying on the shadow I/D TLBs.  to switch to
	 * the new TLBs, we need a context-synchronising instruction.  ISYNC
	 * won't work here: the PC is still below KZERO, no new TLB entry
	 * supports that address, and once we ISYNC the shadow i-tlb entry
	 * vanishes, taking the PC's mapping with it.  so use RFI instead,
	 * landing in the caller at KZERO.
	 */
	MOVW	LR, R4
	OR	$KZERO, R4
	MOVW	R4, SPR(SPR_SRR0)
	MOVW	MSR, R4
	MOVW	R4, SPR(SPR_SRR1)
	RFI	/* resume in kernel mode in caller; R3 has the index of the first unneeded TLB entry */

TEXT	tlbinval(SB), 1, $-4
	TLBWEHI(0, 3)
	TLBWEMD(0, 3)
	TLBWELO(0, 3)
	ISYNC
	RETURN

TEXT	splhi(SB), 1, $-4
	MOVW	MSR, R3
	RLWNM	$0, R3, $~MSR_EE, R4
	RLWNM	$0, R4, $~MSR_CE, R4
	MOVW	R4, MSR
	MSRSYNC
	MOVW	LR, R31
	MOVW	R31, 4(R(MACH))	/* save PC in m->splpc */
	RETURN

/*
 * everything from here up to, but excluding, spldone
 * will be billed by devkprof to the pc saved when we went splhi.
 */
TEXT	spllo(SB), 1, $-4
	MOVW	MSR, R3
	OR	$MSR_EE, R3, R4
	OR	$MSR_CE, R4
	MOVW	R4, MSR
	MSRSYNC
	RETURN

TEXT	splx(SB), 1, $-4
	MOVW	LR, R31
	MOVW	R31, 4(R(MACH))	/* save PC in m->splpc */
	/* fall through */

TEXT	splxpc(SB), 1, $-4
	MOVW	MSR, R4
	RLWMI	$0, R3, $MSR_EE, R4
	RLWMI	$0, R3, $MSR_CE, R4
	MOVW	R4, MSR
	MSRSYNC
	RETURN

/***/
TEXT	spldone(SB), 1, $-4
	RETURN

TEXT	islo(SB), 1, $-4
	MOVW	MSR, R3
	MOVW	$(MSR_EE|MSR_CE), R4
	AND	R4, R3
	RETURN

TEXT	setlabel(SB), 1, $-4
	MOVW	LR, R31
	MOVW	R1, 0(R3)
	MOVW	R31, 4(R3)
	MOVW	$0, R3
	RETURN

TEXT	gotolabel(SB), 1, $-4
	MOVW	4(R3), R31
	MOVW	R31, LR
	MOVW	0(R3), R1
	MOVW	$1, R3
	RETURN

TEXT	touser(SB), 1, $-4
	/* splhi */
	MOVW	MSR, R5
	RLWNM	$0, R5, $~MSR_EE, R5
	RLWNM	$0, R5, $~MSR_CE, R5
	MOVW	R5, MSR
	MSRSYNC
	MOVW	R(USER), R4			/* up */
	MOVW	8(R4), R4			/* up->kstack */
	ADD	$(KSTACK-UREGSPACE), R4
	MOVW	R4, SPR(SPR_SPRG7W)	/* save for use in traps/interrupts */
	MOVW	$(UTZERO+32), R5	/* header appears in text */
	MOVW	$UMSR, R4
	MOVW	R4, SPR(SPR_SRR1)
	MOVW	R3, R1
	MOVW	R5, SPR(SPR_SRR0)
	ISYNC				/* should be redundant with RFI */
	RFI

/* invalidate i-cache */
TEXT iccci(SB), 1, $-4
	ICCCI(0, 2)
	MSYNC
	RETURN

/* invalidate i-cache region */
TEXT	icflush(SB), 1, $-4		/* icflush(virtaddr, count) */
	MSYNC
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(ICACHELINESZ-1), R5
	SUB	R5, R3
	ADD	R3, R4
	ADD	$(ICACHELINESZ-1), R4
	SRAW	$ICACHELINELOG, R4
	MOVW	R4, CTR
icf0:	ICBI	(R5)
	ADD	$ICACHELINESZ, R5
	BDNZ	icf0
	ISYNC
	RETURN

/* write-back and invalidate d-cache region */
TEXT	dcflush(SB), 1, $-4		/* dcflush(virtaddr, count) */
	MSYNC
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(DCACHELINESZ-1), R5
	CMP	R4, $0
	BLE	dcf1
	SUB	R5, R3
	ADD	R3, R4
	ADD	$(DCACHELINESZ-1), R4
	SRAW	$DCACHELINELOG, R4
	MOVW	R4, CTR
dcf0:	DCBF	(R5)
	ADD	$DCACHELINESZ, R5
	BDNZ	dcf0
dcf1:
	MSYNC
	RETURN

/* write-back d-cache region */
TEXT	dcbst(SB), 1, $-4		/* dcbst(virtaddr, count) */
	MSYNC
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(DCACHELINESZ-1), R5
	CMP	R4, $0
	BLE	dcbst1
	SUB	R5, R3
	ADD	R3, R4
	ADD	$(DCACHELINESZ-1), R4
	SRAW	$DCACHELINELOG, R4
	MOVW	R4, CTR
dcbst0:	DCBST	(R5)
	ADD	$DCACHELINESZ, R5
	BDNZ	dcbst0
dcbst1:
	MSYNC
	RETURN

/* invalidate d-cache region */
TEXT	dcbi(SB), 1, $-4		/* dcbi(virtaddr, count) */
	MSYNC
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(DCACHELINESZ-1), R5
	CMP	R4, $0
	BLE	dcbi1
	SUB	R5, R3
	ADD	R3, R4
	ADD	$(DCACHELINESZ-1), R4
	SRAW	$DCACHELINELOG, R4
	MOVW	R4, CTR
dcbi0:	DCBI	(R5)
	ADD	$DCACHELINESZ, R5
	BDNZ	dcbi0
dcbi1:
	MSYNC
	RETURN

TEXT	tas32(SB), 1, $-4
	MOVW	R3, R4
	MOVW	$0xdead,R5
tas1:
	DCBF	(R4)				/* fix for 603x bug */
	LWAR	(R4), R3
	CMP	R3, $0
	BNE	tas0
	STWCCC	R5, (R4)
	BNE	tas1
tas0:
	RETURN

TEXT _xinc(SB), 1, $-4				/* long _xinc(long*); */
TEXT ainc(SB), 1, $-4				/* long ainc(long*); */
	MOVW	R3, R4
_ainc:
	DCBF	(R4)				/* fix for 603x bug */
	LWAR	(R4), R3
	ADD	$1, R3
	STWCCC	R3, (R4)
	BNE	_ainc

	CMP	R3, $0				/* overflow if -ve or 0 */
	BGT	_return
_trap:
	MOVW	$0, R0
	MOVW	(R0), R0			/* over under sideways down */
_return:
	RETURN

TEXT _xdec(SB), 1, $-4				/* long _xdec(long*); */
TEXT adec(SB), 1, $-4				/* long adec(long*); */
	MOVW	R3, R4
_adec:
	DCBF	(R4)				/* fix for 603x bug */
	LWAR	(R4), R3
	ADD	$-1, R3
	STWCCC	R3, (R4)
	BNE	_adec

	CMP	R3, $0				/* underflow if -ve */
	BLT	_trap
	RETURN

TEXT cas32(SB), 1, $-4			/* int cas32(void*, u32int, u32int) */
	MOVW	R3, R4			/* addr */
	MOVW	old+4(FP), R5
	MOVW	new+8(FP), R6
	DCBF	(R4)			/* fix for 603x bug? */
	LWAR	(R4), R3
	CMP	R3, R5
	BNE	fail
	STWCCC	R6, (R4)
	BNE	fail
	MOVW	$1, R3
	RETURN
fail:
	MOVW	$0, R3
	RETURN
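/*
 * the intended semantics, as a C sketch (the lwarx/stwcx. pair above
 * makes the compare and store atomic):
 *	int cas32(void *p, u32int old, u32int new) {
 *		if(*(u32int*)p != old) return 0;
 *		*(u32int*)p = new;
 *		return 1;
 *	}
 */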

TEXT	getpit(SB), 1, $-4
	MOVW	SPR(SPR_DEC), R3	/* they've moved it back to DEC */
	RETURN

TEXT	putpit(SB), 1, $-4
	MOVW	R3, SPR(SPR_DEC)
	MOVW	R3, SPR(SPR_DECAR)
	RETURN

TEXT	putpid(SB), 1, $-4
TEXT	pidput(SB), 1, $-4
	MOVW	R3, SPR(SPR_PID)
	MOVW	SPR(SPR_MMUCR), R4
	RLWMI	$0, R3, $0xFF, R4
	MOVW	R4, SPR(SPR_MMUCR)
	RETURN

TEXT	getpid(SB), 1, $-4
TEXT	pidget(SB), 1, $-4
	MOVW	SPR(SPR_PID), R3
	RLWNM	$0, R3, $0xFF, R3
	RETURN

TEXT	getpir(SB), 1, $-4
	MOVW	SPR(SPR_PIR), R3
	CMP	R3, $017
	BNE	normal
	MOVW	R0, R3
normal:
	RETURN

TEXT	putstid(SB), 1, $-4
TEXT	stidput(SB), 1, $-4
	MOVW	SPR(SPR_MMUCR), R4
	RLWMI	$0, R3, $0xFF, R4
	MOVW	R4, SPR(SPR_MMUCR)
	RETURN

TEXT	getstid(SB), 1, $-4
TEXT	stidget(SB), 1, $-4
	MOVW	SPR(SPR_MMUCR), R3
	RLWNM	$0, R3, $0xFF, R3
	RETURN

TEXT	gettbl(SB), 1, $-4
	MFTB(TBRL, 3)
	RETURN

TEXT	gettbu(SB), 1, $-4
	MFTB(TBRU, 3)
	RETURN

TEXT	gettsr(SB), 1, $-4
	MOVW	SPR(SPR_TSR), R3
	RETURN

TEXT	puttsr(SB), 1, $-4
	MOVW	R3, SPR(SPR_TSR)
	RETURN

TEXT	puttcr(SB), 1, $-4
	MOVW	R3, SPR(SPR_TCR)
	RETURN

TEXT	getpvr(SB), 1, $-4
	MOVW	SPR(SPR_PVR), R3
	RETURN

TEXT	getmsr(SB), 1, $-4
	MOVW	MSR, R3
	RETURN

TEXT	putmsr(SB), 1, $-4
	MOVW	R3, MSR
	MSRSYNC
	RETURN

TEXT	getmcsr(SB), 1, $-4
	MOVW	SPR(SPR_MCSR), R3
	RETURN

TEXT	putmcsr(SB), 1, $-4
	MOVW	R3, SPR(SPR_MCSR)
	RETURN

TEXT	getesr(SB), 1, $-4
	MOVW	SPR(SPR_ESR), R3
	RETURN

TEXT	putesr(SB), 1, $-4
	MOVW	R3, SPR(SPR_ESR)
	RETURN

TEXT	putevpr(SB), 1, $-4
	MOVW	R3, SPR(SPR_IVPR)
	RETURN

TEXT getccr0(SB), 1, $-4
	MOVW	SPR(SPR_CCR0), R3
	RETURN

TEXT	getdear(SB), 1, $-4
	MOVW	SPR(SPR_DEAR), R3
	RETURN

TEXT	getdcr(SB), 1, $-4
	MOVW	$_getdcr(SB), R5
	SLW	$3, R3
	ADD	R3, R5
	MOVW	R5, CTR
	BR	(CTR)

TEXT	putdcr(SB), 1, $-4
	MOVW	$_putdcr(SB), R5
	SLW	$3, R3
	ADD	R3, R5
	MOVW	R5, CTR
	MOVW	8(R1), R3
	BR	(CTR)
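/*
 * _getdcr and _putdcr are presumably tables of 8-byte stubs defined
 * elsewhere, one MFDCR or MTDCR plus a return per DCR number, since the
 * DCR number must be encoded in the instruction itself; SLW $3 scales
 * R3 to an 8-byte slot before the indirect branch.
 */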

TEXT	putdbsr(SB), 1, $-4
	MOVW	R3, SPR(SPR_DBSR)
	RETURN

TEXT	barriers(SB), 1, $-4
TEXT	mbar(SB), 1, $-4
	MBAR
	RETURN

TEXT	sync(SB), 1, $-4
	MSYNC
	RETURN

TEXT	isync(SB), 1, $-4
	ISYNC
	RETURN

TEXT	tlbwrx(SB), 1, $-4
	MOVW	hi+4(FP), R5
	MOVW	mid+8(FP), R6
	MOVW	lo+12(FP), R7
	MSYNC
	TLBWEHI(5, 3)
	TLBWEMD(6, 3)
	TLBWELO(7, 3)
	ISYNC
	RETURN

TEXT	tlbrehi(SB), 1, $-4
	TLBREHI(3, 3)
	RETURN

TEXT	tlbremd(SB), 1, $-4
	TLBREMD(3, 3)
	RETURN

TEXT	tlbrelo(SB), 1, $-4
	TLBRELO(3, 3)
	RETURN

TEXT	tlbsxcc(SB), 1, $-4
	TLBSXCC(0, 3, 3)
	BEQ	tlbsxcc0
	MOVW	$-1, R3	/* not found */
tlbsxcc0:
	RETURN

TEXT	gotopc(SB), 1, $0
	MOVW	R3, CTR
	MOVW	LR, R31	/* for trace back */
	BR	(CTR)

TEXT	dtlbmiss(SB), 1, $-4
	MOVW	R0, SPR(SAVER0)
	MOVW	MSR, R0
	RLWNM	$0, R0, $~MSR_CE, R0
	MOVW	R0, MSR
	MOVW	SPR(SAVER0), R0

	MOVW	R3, SPR(SPR_SPRG1)
	MOVW	$INT_DMISS, R3
	MOVW	R3, SPR(SAVEXX)	/* value for cause if entry not in soft tlb */
	MOVW	SPR(SPR_DEAR), R3
	BR	tlbmiss

TEXT	itlbmiss(SB), 1, $-4
	MOVW	R0, SPR(SAVER0)
	MOVW	MSR, R0
	RLWNM	$0, R0, $~MSR_CE, R0
	MOVW	R0, MSR
	MOVW	SPR(SAVER0), R0

	MOVW	R3, SPR(SPR_SPRG1)
	MOVW	$INT_IMISS, R3
	MOVW	R3, SPR(SAVEXX)
	MOVW	SPR(SPR_SRR0), R3

tlbmiss:
	/* R3 contains missed address */
	RLWNM	$0, R3, $~(BY2PG-1), R3	/* just the page */
	MOVW	R2, SPR(SPR_SPRG0)
	MOVW	R4, SPR(SPR_SPRG2)
	MOVW	R5, SPR(SPR_SPRG6W)
	MOVW	R6, SPR(SPR_SPRG4W)
	MOVW	CR, R6
	MOVW	R6, SPR(SPR_SPRG5W)
	MOVW	$setSB(SB), R2
	MOVW	$mach0(SB), R2
	MOVW	(6*4)(R2), R4		/* m->tlbfault++ */
	ADD	$1, R4
	MOVW	R4, (6*4)(R2)
	MOVW	SPR(SPR_PID), R4
	SRW	$12, R3, R6
	RLWMI	$2, R4, $(0xFF<<2), R3	/* shift and insert PID for match */
	XOR	R3, R6	/* hash=(va>>12)^(pid<<2); (assumes STLBSIZE is 10 to 12 bits) */
	MOVW	(3*4)(R2), R5			/* m->stlb */
	RLWNM	$0, R6, $(STLBSIZE-1), R6	/* mask */
	SLW	$1, R6, R4
	ADD	R4, R6
	SLW	$2, R6		/* index 12-byte entries */
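	/*
	 * so far, as a C sketch:
	 *	match = (va & ~(BY2PG-1)) | (pid<<2);
	 *	i = (((va>>12) ^ match) & (STLBSIZE-1)) * 12;
	 * below, i indexes m->stlb's 12-byte {hi, mid, lo} entries and
	 * Softtlb.hi is compared against match.
	 */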
	MOVWU	(R6+R5), R4	/* fetch Softtlb.hi for comparison; updated address goes to R5 */
	CMP	R4, R3
	BNE	tlbtrap
	MFTB(TBRL, 6)
	MOVW	(4*4)(R2), R4		/* m->utlbhi */
	RLWNM	$0, R6, $(NTLB-1), R6	/* pseudo-random tlb index */
	CMP	R6, R4
	BLE	tlbm1
	SUB	R4, R6
tlbm1:
	RLWNM	$0, R3, $~(BY2PG-1), R3
	OR	$(TLB4K | TLBVALID), R3	/* make valid tlb hi */
	TLBWEHI(3, 6)
	MOVW	4(R5), R4	/* tlb mid */
	TLBWEMD(4, 6)
	MOVW	8(R5), R4	/* tlb lo */
	TLBWELO(4, 6)
	ISYNC
	MOVW	SPR(SPR_SPRG5R), R6
	MOVW	R6, CR
	MOVW	SPR(SPR_SPRG4R), R6
	MOVW	SPR(SPR_SPRG6R), R5
	MOVW	SPR(SPR_SPRG2), R4
	MOVW	SPR(SPR_SPRG1), R3
	MOVW	SPR(SPR_SPRG0), R2
	RFI

tlbtrap:
	MOVW	SPR(SPR_SPRG5R), R6
	MOVW	R6, CR
	MOVW	SPR(SPR_SPRG4R), R6
	MOVW	SPR(SPR_SPRG6R), R5
	MOVW	SPR(SPR_SPRG2), R4
	MOVW	SPR(SPR_SPRG1), R3
	MOVW	SPR(SPR_SPRG0), R2
	MOVW	R0, SPR(SAVER0)
	MOVW	LR, R0
	MOVW	R0, SPR(SAVELR)
	BR	trapcommon

/*
 * following Book E, traps thankfully leave the mmu on.
 * the following code has been executed at the exception
 * vector location already:
 *	MOVW R0, SPR(SAVER0)
 *	(critical interrupts disabled in MSR, using R0)
 *	MOVW LR, R0
 *	MOVW R0, SPR(SAVELR)
 *	bl	trapvec(SB)
 */
TEXT	trapvec(SB), 1, $-4
	MOVW	LR, R0
	MOVW	R0, SPR(SAVEXX)			/* save interrupt vector offset */
trapcommon:					/* entry point for machine checks, and full tlb faults */
	MOVW	R1, SPR(SAVER1)			/* save stack pointer */
	/* did we come from user space? */
	MOVW	SPR(SPR_SRR1), R0
	MOVW	CR, R1
	MOVW	R0, CR
	BC	4,17,ktrap			/* if MSR[PR]=0, we are in kernel space */

	/* was user mode, switch to kernel stack and context */
	MOVW	R1, CR

	MOVW	SPR(SPR_SPRG7R), R1		/* up->kstack+KSTACK-UREGSPACE, set in touser and forkret */
//	MFTB(TBRL, RTBL)			/* time-stamp tracing in R28 */
	BL	saveureg(SB)
	MOVW	$mach0(SB), R(MACH)
	MOVW	8(R(MACH)), R(USER)
	BL	trap(SB)
	BR	restoreureg

ktrap:
	/* was kernel mode, R(MACH) and R(USER) already set */
	MOVW	R1, CR
	MOVW	SPR(SAVER1), R1
	SUB	$UREGSPACE, R1	/* push onto current kernel stack */
	BL	saveureg(SB)
	BL	trap(SB)

restoreureg:
	MOVMW	48(R1), R2	/* r2:r31 */
	/* defer R1, R0 */
	MOVW	36(R1), R0
	MOVW	R0, CTR
	MOVW	32(R1), R0
	MOVW	R0, XER
	MOVW	28(R1), R0
	MOVW	R0, CR	/* CR */
	MOVW	24(R1), R0
	MOVW	R0, LR
	MOVW	20(R1), R0
	MOVW	R0, SPR(SPR_SPRG7W)	/* kstack for traps from user space */
	MOVW	16(R1), R0
	MOVW	R0, SPR(SPR_SRR0)	/* old PC */
	MOVW	12(R1), R0
	RLWNM	$0, R0, $~MSR_WE, R0	/* remove wait state */
	MOVW	R0, SPR(SPR_SRR1)	/* old MSR */
	/* cause, skip */
	MOVW	40(R1), R0
	MOVW	44(R1), R1	/* old SP */
	RFI

/*
 * machine check.
 * make it look like the others.
 * it's safe to destroy SPR_SRR0/1 because they can only be in
 * use if a critical interrupt has interrupted a non-critical interrupt
 * before it has had a chance to block critical interrupts,
 * but no recoverable machine checks can occur during a critical interrupt,
 * so the lost state doesn't matter.
 */
TEXT	trapmvec(SB), 1, $-4
	MOVW	LR, R0
	MOVW	R0, SPR(SAVEXX)
	MOVW	SPR(SPR_MCSRR0), R0		/* PC of excepting insn */
	MOVW	R0, SPR(SPR_SRR0)
	MOVW	SPR(SPR_MCSRR1), R0		/* old MSR */
	MOVW	R0, SPR(SPR_SRR1)
	BR	trapcommon

/*
 * external interrupts (non-critical)
 */
TEXT	intrvec(SB), 1, $-4
	MOVW	LR, R0
	MOVW	R0, SPR(SAVEXX)			/* save interrupt vector offset */
	MOVW	R1, SPR(SAVER1)			/* save stack pointer */

	/* did we come from user space? */
	MOVW	SPR(SPR_SRR1), R0
	MOVW	CR, R1
	MOVW	R0, CR
	BC	4,17,intr1			/* if MSR[PR]=0, we are in kernel space */

	/* was user mode, switch to kernel stack and context */
	MOVW	R1, CR
	MOVW	SPR(SPR_SPRG7R), R1		/* up->kstack+KSTACK-UREGSPACE, set in touser and forkret */
	BL	saveureg(SB)
	MOVW	$mach0(SB), R(MACH)
	MOVW	8(R(MACH)), R(USER)
	BL	intr(SB)
	BR	restoreureg

intr1:
	/* was kernel mode, R(MACH) and R(USER) already set */
	MOVW	R1, CR
	MOVW	SPR(SAVER1), R1
	SUB	$UREGSPACE, R1	/* push onto current kernel stack */
	BL	saveureg(SB)
	BL	intr(SB)
	BR	restoreureg

/*
 * critical interrupt
 */
TEXT	critintrvec(SB), 1, $-4
	MOVW	LR, R0
	MOVW	R0, SPR(SAVEXX)
	MOVW	R1, SPR(SAVER1)			/* save stack pointer */

	/* did we come from user space? */
	MOVW	SPR(SPR_CSRR1), R0
	MOVW	CR, R1
	MOVW	R0, CR
	BC	4,16,kintrintr		/* if MSR[EE]=0, kernel was interrupted at start of intrvec */
	BC	4,17,kcintr1		/* if MSR[PR]=0, we are in kernel space */

ucintr:
	/* was user mode or intrvec interrupted: switch to kernel stack and context */
	MOVW	R1, CR
	MOVW	SPR(SPR_SPRG7R), R1		/* up->kstack+KSTACK-UREGSPACE, set in touser and forkret */
	BL	saveureg(SB)
	MOVW	$mach0(SB), R(MACH)
	MOVW	8(R(MACH)), R(USER)
	BR	cintrcomm

kintrintr:
	/* kernel mode, and EE off, so kernel intrvec interrupted, but was previous mode kernel or user? */
	MOVW	SPR(SPR_SRR1), R0
	MOVW	R0, CR
	BC	(4+8),17,ucintr	/* MSR[PR]=1, we were in user space, need set up */

kcintr1:
	/* was kernel mode and external interrupts enabled, R(MACH) and R(USER) already set */
	MOVW	R1, CR
	MOVW	SPR(SAVER1), R1
	SUB	$UREGSPACE, R1	/* push onto current kernel stack */
	BL	saveureg(SB)

cintrcomm:
	/* special part of Ureg for critical interrupts only (using Ureg.dcmp, Ureg.icmp, Ureg.dmiss) */
	MOVW	SPR(SPR_SPRG6R), R4	/* critical interrupt saves volatile R0 in SPRG6 */
	MOVW	R4, (160+8)(R1)
	MOVW	SPR(SPR_CSRR0), R4		/* store critical interrupt pc */
	MOVW	R4, (164+8)(R1)
	MOVW	SPR(SPR_CSRR1), R4		/* critical interrupt msr */
	MOVW	R4, (168+8)(R1)

	BL	intr(SB)

	/* first restore usual part of Ureg */
	MOVMW	48(R1), R2	/* r2:r31 */
	/* defer R1, R0 */
	MOVW	40(R1), R0
	MOVW	R0, SPR(SAVER0)		/* restore normal r0 save */
	MOVW	36(R1), R0
	MOVW	R0, CTR
	MOVW	32(R1), R0
	MOVW	R0, XER
	MOVW	28(R1), R0
	MOVW	R0, CR	/* CR */
	MOVW	24(R1), R0
	MOVW	R0, LR
	MOVW	20(R1), R0
	MOVW	R0, SPR(SPR_SPRG7W)	/* kstack for traps from user space */
	MOVW	16(R1), R0
	MOVW	R0, SPR(SPR_SRR0)	/* saved normal PC */
	MOVW	12(R1), R0
	MOVW	R0, SPR(SPR_SRR1)	/* saved normal MSR */

	/* restore special bits for critical interrupts */
	MOVW	(164+8)(R1), R0		/* critical interrupt's saved pc */
	MOVW	R0, SPR(SPR_CSRR0)
	MOVW	(168+8)(R1), R0
	RLWNM	$0, R0, $~MSR_WE, R0	/* remove wait state */
	MOVW	R0, SPR(SPR_CSRR1)

	/* cause, skip */
	MOVW	(160+8)(R1), R0		/* critical interrupt's saved R0 */
	MOVW	44(R1), R1	/* old SP */
	RFCI

/*
 * enter with stack set and mapped.
 * on return, SB (R2) has been set, and R3 has the Ureg*,
 * the MMU has been re-enabled, kernel text and PC are in KSEG,
 * Stack (R1), R(MACH) and R(USER) are set by caller, if required.
 */
TEXT	saveureg(SB), 1, $-4
	MOVMW	R2, 48(R1)			/* save gprs r2 to r31 */
	MOVW	$setSB(SB), R2
	MOVW	SPR(SAVER1), R4
	MOVW	R4, 44(R1)
	MOVW	SPR(SAVER0), R5
	MOVW	R5, 40(R1)
	MOVW	CTR, R6
	MOVW	R6, 36(R1)
	MOVW	XER, R4
	MOVW	R4, 32(R1)
	MOVW	CR, R5
	MOVW	R5, 28(R1)
	MOVW	SPR(SAVELR), R6	/* LR */
	MOVW	R6, 24(R1)
	MOVW	SPR(SPR_SPRG7R), R6		/* up->kstack+KSTACK-UREGSPACE */
	MOVW	R6, 20(R1)
	MOVW	SPR(SPR_SRR0), R0
	MOVW	R0, 16(R1)				/* PC of excepting insn (or next insn) */
	MOVW	SPR(SPR_SRR1), R0
	MOVW	R0, 12(R1)				/* old MSR */
	MOVW	SPR(SAVEXX), R0
	MOVW	R0, 8(R1)	/* cause/vector */
	ADD	$8, R1, R3	/* Ureg* */
	STWCCC	R3, (R1)	/* break any pending reservations */
	MOVW	$0, R0	/* compiler/linker expect R0 to be zero */
	RETURN

/*
 * restore state from Ureg and return from trap/interrupt
 */
TEXT forkret(SB), 1, $-4
	MOVW	R1, 20(R1)	/* up->kstack+KSTACK-UREGSPACE set in ureg */
	BR	restoreureg

/*
 * 4xx specific
 */
TEXT	firmware(SB), 1, $0
	ISYNC
	MOVW	$(3<<28), R3
	MOVW	R3, SPR(SPR_DBCR0)	/* system reset */
	ISYNC
	BR	0(PC)

GLOBL	mach0(SB), $(MAXMACH*MACHSIZE)