xref: /inferno-os/os/mpc/l.s (revision 74a4d8c26dd3c1e9febcb717cfd6cb6512991a7a)
#include	"mem.h"

#define	MB	(1024*1024)

/*
 * options
 */
#undef	MMUTWC		/* we don't map enough memory to need table walk */
#undef	SHOWCYCLE	/* might be needed for BDM debugger to keep control */

/*
 * common ppc special purpose registers
 */
#define DSISR	18
#define DAR	19	/* Data Address Register */
#define DEC	22	/* Decrementer */
#define SRR0	26	/* Saved Registers (exception) */
#define SRR1	27
#define SPRG0	272	/* Supervisor Private Registers */
#define SPRG1	273
#define SPRG2	274
#define SPRG3	275
#define TBRU	269	/* Time base Upper/Lower (Reading) */
#define TBRL	268
#define TBWU	285	/* Time base Upper/Lower (Writing) */
#define TBWL	284
#define PVR	287	/* Processor Version */

/*
 * mpc8xx-specific special purpose registers of interest here
 */
#define EIE	80
#define EID	81
#define NRI	82
#define IMMR	638
#define IC_CSR	560
#define IC_ADR	561
#define IC_DAT	562
#define DC_CSR	568
#define DC_ADR	569
#define DC_DAT	570
#define MI_CTR	784
#define MI_AP	786
#define MI_EPN	787
#define MI_TWC	789
#define MI_RPN	790
#define MI_DBCAM	816
#define MI_DBRAM0	817
#define MI_DBRAM1	818
#define MD_CTR	792
#define M_CASID	793
#define MD_AP	794
#define MD_EPN	795
#define M_TWB	796
#define MD_TWC	797
#define MD_RPN	798
#define	M_TW	799
#define	MD_DBCAM	824
#define	MD_DBRAM0	825
#define	MD_DBRAM1	826

/* use of SPRG registers in save/restore */
#define	SAVER0	SPRG0
#define	SAVER1	SPRG1
#define	SAVELR	SPRG2
#define	SAVEXX	SPRG3

/* special instruction definitions */
#define	BDNZ	BC	16,0,
#define	BDNE	BC	0,2,
#define	TLBIA	WORD	$((31<<26)|(370<<1))
#define	MFTB(tbr,d)	WORD	$((31<<26)|((d)<<21)|((tbr&0x1f)<<16)|(((tbr>>5)&0x1f)<<11)|(371<<1))
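/*
 * MFTB hand-assembles an mftb instruction (primary opcode 31,
 * extended opcode 371): d is the destination GPR, and the 10-bit
 * time base register number is encoded with its two 5-bit halves
 * swapped, just as the SPR field of mfspr is.
 * For example, MFTB(TBRL, 3) reads the low word of the time base into R3.
 */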

/* on some models mtmsr doesn't synchronise enough (eg, 603e) */
#define	MSRSYNC	SYNC; ISYNC

#define	UREGSPACE	(UREGSIZE+8)

/* could define STEP to set an LED to mark progress */
#define	STEP(x)

/*
 * Boot first processor
 */
	TEXT start(SB), $-4

	MOVW	MSR, R3
	RLWNM	$0, R3, $~EE, R3
	RLWNM	$0, R3, $~FPE, R3
	OR	$ME, R3
	ISYNC
	MOVW	R3, MSR	/* turn off interrupts but enable traps */
	MSRSYNC
	MOVW	$0, R0	/* except during trap handling, R0 is zero from now on */
	MOVW	R0, CR
	MOVW	$setSB(SB), R2

/*
 * reset the caches and disable them for now
 */
	MOVW	SPR(IC_CSR), R4	/* read and clear */
	MOVW	$(5<<25), R4
	MOVW	R4, SPR(IC_CSR)	/* unlock all */
	ISYNC
	MOVW	$(6<<25), R4
	MOVW	R4, SPR(IC_CSR)	/* invalidate all */
	ISYNC
	MOVW	$(2<<25), R4
	MOVW	R4, SPR(IC_CSR)	/* disable i-cache */
	ISYNC

	SYNC
	MOVW	SPR(DC_CSR), R4	/* read and clear */
	MOVW	$(10<<24), R4
	SYNC
	MOVW	R4, SPR(DC_CSR)	/* unlock all */
	ISYNC
	MOVW	$(12<<24), R4
	SYNC
	MOVW	R4, SPR(DC_CSR)	/* invalidate all */
	ISYNC
	MOVW	$(4<<24), R4
	SYNC
	MOVW	R4, SPR(DC_CSR)	/* disable d-cache */
	ISYNC

#ifdef SHOWCYCLE
	MOVW	$0, R4
#else
	MOVW	$7, R4
#endif
	MOVW	R4, SPR(158)		/* cancel `show cycle' for normal instruction execution */
	ISYNC

/*
 * set other system configuration values
 */
	MOVW	$PHYSIMM, R4
	MOVW	R4, SPR(IMMR)		/* set internal memory base */

STEP(1)

	BL	kernelmmu(SB)

STEP(2)
	/* no kfpinit on 82x */

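/*
 * set up the first processor's Mach structure and stack:
 * R1 is pointed near the top of mach0 to serve as the initial stack,
 * and the MOVWU update-form stores below clear mach0 word by word
 * (R3 starts one word below the base, R4 is the last word of the
 * MACHSIZE region, so every word is zeroed).
 */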
	MOVW	$mach0(SB), R(MACH)
	ADD	$(MACHSIZE-8), R(MACH), R1
	SUB	$4, R(MACH), R3
	ADD	$4, R1, R4
clrmach:
	MOVWU	R0, 4(R3)
	CMP	R3, R4
	BNE	clrmach

	MOVW	R0, R(USER)
	MOVW	R0, 0(R(MACH))

	MOVW	$edata(SB), R3
	MOVW	$end(SB), R4
	ADD	$4, R4
	SUB	$4, R3
clrbss:
	MOVWU	R0, 4(R3)
	CMP	R3, R4
	BNE	clrbss

STEP(3)
	BL	main(SB)
	BR	0(PC)

TEXT	kernelmmu(SB), $0
	TLBIA
	ISYNC

	MOVW	$0, R4
	MOVW	R4, SPR(M_CASID)	/* set supervisor space */
	MOVW	$(0<<29), R4		/* allow i-cache when IR=0 */
	MOVW	R4, SPR(MI_CTR)	/* i-mmu control */
	ISYNC
	MOVW	$((1<<29)|(1<<28)), R4	/* cache inhibit when DR=0, write-through */
	SYNC
	MOVW	R4, SPR(MD_CTR)	/* d-mmu control */
	ISYNC
	TLBIA

	/* map various things 1:1 */
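	/*
	 * tlbtab (defined elsewhere) is a table of 3-word entries,
	 * the EPN, TWC and RPN values for each fixed translation;
	 * the entry count is the table size divided by 3*4, and
	 * CTR/BDNZ drive the loop that loads each triple into both
	 * the instruction and data TLBs.
	 */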
	MOVW	$tlbtab-KZERO(SB), R4
	MOVW	$tlbtabe-KZERO(SB), R5
	SUB	R4, R5
	MOVW	$(3*4), R6
	DIVW	R6, R5
	SUB	$4, R4
	MOVW	R5, CTR
ltlb:
	MOVWU	4(R4), R5
	MOVW	R5, SPR(MD_EPN)
	MOVW	R5, SPR(MI_EPN)
	MOVWU	4(R4), R5
	MOVW	R5, SPR(MI_TWC)
	MOVW	R5, SPR(MD_TWC)
	MOVWU	4(R4), R5
	MOVW	R5, SPR(MD_RPN)
	MOVW	R5, SPR(MI_RPN)
	BDNZ	ltlb

	MOVW	$(1<<25), R4
	MOVW	R4, SPR(IC_CSR)	/* enable i-cache */
	ISYNC

	MOVW	$(3<<24), R4
	SYNC
	MOVW	R4, SPR(DC_CSR)	/* clear force write through mode */
	MOVW	$(2<<24), R4
	SYNC
	MOVW	R4, SPR(DC_CSR)	/* enable d-cache */
	ISYNC

	/* enable MMU and set kernel PC to virtual space */
	MOVW	$((0<<29)|(0<<28)), R4	/* cache when DR=0, write back */
	SYNC
	MOVW	R4, SPR(MD_CTR)	/* d-mmu control */
	MOVW	LR, R3
	OR	$KZERO, R3
	MOVW	R3, SPR(SRR0)
	MOVW	MSR, R4
	OR	$(ME|IR|DR), R4	/* had ME|FPE|FE0|FE1 */
	MOVW	R4, SPR(SRR1)
	RFI	/* resume in kernel mode in caller */

TEXT	splhi(SB), $0
	MOVW	MSR, R3
	RLWNM	$0, R3, $~EE, R4
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	MOVW	LR, R31
	MOVW	R31, 4(R(MACH))	/* save PC in m->splpc */
	RETURN

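/*
 * splx(s) restores the EE bit saved by splhi/spllo: RLWMI merges the
 * caller's EE bit into the current MSR, and m->splpc is only updated
 * when interrupts stay disabled (the BNE skips the save when EE is
 * being re-enabled).
 */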
TEXT	splx(SB), $0
	MOVW	MSR, R4
	RLWMI	$0, R3, $EE, R4
	RLWNMCC	$0, R3, $EE, R5
	BNE	splx0
	MOVW	LR, R31
	MOVW	R31, 4(R(MACH))	/* save PC in m->splpc */
splx0:
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	RETURN

TEXT	splxpc(SB), $0
	MOVW	MSR, R4
	RLWMI	$0, R3, $EE, R4
	RLWNMCC	$0, R3, $EE, R5
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	RETURN

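/*
 * spllo (and intrvec below) record the low word of the time base in
 * spltbl/intrtbl/isavetbl; these timestamps are presumably used to
 * measure spl and interrupt latency, though nothing in this file
 * consumes them.
 */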
TEXT	spllo(SB), $0
	MFTB(TBRL, 3)
	MOVW	R3, spltbl(SB)
	MOVW	MSR, R3
	OR	$EE, R3, R4
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	RETURN

TEXT	spldone(SB), $0
	RETURN

TEXT	islo(SB), $0
	MOVW	MSR, R3
	RLWNM	$0, R3, $EE, R3
	RETURN

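/*
 * setlabel/gotolabel are the usual setjmp/longjmp pair over a
 * two-word label holding SP and LR: setlabel returns 0 after saving
 * them, gotolabel reloads them and "returns" 1 at the saved PC.
 */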
TEXT	setlabel(SB), $-4
	MOVW	LR, R31
	MOVW	R1, 0(R3)
	MOVW	R31, 4(R3)
	MOVW	$0, R3
	RETURN

TEXT	gotolabel(SB), $-4
	MOVW	4(R3), R31
	MOVW	R31, LR
	MOVW	0(R3), R1
	MOVW	$1, R3
	RETURN

/*
 * enter with stack set and mapped.
 * on return, SB (R2) has been set, and R3 has the Ureg*,
 * the MMU has been re-enabled, kernel text and PC are in KSEG,
 * R(MACH) has been set, and R0 contains 0.
 *
 * this can be simplified in the Inferno regime
 */
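/*
 * layout of the save area below (offsets from R1, as used here and
 * in restoreureg): 8 cause/vector, 12 status (MSR), 16 PC, 20 pad,
 * 24 LR, 28 CR, 32 XER, 36 CTR, 40 R0, 44 R1, 48.. R2-R31;
 * the Ureg proper starts at 8(R1).
 */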
TEXT	saveureg(SB), $-4
/*
 * save state
 */
	MOVMW	R2, 48(R1)	/* r2:r31 */
	MOVW	$setSB(SB), R2
	MOVW	SPR(SAVER1), R4
	MOVW	R4, 44(R1)
	MOVW	SPR(SAVER0), R5
	MOVW	R5, 40(R1)
	MOVW	CTR, R6
	MOVW	R6, 36(R1)
	MOVW	XER, R4
	MOVW	R4, 32(R1)
	MOVW	CR, R5
	MOVW	R5, 28(R1)
	MOVW	SPR(SAVELR), R6	/* LR */
	MOVW	R6, 24(R1)
	/* pad at 20(R1) */
	/* old PC(16) and status(12) saved earlier */
	MOVW	SPR(SAVEXX), R0
	MOVW	R0, 8(R1)	/* cause/vector */
	ADD	$8, R1, R3	/* Ureg* */
	STWCCC	R3, (R1)	/* break any pending reservations */
	MOVW	$0, R0	/* compiler/linker expect R0 to be zero */

	MOVW	MSR, R5
	OR	$(IR|DR), R5	/* enable MMU */
	MOVW	R5, SPR(SRR1)
	MOVW	LR, R31
	OR	$KZERO, R31	/* return PC in KSEG0 */
	MOVW	R31, SPR(SRR0)
	SYNC
	ISYNC
	RFI	/* returns to trap handler */

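/*
 * the cache routines below convert (virtaddr, count) into a whole
 * number of cache lines: the address is rounded down to a line
 * boundary, the count is widened by the offset lost in rounding plus
 * CACHELINESZ-1, and the shift by CACHELINELOG yields the CTR loop
 * count for the ICBI/DCBF/DCBI loops.
 */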
TEXT	icflush(SB), $-4	/* icflush(virtaddr, count) */
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(CACHELINESZ-1), R5
	SUB	R5, R3
	ADD	R3, R4
	ADD		$(CACHELINESZ-1), R4
	SRAW	$CACHELINELOG, R4
	MOVW	R4, CTR
icf0:	ICBI	(R5)
	ADD	$CACHELINESZ, R5
	BDNZ	icf0
	ISYNC
	RETURN

/*
 * flush to store and invalidate globally
 */
TEXT	dcflush(SB), $-4	/* dcflush(virtaddr, count) */
	SYNC
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(CACHELINESZ-1), R5
	CMP	R4, $0
	BLE	dcf1
	SUB	R5, R3
	ADD	R3, R4
	ADD		$(CACHELINESZ-1), R4
	SRAW	$CACHELINELOG, R4
	MOVW	R4, CTR
dcf0:	DCBF	(R5)
	ADD	$CACHELINESZ, R5
	BDNZ	dcf0
	SYNC
	ISYNC
dcf1:
	RETURN

/*
 * invalidate without flush, globally
 */
TEXT	dcinval(SB), $-4	/* dcinval(virtaddr, count) */
	SYNC
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(CACHELINESZ-1), R5
	CMP	R4, $0
	BLE	dci1
	SUB	R5, R3
	ADD	R3, R4
	ADD		$(CACHELINESZ-1), R4
	SRAW	$CACHELINELOG, R4
	MOVW	R4, CTR
dci0:	DCBI	(R5)
	ADD	$CACHELINESZ, R5
	BDNZ	dci0
	SYNC
	ISYNC
dci1:
	RETURN

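/*
 * _tas(ulong*): atomic test-and-set using lwarx/stwcx.
 * returns the previous value of the word and, if it was zero,
 * stores the non-zero token 0xdeaddead; the DCBF at the top of the
 * loop works around a reservation bug on some 603-class parts.
 */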
TEXT	_tas(SB), $0
	SYNC
	MOVW	R3, R4
	MOVW	$0xdeaddead,R5
tas1:
	DCBF	(R4)	/* fix for 603x bug */
	LWAR	(R4), R3
	CMP	R3, $0
	BNE	tas0
	STWCCC	R5, (R4)
	BNE	tas1
tas0:
	SYNC
	ISYNC
	RETURN

TEXT	gettbl(SB), $0
	MFTB(TBRL, 3)
	RETURN

TEXT	gettbu(SB), $0
	MFTB(TBRU, 3)
	RETURN

TEXT	getpvr(SB), $0
	MOVW	SPR(PVR), R3
	RETURN

TEXT	getimmr(SB), $0
	MOVW	SPR(IMMR), R3
	RETURN

TEXT	getdec(SB), $0
	MOVW	SPR(DEC), R3
	RETURN

TEXT	putdec(SB), $0
	MOVW	R3, SPR(DEC)
	RETURN

TEXT	getcallerpc(SB), $-4
	MOVW	0(R1), R3
	RETURN

TEXT getdar(SB), $0
	MOVW	SPR(DAR), R3
	RETURN

TEXT getdsisr(SB), $0
	MOVW	SPR(DSISR), R3
	RETURN

TEXT	getdepn(SB), $0
	MOVW	SPR(MD_EPN), R3
	RETURN

TEXT	getmsr(SB), $0
	MOVW	MSR, R3
	RETURN

TEXT	putmsr(SB), $0
	SYNC
	MOVW	R3, MSR
	MSRSYNC
	RETURN

TEXT	eieio(SB), $0
	EIEIO
	RETURN

TEXT	gotopc(SB), $0
	MOVW	R3, CTR
	MOVW	LR, R31	/* for trace back */
	BR	(CTR)

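/*
 * firmware() appears to hand control back to the boot ROM: it
 * disables interrupts, machine checks and translation, points IMMR
 * back at the reset default, and takes an RFI to a deliberately bad
 * address with MSR_IP set so the resulting exception is handled by
 * the firmware's vectors.
 */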
TEXT	firmware(SB), $0
	MOVW	MSR, R3
	MOVW	$(EE|ME), R4
	ANDN	R4, R3
	OR	$(MSR_IP), R3
	ISYNC
	MOVW	R3, MSR	/* turn off interrupts and machine checks */
	MSRSYNC
	MOVW	$(RI|IR|DR|ME), R4
	ANDN	R4, R3
	MOVW	R3, SPR(SRR1)
	MOVW	$(0xFF00<<16), R4
	MOVW	R4, SPR(IMMR)
	MOVW	$(0x0800<<16), R4
	MOVW	R4, SPR(SRR0)	/* force bad address */
	MOVW	R0, SPR(149)	/* ensure checkstop on machine check */
	MOVW	R4, R1
	MOVW	R4, R2
	EIEIO
	ISYNC
	RFI

/*
 * byte swapping of arrays of long and short;
 * could possibly be avoided with more changes to drivers
 */
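/*
 * swabl(dst, src, nbytes) and swabs(dst, src, nbytes) copy nbytes
 * from src to dst, byte-reversing each long (MOVWBR) or short
 * (MOVHBR) respectively; dst arrives in R3 and the remaining
 * arguments come from the stack, as usual here.
 */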
TEXT	swabl(SB), $0
	MOVW	v+4(FP), R4
	MOVW	n+8(FP), R5
	SRAW	$2, R5, R5
	MOVW	R5, CTR
	SUB	$4, R4
	SUB	$4, R3
swabl1:
	ADD	$4, R3
	MOVWU	4(R4), R7
	MOVWBR	R7, (R3)
	BDNZ	swabl1
	RETURN

TEXT	swabs(SB), $0
	MOVW	v+4(FP), R4
	MOVW	n+8(FP), R5
	SRAW	$1, R5, R5
	MOVW	R5, CTR
	SUB	$2, R4
	SUB	$2, R3
swabs1:
	ADD	$2, R3
	MOVHZU	2(R4), R7
	MOVHBR	R7, (R3)
	BDNZ	swabs1
	RETURN

TEXT	legetl(SB), $0
	MOVWBR	(R3), R3
	RETURN

TEXT	lesetl(SB), $0
	MOVW	v+4(FP), R4
	MOVWBR	R4, (R3)
	RETURN

TEXT	legets(SB), $0
	MOVHBR	(R3), R3
	RETURN

TEXT	lesets(SB), $0
	MOVW	v+4(FP), R4
	MOVHBR	R4, (R3)
	RETURN

#ifdef MMUTWC
/*
 * ITLB miss
 *	avoid references that might need the right SB value;
 *	IR and DR are off.
 */
TEXT	itlbmiss(SB), $-4
	MOVW	R1, SPR(M_TW)
	MOVW	SPR(SRR0), R1	/* instruction miss address */
	MOVW	R1, SPR(MD_EPN)
	MOVW	SPR(M_TWB), R1	/* level one pointer */
	MOVW	(R1), R1
	MOVW	R1, SPR(MI_TWC)	/* save level one attributes */
	MOVW	R1, SPR(MD_TWC)	/* save base and attributes */
	MOVW	SPR(MD_TWC), R1	/* level two pointer */
	MOVW	(R1), R1	/* level two entry */
	MOVW	R1, SPR(MI_RPN)	/* write TLB */
	MOVW	SPR(M_TW), R1
	RFI

/*
 * DTLB miss
 *	avoid references that might need the right SB value;
 *	IR and DR are off.
 */
TEXT	dtlbmiss(SB), $-4
	MOVW	R1, SPR(M_TW)
	MOVW	SPR(M_TWB), R1	/* level one pointer */
	MOVW	(R1), R1	/* level one entry */
	MOVW	R1, SPR(MD_TWC)	/* save base and attributes */
	MOVW	SPR(MD_TWC), R1	/* level two pointer */
	MOVW	(R1), R1	/* level two entry */
	MOVW	R1, SPR(MD_RPN)	/* write TLB */
	MOVW	SPR(M_TW), R1
	RFI
#else
TEXT	itlbmiss(SB), $-4
	BR	traps
TEXT	dtlbmiss(SB), $-4
	BR	traps
#endif

/*
 * traps force memory mapping off.
 * this code goes to too much effort (for the Inferno environment) to restore it.
 */
TEXT	trapvec(SB), $-4
traps:
	MOVW	LR, R0

pagefault:

/*
 * map data virtually and make space to save
 */
	MOVW	R0, SPR(SAVEXX)	/* vector */
	MOVW	R1, SPR(SAVER1)
	SYNC
	ISYNC
	MOVW	MSR, R0
	OR	$(DR|ME), R0		/* make data space usable */
	SYNC
	MOVW	R0, MSR
	MSRSYNC
	SUB	$UREGSPACE, R1

	MOVW	SPR(SRR0), R0	/* save SRR0/SRR1 now, since DTLB might be missing stack page */
	MOVW	R0, LR
	MOVW	SPR(SRR1), R0
	MOVW	R0, 12(R1)	/* save status: could take DTLB miss here */
	MOVW	LR, R0
	MOVW	R0, 16(R1)	/* old PC */
	BL	saveureg(SB)
	BL	trap(SB)
	BR	restoreureg

TEXT	intrvec(SB), $-4
	MOVW	LR, R0

/*
 * map data virtually and make space to save
 */
	MOVW	R0, SPR(SAVEXX)	/* vector */
	MOVW	R1, SPR(SAVER1)
	SYNC
	ISYNC
	MOVW	MSR, R0
	OR	$DR, R0		/* make data space usable */
	SYNC
	MOVW	R0, MSR
	MSRSYNC
	SUB	$UREGSPACE, R1

	MFTB(TBRL, 0)
	MOVW	R0, intrtbl(SB)

	MOVW	SPR(SRR0), R0
	MOVW	R0, LR
	MOVW	SPR(SRR1), R0
	MOVW	R0, 12(R1)
	MOVW	LR, R0
	MOVW	R0, 16(R1)
	BL	saveureg(SB)

	MFTB(TBRL, 5)
	MOVW	R5, isavetbl(SB)

	BL	intr(SB)

/*
 * restore state from Ureg and return from trap/interrupt
 */
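/*
 * the restore mirrors saveureg; R0 and LR go back via the SAVER0
 * and SAVELR scratch SPRGs so they can be reloaded after R1 has
 * already been switched to the interrupted stack, just before RFI.
 */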
restoreureg:
	MOVMW	48(R1), R2	/* r2:r31 */
	/* defer R1 */
	MOVW	40(R1), R0
	MOVW	R0, SPR(SAVER0)
	MOVW	36(R1), R0
	MOVW	R0, CTR
	MOVW	32(R1), R0
	MOVW	R0, XER
	MOVW	28(R1), R0
	MOVW	R0, CR	/* CR */
	MOVW	24(R1), R0
	MOVW	R0, SPR(SAVELR)	/* LR */
	/* pad, skip */
	MOVW	16(R1), R0
	MOVW	R0, SPR(SRR0)	/* old PC */
	MOVW	12(R1), R0
	MOVW	R0, SPR(SRR1)	/* old MSR */
	/* cause, skip */
	MOVW	44(R1), R1	/* old SP */
	MOVW	SPR(SAVELR), R0
	MOVW	R0, LR
	MOVW	SPR(SAVER0), R0
	RFI

GLOBL	mach0+0(SB), $MACHSIZE
GLOBL	spltbl+0(SB), $4
GLOBL	intrtbl+0(SB), $4
GLOBL	isavetbl+0(SB), $4