xref: /plan9/sys/src/9/mtx/l.s (revision 3c2ddefeebfd7a80eaebf272955335c2cf163bd5)
#include	"mem.h"

/* use of SPRG registers in save/restore */
#define	SAVER0	SPRG0
#define	SAVER1	SPRG1
#define	SAVELR	SPRG2
#define	SAVEXX	SPRG3

/* special instruction definitions */
#define	BDNZ	BC	16,0,	/* decrement CTR, branch if CTR != 0 */
#define	BDNE	BC	0,2,	/* branch if EQ condition bit is clear */

/* encoded directly because the assembler lacks these mnemonics */
#define	TLBIA	WORD	$((31<<26)|(307<<1))	/* invalidate all TLB entries */
#define	TLBSYNC	WORD	$((31<<26)|(566<<1))	/* synchronise TLB invalidates across the bus */

/* on some models mtmsr doesn't synchronise enough (eg, 603e) */
#define	MSRSYNC	SYNC; ISYNC

/* stack space for a trap frame: Ureg plus an 8-byte slot below it */
#define	UREGSPACE	(UREGSIZE+8)
20
/*
 * kernel entry point: set up the MSR, turn on the MMU via mmuinit0,
 * establish SB, the FPU state, Mach and a stack, then call main.
 * Never returns.
 */
TEXT start(SB), $-4

	/*
	 * setup MSR
	 * turn off interrupts
	 * use 0x000 as exception prefix
	 * enable machine check
	 */
	MOVW	MSR, R3
	MOVW	$(MSR_EE|MSR_IP), R4
	ANDN	R4, R3
	OR		$(MSR_ME), R3
	ISYNC
	MOVW	R3, MSR
	MSRSYNC

	/* except during trap handling, R0 is zero from now on */
	MOVW	$0, R0

	/* setup SB for pre mmu */
	MOVW	$setSB(SB), R2
	MOVW	$KZERO, R3
	ANDN	R3, R2		/* physical address of SB while translation is off */

	BL	mmuinit0(SB)

	/* running with MMU on!! */

	/* set R2 to correct value */
	MOVW	$setSB(SB), R2

	/* debugger sets R1 to top of usable memory +1 */
	MOVW R1, memsize(SB)

	BL		kfpinit(SB)

	/* set up Mach */
	MOVW	$mach0(SB), R(MACH)
	ADD	$(MACHSIZE-8), R(MACH), R1	/* set stack */

	MOVW	R0, R(USER)	/* no current process yet */
	MOVW	R0, 0(R(MACH))	/* clear first word of Mach */

	BL	main(SB)

	RETURN		/* not reached */

GLOBL	mach0(SB), $(MAXMACH*BY2PG)	/* per-processor Mach structures */
GLOBL	memsize(SB), $4			/* top of usable memory + 1, from debugger */
70
/*
 * on return from this function we will be running in virtual mode.
 * We set up the Block Address Translation (BAT) registers thus:
 * 1) first 3 BATs are 256M blocks, starting from KZERO->0
 * 2) remaining BAT maps last 256M directly
 */
TEXT	mmuinit0(SB), $0
	/* reset all the tlbs */
	MOVW	$64, R3		/* 64 invalidations cover every TLB set */
	MOVW	R3, CTR
	MOVW	$0, R4
tlbloop:
	TLBIE	R4
	ADD		$BIT(19), R4	/* step EA to the next TLB congruence class */
	BDNZ	tlbloop
	TLBSYNC

	/* KZERO -> 0 */
	/* BATU: block EA, 256MB length (BL=0x7ff), supervisor-valid; BATL: phys 0, cached R/W */
	MOVW	$(KZERO|(0x7ff<<2)|2), R3
	MOVW	$(PTEVALID|PTEWRITE), R4
	MOVW	R3, SPR(IBATU(0))
	MOVW	R4, SPR(IBATL(0))
	MOVW	R3, SPR(DBATU(0))
	MOVW	R4, SPR(DBATL(0))

	/* KZERO+256M -> 256M */
	ADD		$(1<<28), R3
	ADD		$(1<<28), R4
	MOVW	R3, SPR(IBATU(1))
	MOVW	R4, SPR(IBATL(1))
	MOVW	R3, SPR(DBATU(1))
	MOVW	R4, SPR(DBATL(1))

	/* KZERO+512M -> 512M */
	ADD		$(1<<28), R3
	ADD		$(1<<28), R4
	MOVW	R3, SPR(IBATU(2))
	MOVW	R4, SPR(IBATL(2))
	MOVW	R3, SPR(DBATU(2))
	MOVW	R4, SPR(DBATL(2))

	/* direct map last block, uncached, (?guarded) */
	MOVW	$((0xf<<28)|(0x7ff<<2)|2), R3
	MOVW	$((0xf<<28)|PTE1_I|PTE1_G|PTE1_RW), R4
	MOVW	R3, SPR(DBATU(3))	/* data accesses only: device space */
	MOVW	R4, SPR(DBATL(3))

	/* IBAT 3 unused */
	MOVW	R0, SPR(IBATU(3))
	MOVW	R0, SPR(IBATL(3))

	/* enable MMU */
	MOVW	LR, R3
	OR	$KZERO, R3	/* resume at the caller's virtual (KSEG) address */
	MOVW	R3, SPR(SRR0)
	MOVW	MSR, R4
	OR	$(MSR_IR|MSR_DR), R4	/* instruction and data translation on */
	MOVW	R4, SPR(SRR1)
	RFI	/* resume in kernel mode in caller */

	RETURN	/* not reached: RFI above returns to the caller directly */
132
/*
 * initialise the FPU for kernel use: clear FPSCR except field 6,
 * which is set to 0xD (VE, OE, ZE: trap on invalid operation,
 * overflow and divide-by-zero), then load the constant registers
 * the compiler/runtime relies on: F28=0.0, F29=0.5, F30=1.0, F31=2.0;
 * F27 = 2^52, presumably the bias for float/int conversion — TODO confirm.
 * All remaining FPRs are zeroed from F28.
 */
TEXT	kfpinit(SB), $0
	MOVFL	$0,FPSCR(7)
	MOVFL	$0xD,FPSCR(6)	/* VE, OE, ZE */
	MOVFL	$0, FPSCR(5)
	MOVFL	$0, FPSCR(3)
	MOVFL	$0, FPSCR(2)
	MOVFL	$0, FPSCR(1)
	MOVFL	$0, FPSCR(0)

	FMOVD	$4503601774854144.0, F27	/* 2^52 */
	FMOVD	$0.5, F29
	FSUB		F29, F29, F28	/* F28 = 0.0 */
	FADD	F29, F29, F30	/* F30 = 1.0 */
	FADD	F30, F30, F31	/* F31 = 2.0 */
	FMOVD	F28, F0
	FMOVD	F28, F1
	FMOVD	F28, F2
	FMOVD	F28, F3
	FMOVD	F28, F4
	FMOVD	F28, F5
	FMOVD	F28, F6
	FMOVD	F28, F7
	FMOVD	F28, F8
	FMOVD	F28, F9
	FMOVD	F28, F10
	FMOVD	F28, F11
	FMOVD	F28, F12
	FMOVD	F28, F13
	FMOVD	F28, F14
	FMOVD	F28, F15
	FMOVD	F28, F16
	FMOVD	F28, F17
	FMOVD	F28, F18
	FMOVD	F28, F19
	FMOVD	F28, F20
	FMOVD	F28, F21
	FMOVD	F28, F22
	FMOVD	F28, F23
	FMOVD	F28, F24
	FMOVD	F28, F25
	FMOVD	F28, F26
	RETURN
175
/*
 * splhi: disable external interrupts.
 * Returns the previous MSR in R3 for a later splx.
 */
TEXT	splhi(SB), $0
	MOVW	LR, R31
	MOVW	R31, 4(R(MACH))	/* save PC in m->splpc */
	MOVW	MSR, R3		/* return value: previous state */
	RLWNM	$0, R3, $~MSR_EE, R4	/* clear EE */
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	RETURN
185
/*
 * splx(s): restore the interrupt-enable state saved in s
 * by a previous splhi/spllo.  splx falls through to splxpc;
 * both record the caller's PC in m->splpc.
 */
TEXT	splx(SB), $0
	/* fall though */

TEXT	splxpc(SB), $0
	MOVW	LR, R31
	MOVW	R31, 4(R(MACH))	/* save PC in m->splpc */
	MOVW	MSR, R4
	RLWMI	$0, R3, $MSR_EE, R4	/* insert saved EE bit into current MSR */
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	RETURN
198
/*
 * spllo: enable external interrupts.
 * Returns the previous MSR in R3 for a later splx.
 */
TEXT	spllo(SB), $0
	MOVW	MSR, R3		/* return value: previous state */
	OR	$MSR_EE, R3, R4
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	RETURN
206
/* spldone: no-op on this port; hook for spl accounting */
TEXT	spldone(SB), $0
	RETURN
209
/* islo: return non-zero (MSR_EE) iff interrupts are currently enabled */
TEXT	islo(SB), $0
	MOVW	MSR, R3
	RLWNM	$0, R3, $MSR_EE, R3	/* isolate EE bit */
	RETURN
214
/*
 * setlabel(l): record SP and PC in the Label l for a later
 * gotolabel.  Returns 0 (gotolabel makes the saved context
 * appear to return 1).
 */
TEXT	setlabel(SB), $-4
	MOVW	LR, R31
	MOVW	R1, 0(R3)	/* l->sp */
	MOVW	R31, 4(R3)	/* l->pc */
	MOVW	$0, R3
	RETURN
221
/*
 * gotolabel(l): resume the context saved in the Label l.
 * Control reappears at the matching setlabel, which then
 * returns 1 to its caller.
 */
TEXT	gotolabel(SB), $-4
	MOVW	4(R3), R31	/* l->pc */
	MOVW	R31, LR
	MOVW	0(R3), R1	/* l->sp */
	MOVW	$1, R3
	RETURN
228
/*
 * touser(sp): enter user mode for the first time, executing at
 * UTZERO+32 (past the a.out header) with user stack pointer sp.
 * Does not return.
 */
TEXT	touser(SB), $-4
	MOVW	$(UTZERO+32), R5	/* header appears in text */
	MOVW	$(MSR_EE|MSR_PR|MSR_ME|MSR_IR|MSR_DR|MSR_RI), R4	/* user-mode MSR */
	MOVW	R4, SPR(SRR1)
	MOVW	R3, R1		/* user stack */
	MOVW	R5, SPR(SRR0)
	RFI
236
/*
 * icflush(virtaddr, count): make count bytes at virtaddr coherent
 * in the instruction cache by invalidating every cache line the
 * range touches.  A count <= 0 is a no-op: without the guard
 * (which dcflush below already has) a non-positive line count
 * would be loaded into CTR and BDNZ, which decrements before
 * testing, would spin through up to 2^32 invalidations.
 */
TEXT	icflush(SB), $-4	/* icflush(virtaddr, count) */
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(CACHELINESZ-1), R5	/* R5 = addr rounded down to a line */
	CMP	R4, $0
	BLE	icf1		/* nothing to do for count <= 0 */
	SUB	R5, R3		/* R3 = offset into first line */
	ADD	R3, R4		/* widen count by that offset */
	ADD		$(CACHELINESZ-1), R4	/* round count up to whole lines */
	SRAW	$CACHELINELOG, R4	/* R4 = number of lines */
	MOVW	R4, CTR
icf0:	ICBI	(R5)
	ADD	$CACHELINESZ, R5
	BDNZ	icf0
icf1:
	ISYNC
	RETURN
250
/*
 * dcflush(virtaddr, count): flush count bytes at virtaddr from the
 * data cache to memory, line by line.  count <= 0 is a no-op.
 */
TEXT	dcflush(SB), $-4	/* dcflush(virtaddr, count) */
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(CACHELINESZ-1), R5	/* R5 = addr rounded down to a line */
	CMP	R4, $0
	BLE	dcf1		/* nothing to do for count <= 0 */
	SUB	R5, R3		/* R3 = offset into first line */
	ADD	R3, R4		/* widen count by that offset */
	ADD		$(CACHELINESZ-1), R4	/* round count up to whole lines */
	SRAW	$CACHELINELOG, R4	/* R4 = number of lines */
	MOVW	R4, CTR
dcf0:	DCBF	(R5)
	ADD	$CACHELINESZ, R5
	BDNZ	dcf0
dcf1:
	SYNC
	RETURN
267
/*
 * tas(p): atomic test-and-set using lwarx/stwcx.
 * Returns the previous value of *p in R3; if it was zero,
 * *p has been set to 0xdead.  The final SYNC/ISYNC order
 * memory accesses around lock acquisition.
 */
TEXT	tas(SB), $0
	SYNC
	MOVW	R3, R4		/* R4 = p */
	MOVW	$0xdead,R5	/* lock-held marker */
tas1:
	DCBF	(R4)	/* fix for 603x bug */
	LWAR	(R4), R3	/* load-reserved old value */
	CMP	R3, $0
	BNE	tas0		/* already held: return old value */
	STWCCC	R5, (R4)	/* store-conditional the marker */
	BNE	tas1		/* reservation lost: retry */
tas0:
	SYNC
	ISYNC
	RETURN
283
/* atomically increment *p with a lwarx/stwcx retry loop */
TEXT	_xinc(SB),$0	/* void _xinc(long *); */
	MOVW	R3, R4		/* R4 = p */
xincloop:
	DCBF	(R4)	/* fix for 603x bug */
	LWAR	(R4), R3	/* load-reserved */
	ADD		$1, R3
	STWCCC	R3, (R4)	/* store-conditional */
	BNE		xincloop	/* reservation lost: retry */
	RETURN
293
/* atomically decrement *p; returns the new value in R3 */
TEXT	_xdec(SB),$0	/* long _xdec(long *); */
	MOVW	R3, R4		/* R4 = p */
xdecloop:
	DCBF	(R4)	/* fix for 603x bug */
	LWAR	(R4), R3	/* load-reserved */
	ADD		$-1, R3
	STWCCC	R3, (R4)	/* store-conditional */
	BNE		xdecloop	/* reservation lost: retry */
	RETURN
303
/*
 * cmpswap(p, old, new): atomically set *p = new iff *p == old.
 * Returns 1 on success, 0 on failure.  Note that a failed
 * comparison gives up immediately rather than retrying.
 * NOTE(review): no SYNC/ISYNC barriers here, unlike tas —
 * confirm callers provide their own ordering where needed.
 */
TEXT cmpswap(SB),$0	/* int cmpswap(long*, long, long) */
	MOVW	R3, R4	/* addr */
	MOVW	old+4(FP), R5
	MOVW	new+8(FP), R6
	DCBF	(R4)		/* fix for 603x bug? */
	LWAR	(R4), R3	/* load-reserved current value */
	CMP	R3, R5
	BNE fail		/* *p != old */
	STWCCC	R6, (R4)	/* store-conditional new */
	BNE fail		/* reservation lost */
	MOVW $1, R3
	RETURN
fail:
	MOVW $0, R3
	RETURN
319
/* read the processor version register */
TEXT	getpvr(SB), $0
	MOVW	SPR(PVR), R3
	RETURN

/* read the decrementer (countdown timer) */
TEXT	getdec(SB), $0
	MOVW	SPR(DEC), R3
	RETURN

/* load the decrementer */
TEXT	putdec(SB), $0
	MOVW	R3, SPR(DEC)
	RETURN

/* read the data address register (faulting address after a data fault) */
TEXT	getdar(SB), $0
	MOVW	SPR(DAR), R3
	RETURN

/* read the data storage interrupt status register */
TEXT	getdsisr(SB), $0
	MOVW	SPR(DSISR), R3
	RETURN

/* read the machine state register */
TEXT	getmsr(SB), $0
	MOVW	MSR, R3
	RETURN

/* write the machine state register, with the required synchronisation */
TEXT	putmsr(SB), $0
	SYNC
	MOVW	R3, MSR
	MSRSYNC
	RETURN

/* set the page table base/size register */
TEXT	putsdr1(SB), $0
	MOVW	R3, SPR(SDR1)
	RETURN

/* putsr(seg, val): load segment register seg with val */
TEXT	putsr(SB), $0
	MOVW	4(FP), R4
	MOVW	R4, SEG(R3)
	RETURN

/* read hardware implementation register 0 */
TEXT	gethid0(SB), $0
	MOVW	SPR(HID0), R3
	RETURN

/* read hardware implementation register 1 */
TEXT	gethid1(SB), $0
	MOVW	SPR(HID1), R3
	RETURN

/* write hardware implementation register 0 */
TEXT	puthid0(SB), $0
	MOVW	R3, SPR(HID0)
	RETURN

/* write hardware implementation register 1 */
TEXT	puthid1(SB), $0
	MOVW	R3, SPR(HID1)
	RETURN

/* enforce ordering of I/O accesses */
TEXT	eieio(SB), $0
	EIEIO
	RETURN

/* full memory barrier */
TEXT	sync(SB), $0
	SYNC
	RETURN

/* invalidate every TLB entry, then synchronise */
TEXT	tlbflushall(SB), $0
	MOVW	$64, R3		/* 64 invalidations cover every TLB set */
	MOVW	R3, CTR
	MOVW	$0, R4
tlbflushall0:
	TLBIE	R4
	ADD		$BIT(19), R4	/* step EA to the next TLB congruence class */
	BDNZ	tlbflushall0
	EIEIO
	TLBSYNC
	SYNC
	RETURN

/* invalidate the TLB entry for one effective address */
TEXT	tlbflush(SB), $0
	TLBIE	R3
	RETURN

/* jump to the absolute address in R3 (used when rebooting/chaining) */
TEXT	gotopc(SB), $0
	MOVW	R3, CTR
	MOVW	LR, R31	/* for trace back */
	BR	(CTR)
404
/*
 * traps force memory mapping off.
 * the following code has been executed at the exception
 * vector location
 *	MOVW R0, SPR(SAVER0)
 *	MOVW LR, R0
 *	MOVW R0, SPR(SAVELR)
 *	bl	trapvec(SB)
 *
 * On entry here the MMU is off, so all kernel addresses must be
 * converted to physical (PADDR) before use.  LR holds the return
 * address inside the vector stub, which identifies the vector.
 */
TEXT	trapvec(SB), $-4
	MOVW	LR, R0
	MOVW	R1, SPR(SAVER1)
	MOVW	R0, SPR(SAVEXX)	/* vector */

	/* did we come from user space */
	MOVW	SPR(SRR1), R0
	MOVW	CR, R1		/* stash CR while we inspect SRR1 */
	MOVW	R0, CR		/* view SRR1 bits through CR */
	BC	4,17,ktrap	/* PR bit clear: trap came from kernel mode */

	/* switch to kernel stack */
	MOVW	R1, CR		/* restore the interrupted CR */
	MOVW	R2, R0		/* preserve user R2 across SB setup */
	MOVW	$setSB(SB), R2
	RLWNM	$0, R2, $~KZERO, R2		/* PADDR(setSB) */
	MOVW	$mach0(SB), R1	/* m-> */
	RLWNM	$0, R1, $~KZERO, R1		/* PADDR(m->) */
	MOVW	8(R1), R1				/* m->proc  */
	RLWNM	$0, R1, $~KZERO, R1		/* PADDR(m->proc) */
	MOVW	8(R1), R1				/* m->proc->kstack */
	RLWNM	$0, R1, $~KZERO, R1		/* PADDR(m->proc->kstack) */
	ADD	$(KSTACK-UREGSIZE), R1	/* room for the Ureg at top of kstack */
	MOVW	R0, R2		/* user R2 goes into the frame via saveureg */
	BL	saveureg(SB)
	BL	trap(SB)
	BR	restoreureg
ktrap:
	/* already on a kernel stack: build the Ureg below the old SP */
	MOVW	R1, CR		/* restore the interrupted CR */
	MOVW	SPR(SAVER1), R1
	RLWNM	$0, R1, $~KZERO, R1		/* PADDR(R1) */
	SUB	$UREGSPACE, R1
	BL	saveureg(SB)
	BL	trap(SB)
	BR	restoreureg
449
/*
 * enter with stack set and mapped.
 * on return, SB (R2) has been set, and R3 has the Ureg*,
 * the MMU has been re-enabled, kernel text and PC are in KSEG,
 * R(MACH) has been set, and R0 contains 0.
 *
 */
TEXT	saveureg(SB), $-4
/*
 * save state
 */
	MOVMW	R2, 48(R1)	/* r2:r31 */
	MOVW	$setSB(SB), R2
	RLWNM	$0, R2, $~KZERO, R2		/* PADDR(setSB) */
	MOVW	$mach0(SB), R(MACH)
	RLWNM	$0, R(MACH), $~KZERO, R(MACH)		/* PADDR(m->) */
	MOVW	8(R(MACH)), R(USER)	/* up = m->proc */
	MOVW	$mach0(SB), R(MACH)	/* virtual again: valid once MMU is back on */
	MOVW	$setSB(SB), R2
	MOVW	SPR(SAVER1), R4	/* interrupted SP, stashed by the vector stub */
	MOVW	R4, 44(R1)
	MOVW	SPR(SAVER0), R5	/* interrupted R0, stashed by the vector stub */
	MOVW	R5, 40(R1)
	MOVW	CTR, R6
	MOVW	R6, 36(R1)
	MOVW	XER, R4
	MOVW	R4, 32(R1)
	MOVW	CR, R5
	MOVW	R5, 28(R1)
	MOVW	SPR(SAVELR), R6	/* LR */
	MOVW	R6, 24(R1)
	/* pad at 20(R1) */
	MOVW	SPR(SRR0), R0
	MOVW	R0, 16(R1)				/* old PC */
	MOVW	SPR(SRR1), R0
	MOVW	R0, 12(R1)				/* old status */
	MOVW	SPR(SAVEXX), R0
	MOVW	R0, 8(R1)	/* cause/vector */
	ADD	$8, R1, R3	/* Ureg* */
	OR	$KZERO, R3	/* fix ureg */
	STWCCC	R3, (R1)	/* break any pending reservations */
	MOVW	$0, R0	/* compiler/linker expect R0 to be zero */

	MOVW	MSR, R5
	OR	$(MSR_IR|MSR_DR|MSR_FP|MSR_RI), R5	/* enable MMU */
	MOVW	R5, SPR(SRR1)
	MOVW	LR, R31
	OR	$KZERO, R31	/* return PC in KSEG0 */
	MOVW	R31, SPR(SRR0)
	OR	$KZERO, R1	/* fix stack pointer */
	RFI	/* returns to trap handler */
501
/*
 * restore state from Ureg and return from trap/interrupt.
 * forkret is the entry point for a newly forked process,
 * which resumes user mode via the same path.
 */
TEXT	forkret(SB), $0
	BR	restoreureg

restoreureg:
	MOVMW	48(R1), R2	/* r2:r31 */
	/* defer R1 */
	MOVW	40(R1), R0
	MOVW	R0, SPR(SAVER0)	/* restore R0 last, via SPRG */
	MOVW	36(R1), R0
	MOVW	R0, CTR
	MOVW	32(R1), R0
	MOVW	R0, XER
	MOVW	28(R1), R0
	MOVW	R0, CR	/* CR */
	MOVW	24(R1), R0
	MOVW	R0, LR
	/* pad, skip */
	MOVW	16(R1), R0
	MOVW	R0, SPR(SRR0)	/* old PC */
	MOVW	12(R1), R0
	MOVW	R0, SPR(SRR1)	/* old MSR */
	/* cause, skip */
	MOVW	44(R1), R1	/* old SP */
	MOVW	SPR(SAVER0), R0
	RFI
530
/*
 * fpsave(f): store all 32 floating-point registers and the FPSCR
 * into the save area at f (33 doublewords; FPSCR last, via F0).
 * Note F0 is clobbered by the FPSCR store at the end.
 */
TEXT	fpsave(SB), $0
	FMOVD	F0, (0*8)(R3)
	FMOVD	F1, (1*8)(R3)
	FMOVD	F2, (2*8)(R3)
	FMOVD	F3, (3*8)(R3)
	FMOVD	F4, (4*8)(R3)
	FMOVD	F5, (5*8)(R3)
	FMOVD	F6, (6*8)(R3)
	FMOVD	F7, (7*8)(R3)
	FMOVD	F8, (8*8)(R3)
	FMOVD	F9, (9*8)(R3)
	FMOVD	F10, (10*8)(R3)
	FMOVD	F11, (11*8)(R3)
	FMOVD	F12, (12*8)(R3)
	FMOVD	F13, (13*8)(R3)
	FMOVD	F14, (14*8)(R3)
	FMOVD	F15, (15*8)(R3)
	FMOVD	F16, (16*8)(R3)
	FMOVD	F17, (17*8)(R3)
	FMOVD	F18, (18*8)(R3)
	FMOVD	F19, (19*8)(R3)
	FMOVD	F20, (20*8)(R3)
	FMOVD	F21, (21*8)(R3)
	FMOVD	F22, (22*8)(R3)
	FMOVD	F23, (23*8)(R3)
	FMOVD	F24, (24*8)(R3)
	FMOVD	F25, (25*8)(R3)
	FMOVD	F26, (26*8)(R3)
	FMOVD	F27, (27*8)(R3)
	FMOVD	F28, (28*8)(R3)
	FMOVD	F29, (29*8)(R3)
	FMOVD	F30, (30*8)(R3)
	FMOVD	F31, (31*8)(R3)
	MOVFL	FPSCR, F0
	FMOVD	F0, (32*8)(R3)
	RETURN
567
/*
 * fprestore(f): reload the FPSCR and all 32 floating-point
 * registers from the save area at f, mirroring fpsave.
 * FPSCR is restored first (through F0), then F0..F31.
 */
TEXT	fprestore(SB), $0
	FMOVD	(32*8)(R3), F0
	MOVFL	F0, FPSCR
	FMOVD	(0*8)(R3), F0
	FMOVD	(1*8)(R3), F1
	FMOVD	(2*8)(R3), F2
	FMOVD	(3*8)(R3), F3
	FMOVD	(4*8)(R3), F4
	FMOVD	(5*8)(R3), F5
	FMOVD	(6*8)(R3), F6
	FMOVD	(7*8)(R3), F7
	FMOVD	(8*8)(R3), F8
	FMOVD	(9*8)(R3), F9
	FMOVD	(10*8)(R3), F10
	FMOVD	(11*8)(R3), F11
	FMOVD	(12*8)(R3), F12
	FMOVD	(13*8)(R3), F13
	FMOVD	(14*8)(R3), F14
	FMOVD	(15*8)(R3), F15
	FMOVD	(16*8)(R3), F16
	FMOVD	(17*8)(R3), F17
	FMOVD	(18*8)(R3), F18
	FMOVD	(19*8)(R3), F19
	FMOVD	(20*8)(R3), F20
	FMOVD	(21*8)(R3), F21
	FMOVD	(22*8)(R3), F22
	FMOVD	(23*8)(R3), F23
	FMOVD	(24*8)(R3), F24
	FMOVD	(25*8)(R3), F25
	FMOVD	(26*8)(R3), F26
	FMOVD	(27*8)(R3), F27
	FMOVD	(28*8)(R3), F28
	FMOVD	(29*8)(R3), F29
	FMOVD	(30*8)(R3), F30
	FMOVD	(31*8)(R3), F31
	RETURN
604