/*
 * omap3530 reboot code
 *
 * must fit in 11K to avoid stepping on PTEs; see mem.h.
 *
 * R11 is used by the loader as a temporary, so avoid it.
 */
#include "arm.s"

/*
 * Turn off MMU, then copy the new kernel to its correct location
 * in physical memory.  Then jump to the start of the kernel.
 */

/* main(PADDR(entry), PADDR(code), size); */
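/*
 * entry is the physical address the new kernel is copied to and entered at,
 * code is the physical address of the staged kernel image, size its length
 * in bytes (see the "phys source", "phys destination" and "byte count"
 * comments below).
 */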
TEXT	main(SB), 1, $-4
	MOVW	$setR12(SB), R12

	MOVW	R0, p1+0(FP)		/* destination, passed in R0 */

	MOVW	CPSR, R0
	ORR	$(PsrDirq|PsrDfiq), R0
	MOVW	R0, CPSR		/* splhi */
	BARRIERS

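/* the PUTC()s below trace progress, spelling out "Reboot->" on the console */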
PUTC('R')
	MRC	CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
	BIC	$CpACasa, R1	/* no speculative I access forwarding to mem */
	/* slow down */
	ORR	$(CpACcachenopipe|CpACcp15serial|CpACcp15waitidle|CpACcp15pipeflush), R1
	MCR	CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
	BARRIERS

	BL	cachesoff(SB)
	/* now back at PHYSDRAM-based addresses, mainly for SB */
	/* double mapping of PHYSDRAM & KZERO now in effect */

	/*
	 * turn the MMU off
	 */

PUTC('e')
	/* first switch to PHYSDRAM-based addresses */
	DMB

	MOVW	$KSEGM, R7		/* clear segment bits */
	MOVW	$PHYSDRAM, R0		/* set dram base bits */
	BIC	R7, R12			/* adjust SB */
	ORR	R0, R12

	BL	_r15warp(SB)
	/* don't care about saving R14; we're not returning */

	/*
	 * now running in PHYSDRAM segment, not KZERO.
	 */

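	/*
	 * the PHYSDRAM identity map set up in cachesoff keeps these
	 * addresses valid once the mmu goes off below.
	 */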
PUTC('b')
	SUB	$12, SP				/* paranoia */
	BL	cacheuwbinv(SB)
	ADD	$12, SP				/* paranoia */

	/* invalidate mmu mappings */
	MOVW	$KZERO, R0			/* some valid virtual address */
	MCR	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

PUTC('o')
	MRC	CpSC, 0, R0, C(CpCONTROL), C(0)
	BIC	$(CpCmmu|CpCdcache|CpCicache), R0
	MCR	CpSC, 0, R0, C(CpCONTROL), C(0)	/* mmu off */
	BARRIERS

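	/* the mmu is now off: all addresses are physical from here on */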
PUTC('o')
	/* copy in arguments from stack frame before moving stack */
	MOVW	p2+4(FP), R4		/* phys source */
	MOVW	n+8(FP), R5		/* byte count */
	MOVW	p1+0(FP), R6		/* phys destination */

	/* set up a new stack for local vars and memmove args */
	MOVW	R6, SP			/* tiny trampoline stack */
	SUB	$(0x20 + 4), SP		/* back up before a.out header */

//	MOVW	R14, -48(SP)		/* store return addr */
	SUB	$48, SP			/* allocate stack frame */

	MOVW	R5, 40(SP)		/* save count */
	MOVW	R6, 44(SP)		/* save dest/entry */

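	/*
	 * the trampoline stack lies below the copy destination, so the
	 * memmove below cannot overwrite it; count and dest are parked
	 * in the frame across the DELAY and PUTC.
	 */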
	DELAY(printloop2, 2)
PUTC('t')

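	/*
	 * call memmove(dest, src, size) with its arguments at 4(SP),
	 * 8(SP) and 12(SP); 0(SP) holds what would be the saved LR,
	 * and dest also travels in R0.
	 */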
	MOVW	40(SP), R5		/* restore count */
	MOVW	44(SP), R6		/* restore dest/entry */
	MOVW	R6, 0(SP)		/* normally saved LR goes here */
	MOVW	R6, 4(SP)		/* push dest */
	MOVW	R6, R0
	MOVW	R4, 8(SP)		/* push src */
	MOVW	R5, 12(SP)		/* push size */
	BL	memmove(SB)

PUTC('-')
	/*
	 * flush caches
	 */
	BL	cacheuwbinv(SB)

PUTC('>')
	DELAY(printloopret, 1)
PUTC('\r')
	DELAY(printloopnl, 1)
PUTC('\n')
/*
 * jump to kernel entry point.  Note the true kernel entry point is
 * the virtual address KZERO|R6, but this must wait until
 * the MMU is enabled by the kernel in l.s
 */
	MOVW	44(SP), R6		/* restore R6 (dest/entry) */
	ORR	R6, R6			/* NOP: avoid link bug */
	B	(R6)
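	/* not reached: if the jump somehow returns, print '?' and spin */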
PUTC('?')
	B	0(PC)

/*
 * turn the caches off, double map PHYSDRAM & KZERO, invalidate TLBs, revert
 * to PHYSDRAM-based addresses.  upon return, it will be safe to turn off the mmu.
 */
TEXT cachesoff(SB), 1, $-4
	MOVM.DB.W [R14,R1-R10], (R13)		/* save regs on stack */
	MOVW	CPSR, R0
	ORR	$(PsrDirq|PsrDfiq), R0
	MOVW	R0, CPSR
	BARRIERS

	SUB	$12, SP				/* paranoia */
	BL	cacheuwbinv(SB)
	ADD	$12, SP				/* paranoia */

	MRC	CpSC, 0, R0, C(CpCONTROL), C(0)
	BIC	$(CpCicache|CpCdcache), R0
	MCR	CpSC, 0, R0, C(CpCONTROL), C(0)	/* caches off */
	BARRIERS

	/*
	 * caches are off
	 */

	/* invalidate stale TLBs before changing them */
	MOVW	$KZERO, R0			/* some valid virtual address */
	MCR	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

	/* redo double map of PHYSDRAM, KZERO */
	MOVW	$PHYSDRAM, R3
	CMP	$KZERO, R3
	BEQ	noun2map
	MOVW	$(L1+L1X(PHYSDRAM)), R4		/* address of PHYSDRAM's PTE */
	MOVW	$PTEDRAM, R2			/* PTE bits */
	MOVW	$DOUBLEMAPMBS, R5
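	/* write DOUBLEMAPMBS 1MB section entries directly into the level-1 page table */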
_ptrdbl:
	ORR	R3, R2, R1		/* identity-map PHYSDRAM+offset to itself */
	MOVW	R1, (R4)
	ADD	$4, R4				/* bump PTE address */
	ADD	$MiB, R3			/* bump pa */
	SUB.S	$1, R5
	BNE	_ptrdbl
noun2map:

	/*
	 * flush stale TLB entries
	 */

	BARRIERS
	MOVW	$KZERO, R0			/* some valid virtual address */
	MCR	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

	/* switch back to PHYSDRAM addressing, mainly for SB */
	MOVW	$KSEGM, R7		/* clear segment bits */
	MOVW	$PHYSDRAM, R0		/* set dram base bits */
	BIC	R7, R12			/* adjust SB */
	ORR	R0, R12
	BIC	R7, SP
	ORR	R0, SP

	MOVM.IA.W (R13), [R14,R1-R10]		/* restore regs from stack */

	MOVW	$KSEGM, R0		/* clear segment bits */
	BIC	R0, R14			/* adjust link */
	MOVW	$PHYSDRAM, R0		/* set dram base bits */
	ORR	R0, R14

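	/* RET now returns into the PHYSDRAM alias of the caller */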
	RET

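/*
 * adjust the return address and SP to a new address base: the caller
 * leaves the segment mask in R7 and the new base (PHYSDRAM) in R0, so
 * that the RET resumes execution in the PHYSDRAM alias.
 */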
TEXT _r15warp(SB), 1, $-4
	BIC	R7, R14			/* link */
	ORR	R0, R14

	BIC	R7, R13			/* SP */
	ORR	R0, R13
	RET

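/* stubs, presumably to satisfy references from the included cache code */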
TEXT panic(SB), 1, $-4		/* stub */
PUTC('?')
	RET
TEXT pczeroseg(SB), 1, $-4	/* stub */
	RET

#include "cache.v7.s"