/* xref: /plan9/sys/src/9/bcm/rebootcode.s (revision 5d9de2d38d2503efca29e12e0e32036368a7a75f) */
/*
 * armv6 reboot code
 */
#include "arm.s"

/*
 * Turn off MMU, then copy the new kernel to its correct location
 * in physical memory.  Then jump to the start of the kernel.
 */

/*
 * main(PADDR(entry), PADDR(code), size);
 *
 * Plan 9 ARM convention: the first argument arrives in R0, the rest
 * sit at 4(FP), 8(FP), ...  All three are copied into R8-R10 up front
 * because the stack's virtual mapping disappears once the MMU is off.
 * Never returns.  Clobbers everything; runs with interrupts disabled.
 */
TEXT	main(SB), 1, $-4
	MOVW	$setR12(SB), R12

	/* copy in arguments before stack gets unmapped */
	MOVW	R0, R8			/* entry point */
	MOVW	p2+4(FP), R9		/* source */
	MOVW	n+8(FP), R10		/* byte count */

	/* SVC mode, interrupts disabled */
	MOVW	$(PsrDirq|PsrDfiq|PsrMsvc), R1
	MOVW	R1, CPSR

	/* prepare to turn off mmu: caches off, PHYSDRAM double-mapped */
	BL	cachesoff(SB)

	/* turn off mmu */
	MRC	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	BIC	$CpCmmu, R1
	MCR	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl

	/* set up a tiny stack for local vars and memmove args */
	MOVW	R8, SP			/* stack top just before kernel dest */
	SUB	$20, SP			/* allocate stack frame */

	/*
	 * copy the kernel to its final destination; first memmove
	 * argument goes in R0, the others into the outgoing arg slots.
	 */
	MOVW	R8, 16(SP)		/* save dest (entry point) across call */
	MOVW	R8, R0			/* first arg is dest */
	MOVW	R9, 8(SP)		/* push src */
	MOVW	R10, 12(SP)		/* push size */
	BL	memmove(SB)
	MOVW	16(SP), R8		/* restore entry point */

	/* jump to kernel physical entry point */
	B	(R8)
	B	0(PC)			/* sanity: spin if the jump ever returns */

/*
 * turn the caches off, double map PHYSDRAM & KZERO, invalidate TLBs, revert
 * to tiny addresses.  upon return, it will be safe to turn off the mmu.
 * clobbers R0-R2, and returns with SP invalid.
 *
 * NOTE(review): BARRIERS, the Cp* constants and the L1/L1X page-table
 * macros come from arm.s / kernel headers; not visible here.
 */
TEXT cachesoff(SB), 1, $-4

	/* write back and invalidate caches */
	BARRIERS
	MOVW	$0, R0
	MCR	CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEall
	MCR	CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall

	/* turn caches off (D, I and branch prediction) */
	MRC	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	BIC	$(CpCdcache|CpCicache|CpCpredict), R1
	MCR	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl

	/* invalidate stale TLBs before changing them */
	BARRIERS
	MOVW	$KZERO, R0			/* some valid virtual address */
	MCR	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

	/* from here on, R0 is base of physical memory */
	MOVW	$PHYSDRAM, R0

	/* redo double map of first MiB PHYSDRAM = KZERO */
	MOVW	$(L1+L1X(PHYSDRAM)), R2		/* address of PHYSDRAM's PTE */
	MOVW	$PTEDRAM, R1			/* PTE bits */
	ORR	R0, R1				/* dram base */
	MOVW	R1, (R2)

	/* invalidate stale TLBs again, now that the PTE changed */
	BARRIERS
	MCR	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

	/*
	 * relocate SB and the return address to PHYSDRAM addressing,
	 * so execution continues in the identity map after MMU-off.
	 */
	MOVW	$KSEGM, R1		/* clear segment bits */
	BIC	R1, R12			/* adjust SB */
	ORR	R0, R12
	BIC	R1, R14			/* adjust return address */
	ORR	R0, R14

	RET
94