/* xref: /netbsd-src/sys/arch/evbarm/tsarm/tsarm_start.S (revision b1c86f5f087524e68db12794ee9c3e3da1ab17a0) */
/*	$NetBSD: tsarm_start.S,v 1.6 2009/10/21 14:15:51 rmind Exp $ */

/*
 * Copyright (c) 2003
 *	Ichiro FUKUHARA <ichiro@ichiro.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <machine/asm.h>
#include <arm/armreg.h>
#include <arm/arm32/pte.h>

#include <arm/ep93xx/ep93xxreg.h>

	.section .start,"ax",%progbits

	.global	_C_LABEL(ts7xxx_start)
_C_LABEL(ts7xxx_start):

        /*
         * Bootstrap entry for the Technologic Systems TS-7xxx
         * (Cirrus EP93xx) boards: turn the MMU off, build a temporary
         * L1 section table, turn the MMU back on and jump to the
         * kernel's "start" entry point.
         *
         * We will go ahead and disable the MMU here so that we don't
         * have to worry about flushing caches, etc.
         *
         * Note that we may not currently be running VA==PA, which means
         * we'll need to leap to the next insn after disabling the MMU.
         * Clearing the top 8 bits of the PC-relative address of
         * Lunmapped yields its physical address (SDRAM lives in low
         * physical memory).
         */
        adr     r8, Lunmapped
        bic     r8, r8, #0xff000000     /* clear upper 8 bits -> PA of Lunmapped */

	/*
	 * Setup coprocessor 15: read the control register (c1), clear
	 * the MMU-enable bit, write it back.  Translation is off from
	 * here until re-enabled below.
	 */
        mrc     p15, 0, r2, c1, c0, 0
        bic     r2, r2, #CPU_CONTROL_MMU_ENABLE
        mcr     p15, 0, r2, c1, c0, 0

        nop                             /* let the control-register    */
        nop                             /* write take effect before    */
        nop                             /* the jump below              */

        mov     pc, r8                  /* Heave-ho! (continue at PA, MMU off) */

Lunmapped:
	/*
	 * We want to construct a memory map that maps us
	 * VA==PA (SDRAM at 0x00000000).  We create these
	 * mappings uncached and unbuffered to be safe.
	 *
	 * The L1 table lives at the physical address stored at Ltable
	 * below; each of its 4096 word entries is a 1MB section
	 * descriptor.
	 */
	/*
	 * Step 1: Map the entire 4GB address space VA==PA.
	 */
	adr	r4, Ltable
	ldr	r0, [r4]			/* r0 = &l1table (PA) */

	mov	r1, #(L1_TABLE_SIZE / 4)	/* 4096 entries */
	mov	r2, #(L1_S_SIZE)		/* 1MB / section */
	mov	r3, #(L1_S_AP(AP_KRW))		/* kernel read/write */
	orr	r3, r3, #(L1_TYPE_S)		/* L1 entry is section */
1:
	str	r3, [r0], #0x04			/* store descriptor, advance */
	add	r3, r3, r2			/* next 1MB section */
	subs	r1, r1, #1
	bgt	1b


        /*
         * Step 2: Map VA 0xc0000000->0xc07fffff to PA 0x00000000->0x007fffff
         * (first 8MB of SDRAM at the kernel's virtual load address).
         * Note: r2 still holds L1_S_SIZE from step 1.
         */
        ldr     r0, [r4]
        add     r0, r0, #(0xc00 * 4)            /* offset to 0xc00xxxxx */

        mov     r1, #0x8                        /* 8MB */
        mov     r3, #(L1_S_AP(AP_KRW))
        orr     r3, r3, #(L1_TYPE_S)
1:
        str     r3, [r0], #0x04
        add     r3, r3, r2
        subs    r1, r1, #1
        bgt     1b

	/*
	 * Step 3: Map VA 0xf0000000->0xf0100000 to PA 0x80000000->0x80100000
	 * (EP93xx on-chip peripheral space -- see ep93xxreg.h).
	 */
	ldr	r0, [r4]

	add	r0, r0, #(0xf00 * 4)		/* offset to 0xf0000000 */
	mov	r3, #0x80000000
	orr	r3, r3, #(L1_S_AP(AP_KRW))
	orr	r3, r3, #(L1_TYPE_S)
	str	r3, [r0], #4			/* r0 now at the 0xf01 entry */

	/*
	 * Step 4: Map VA 0xf0100000->0xf0300000 to PA 0x80800000->0x80a00000
	 * (two 1MB sections).
	 */
	mov	r3, #0x80000000
	add	r3, r3, #0x00800000
	orr	r3, r3, #(L1_S_AP(AP_KRW))
	orr	r3, r3, #(L1_TYPE_S)
	str	r3, [r0], #0x4
	add	r3, r3, r2			/* second section */
	str	r3, [r0], #0x4

	/*
	 * Step 5: Map VA 0xf0300000->0xf4300000 to PA 0x10000000->0x14000000
	 * (64 x 1MB sections).
	 */
        mov     r1, #0x40                       /* 64MB */
        mov     r3, #(L1_S_AP(AP_KRW))
        orr     r3, r3, #(L1_TYPE_S)
	orr	r3, r3, #0x10000000
1:
        str     r3, [r0], #0x04
        add     r3, r3, r2
        subs    r1, r1, #1
        bgt     1b

	/*
	 * Step 6: Map VA 0xf4300000->0xf8300000 to PA 0x20000000->0x24000000
	 * (64 x 1MB sections).
	 */
        mov     r1, #0x40                       /* 64MB */
        mov     r3, #(L1_S_AP(AP_KRW))
        orr     r3, r3, #(L1_TYPE_S)
	orr	r3, r3, #0x20000000
1:
        str     r3, [r0], #0x04
        add     r3, r3, r2
        subs    r1, r1, #1
        bgt     1b


	/* OK!  Page table is set up.  Give it to the CPU (TTB, CP15 c2). */
	adr	r0, Ltable
	ldr	r0, [r0]
	mcr	p15, 0, r0, c2, c0, 0

	/* Flush the old TLBs (invalidate entire TLB, c8/c7), just in case. */
	mcr	p15, 0, r0, c8, c7, 0

	/*
	 * Set the Domain Access register.  Very important!
	 * 0x1 = "client" access for domain 0: permission checks use the
	 * AP bits of the section descriptors built above.
	 */
	mov	r0, #1
	mcr	p15, 0, r0, c3, c0, 0

	/* Get ready to jump to the "real" kernel entry point... */
	ldr	r1, Lstart
	mov	r1, r1			/* Make sure the load completes! */

	/* OK, let's enable the MMU. */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, r2, #CPU_CONTROL_MMU_ENABLE
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop

	/* CPWAIT sequence to make sure the MMU is on... */
	mrc	p15, 0, r2, c2, c0, 0	/* arbitrary read of CP15 */
	mov	r2, r2			/* force it to complete */
	mov	pc, r1			/* leap to kernel entry point! */

/* Physical address of the temporary L1 translation table. */
Ltable:
	.word	0x4000

/* Link-time (virtual) address of the kernel entry point. */
Lstart:
	.word	start

185