xref: /netbsd-src/sys/arch/evbarm/imx23_olinuxino/imx23_olinuxino_start.S (revision ae7ee7f4509dc0f498d7d8042282a716c7d3fec5)
/* $Id: imx23_olinuxino_start.S,v 1.3 2015/01/10 12:11:39 jmcneill Exp $ */

/*
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Petri Laakso.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_imx.h"

#include <machine/asm.h>
#include <machine/pmap.h>
#include <arm/armreg.h>
#include <arm/imx/imx23var.h>
#ifdef DEBUG
#include <arm/imx/imx23_uartdbgreg.h>
#endif

.section .start,"ax",%progbits

/*
 * Kernel entry point for the i.MX23 OLinuXino.
 *
 * Entered in a privileged mode with the MMU off.  Builds the initial
 * first level (section) translation table from mmu_init_table, points
 * TTBR at it, sets domain 0 to client mode, turns on the MMU together
 * with the I/D caches and alignment faults, and finally jumps to start
 * in locore.S.  Never returns.
 */
.global	_C_LABEL(olinuxino_start)
_C_LABEL(olinuxino_start):
	/*
	 * Set up the first level page table. The page table has 4096 section
	 * page table entries which each one maps 1MB of virtual memory.
	 * Section entries are mapped from mmu_init_table to the page table.
	 */
	l1pt_p	.req r0		/* physical address of the L1 page table */
	mit_p	.req r1		/* cursor into mmu_init_table */
	va	.req r2		/* virtual address of the current mapping */
	pa	.req r3		/* physical address of the current mapping */
	n_sec	.req r4		/* 1MB sections left to map in this entry */
	attr	.req r5		/* section attribute bits (AP, domain, C/B) */
	pte_p	.req r6		/* address of the PTE being written */
	sec	.req r7		/* assembled section descriptor */
	tmp	.req r8
	tmp2	.req r9

	ldr	l1pt_p, .Ll1_pt

	/* Zero the page table. */
	mov	tmp, #0
	add	tmp2, l1pt_p, #L1_TABLE_SIZE	/* tmp2 = end of table */
1:	str	tmp, [l1pt_p], #4
	cmp	l1pt_p, tmp2
	blt	1b

	ldr	l1pt_p, .Ll1_pt		/* rewind to the table start */

	/* Map sections to the page table. */
	ldr	mit_p, =mmu_init_table
	ldmia	mit_p!, {va, pa, n_sec, attr}	/* fetch first record */

	/*
	 * Calculate PTE addresses for a MVA's.
	 *
	 * Bits[31:14] of the Translation Table Base register are concatenated
	 * with bits[31:20] of the modified virtual address and two zero bits
	 * to produce a physical address of the page table entry for a MVA:
	 *
	 * PTE = (TTBR & 0xffffc000) | ((MVA & 0xfff00000)>>18)
	 */
3:	ldr	tmp, =0xffffc000
	and	pte_p, l1pt_p, tmp
	ldr	tmp, =0xfff00000
	and	va, va, tmp
	mov	va, va, LSR #18
	orr	pte_p, pte_p, va

2:	orr	sec, pa, attr
	str	sec, [pte_p], #4	/* Store #n_sec sections to the page */
	add	pa, pa, #0x100000	/* table. */
	subs	n_sec, #1
	bne	2b

	ldmia	mit_p!, {va, pa, n_sec, attr}	/* fetch next record */
	cmp	n_sec, #0		/* n_sec == 0 terminates the table */
	bne	3b

	/*
	 * The Translation Table Base Register holds the physical address of
	 * the page table.
	 */
	mcr	p15, 0, l1pt_p, c2, c0, 0

	.unreq	l1pt_p
	.unreq	mit_p
	.unreq	va
	.unreq	pa
	.unreq	n_sec
	.unreq	attr
	.unreq	pte_p
	.unreq	sec
	.unreq	tmp
	.unreq	tmp2

	/*
	 * Sections are in domain 0 and we set D0 access control to client
	 * mode, which means AP bits are checked. Since we are running
	 * privileged mode and APs are kernel read/write, access is granted.
	 */
	mov	r0, #DOMAIN_CLIENT<<(PMAP_DOMAIN_KERNEL*2)
	mcr	p15, 0, r0, c3, c0, 0

	/*
	 * Enable the MMU, the instruction and data caches and alignment
	 * faults in the CP15 control register.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	ldr	r1, =(CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE \
			| CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_MMU_ENABLE)
	orr	r0, r0, r1
	mcr	p15, 0, r0, c1, c0, 0
	nop	/* Fetch flat. */
	nop	/* Fetch flat. */

	/*
	 * Now MMU is on and instruction fetches are translated.
	 */

	/*
	 * Jump to start in locore.S. start sets the sp point to DRAM, zeroes
	 * the .bss and calls initarm. start never returns.
	 */
	ldr	pc, =start
1:	b	1b

	/* NOTREACHED */

/*
 * Initial first level translation table on a 16kB boundary located at the
 * end of the DRAM.
 *
 * The translation table has 4096 32-bit section entries, each describing 1MB of
 * virtual memory which means 4GB of virtual memory to be addressed.
 */
.Ll1_pt:
	.word (DRAM_BASE + MEMSIZE * 1024 * 1024 - L1_TABLE_SIZE)

/*
 * One mmu_init_table record: virtual address, physical address, number
 * of 1MB sections to map and the section attribute bits.  A record with
 * n_sec == 0 terminates the table (see the ldmia loop in
 * olinuxino_start above).
 */
#define MMU_INIT(va,pa,n_sec,attr)					\
	.word va;							\
	.word pa;							\
	.word n_sec;							\
	.word attr;

mmu_init_table:
	/* On-chip RAM */
	MMU_INIT(0x00000000, 0x00000000,
		1,
		L1_S_AP(AP_KRW) | L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_S_PROTO)

	/* On-chip ROM (Vectors) */
	MMU_INIT(0xFFFF0000, 0xFFFF0000,
		1,
		L1_S_AP(AP_KRW) | L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_S_PROTO)

	/* DRAM, mapped cacheable and bufferable. */
	MMU_INIT(KERNEL_BASE_virt, KERNEL_BASE_phys,
		MEMSIZE,
		L1_S_AP(AP_KRW) | L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_S_C |\
			L1_S_B | L1_S_PROTO)

	/* VA == PA mapping for instruction fetches just after MMU_ENABLE. */
	MMU_INIT(KERNEL_BASE_phys, KERNEL_BASE_phys,
		1,
		L1_S_AP(AP_KRW) | L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_S_C |\
			L1_S_B | L1_S_PROTO)

	/* Peripherals */
	MMU_INIT(APBH_BASE, APBH_BASE,
		1,
		L1_S_AP(AP_KRW) | L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_S_PROTO)

	MMU_INIT(0, 0, 0, 0)	/* end-of-table marker (n_sec == 0) */

#ifdef DEBUG
/*
 * void dbputc(int c)
 *
 * Write the character in r0 to the Debug UART.  Busy-waits until the
 * transmit FIFO has room before storing the character.  Preserves
 * r0-r2 (saved and restored on the stack).
 */
.global	_C_LABEL(dbputc)
_C_LABEL(dbputc):
	stmfd	sp!, {r0, r1, r2, lr}

	/* Wait until transmit FIFO has space for the new character. */
	ldr	r1, =(HW_UARTDBG_BASE + HW_UARTDBGFR)
1:	ldr	r2, [r1]
	ands	r2, r2, #0x20	/* 0x20 = HW_UARTDBGFR_TXFF (TX FIFO full) */
	bne	1b

	ldr	r1, =(HW_UARTDBG_BASE + HW_UARTDBGDR)
	strb	r0, [r1]

	ldmfd	sp!, {r0, r1, r2, pc}
#endif
