/*	$NetBSD: nslu2_start.S,v 1.2 2008/04/28 20:23:17 martin Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA, Jason R. Thorpe, and Steve C. Woodford.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>
#include <arm/armreg.h>
#include <arm/arm32/pte.h>

#include <arm/xscale/ixp425reg.h>

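/*
 * The .start section is linked at the head of the loaded image, so
 * execution begins at nslu2_start when the boot loader (RedBoot on a
 * stock NSLU2) jumps to the kernel.
 */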
	.section .start,"ax",%progbits

	.global	_C_LABEL(nslu2_start)
_C_LABEL(nslu2_start):
	/*
	 * We will go ahead and disable the MMU here so that we don't
	 * have to worry about flushing caches, etc.
	 *
	 * Note that we may not currently be running VA==PA, which means
	 * we'll need to leap to the next insn after disabling the MMU.
	 */
	adr	r8, Lunmapped
	bic	r8, r8, #0xff000000	/* clear upper 8 bits */
	orr	r8, r8, #0x10000000	/* OR in physical base address */

	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #CPU_CONTROL_MMU_ENABLE
	orr	r2, r2, #CPU_CONTROL_BEND_ENABLE
	mcr	p15, 0, r2, c1, c0, 0

	/* Give the control register write time to take effect. */
	nop
	nop
	nop

	mov	pc, r8			/* Heave-ho! */

Lunmapped:
	/*
	 * We want to construct a memory map that maps us
	 * VA==PA (SDRAM at 0x10000000). We create these
	 * mappings uncached and unbuffered to be safe.
	 */

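	/*
	 * Each L1 entry written below is an ARM section descriptor:
	 * bits [31:20] give the 1MB section base, bits [11:10] the AP
	 * (AP_KRW, kernel read/write), bits [8:5] the domain (left as
	 * 0), and bits [1:0] = 10 mark the entry as a section.  The C
	 * and B bits stay clear, which is what makes these mappings
	 * uncached and unbuffered.
	 */
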
	/*
	 * Step 1: Map the entire address space VA==PA.
	 */
	adr	r0, Ltable
	ldr	r0, [r0]			/* r0 = &l1table */

	mov	r1, #(L1_TABLE_SIZE / 4)	/* 4096 entries */
	mov	r2, #(L1_S_SIZE)		/* 1MB per section */
	mov	r3, #(L1_S_AP(AP_KRW))		/* kernel read/write */
	orr	r3, r3, #(L1_TYPE_S)		/* L1 entry is section */
1:
	str	r3, [r0], #0x04
	add	r3, r3, r2
	subs	r1, r1, #1
	bgt	1b

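	/*
	 * The identity map above is what lets this code, which runs
	 * from SDRAM at PA 0x10xxxxxx with the MMU off, keep executing
	 * once the MMU is turned back on below.
	 */
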
	/*
	 * Step 2: Map VA 0xc0000000->0xc3ffffff to PA 0x10000000->0x13ffffff.
	 */
	adr	r0, Ltable			/* r0 = &l1table */
	ldr	r0, [r0]

	mov	r3, #(L1_S_AP(AP_KRW))
	orr	r3, r3, #(L1_TYPE_S)
	orr	r3, r3, #0x10000000
	add	r0, r0, #(0xc00 * 4)		/* offset to 0xc00xxxxx */
	mov	r1, #0x40			/* 64MB */
1:
	str	r3, [r0], #0x04
	add	r3, r3, r2			/* r2 still holds L1_S_SIZE */
	subs	r1, r1, #1
	bgt	1b

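	/*
	 * 0xc0000000 is KERNEL_BASE, the address the kernel is linked
	 * at, so this makes the kernel image visible at its linked VA.
	 */
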
	/*
	 * Step 3: Map VA 0xf0000000->0xf0100000 to PA 0xc8000000->0xc8100000.
	 */
	adr	r0, Ltable			/* r0 = &l1table */
	ldr	r0, [r0]

	add	r0, r0, #(0xf00 * 4)		/* offset to 0xf0000000 */
	mov	r3, #0xc8000000
	orr	r3, r3, #(L1_S_AP(AP_KRW))
	orr	r3, r3, #(L1_TYPE_S)
	str	r3, [r0]

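	/*
	 * PA 0xc8000000 holds the IXP4xx on-chip peripheral registers
	 * (UARTs, GPIO, timers), so the console hardware is reachable
	 * at VA 0xf0000000 once the MMU is on.
	 */
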
	/*
	 * Step 4: Map VA 0xf0200000->0xf0300000 to PA 0xcc000000->0xcc100000.
	 */
	adr	r0, Ltable			/* r0 = &l1table */
	ldr	r0, [r0]

	add	r0, r0, #(0xf00 * 4)		/* offset to 0xf0000000... */
	add	r0, r0, #(0x002 * 4)		/* ...plus 2MB = 0xf0200000 */
	mov	r3, #0xcc000000
	orr	r3, r3, #(L1_S_AP(AP_KRW))
	orr	r3, r3, #(L1_TYPE_S)
	str	r3, [r0]

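	/*
	 * PA 0xcc000000 holds the IXP4xx SDRAM (memory controller)
	 * configuration registers.
	 */
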
	/* OK!  Page table is set up.  Give it to the CPU. */
	adr	r0, Ltable
	ldr	r0, [r0]
	mcr	p15, 0, r0, c2, c0, 0

	/* Flush the old TLBs, just in case. */
	mcr	p15, 0, r0, c8, c7, 0

	/* Set the Domain Access register.  Very important! */
	mov	r0, #1
	mcr	p15, 0, r0, c3, c0, 0
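
	/*
	 * DACR = 1 makes domain 0 (the domain every section above uses)
	 * a "client" domain, so accesses are checked against the AP
	 * bits in each descriptor.
	 */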

	/* Get ready to jump to the "real" kernel entry point... */
	ldr	r1, Lstart
	mov	r1, r1			/* Make sure the load completes! */

	/* OK, let's enable the MMU. */
	mrc	p15, 0, r2, c1, c0, 0
	orr	r2, r2, #CPU_CONTROL_MMU_ENABLE
	orr	r2, r2, #CPU_CONTROL_BEND_ENABLE
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop

	/* CPWAIT sequence to make sure the MMU is on... */
	mrc	p15, 0, r2, c2, c0, 0	/* arbitrary read of CP15 */
	mov	r2, r2			/* force it to complete */
	mov	pc, r1			/* leap to kernel entry point! */

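	/*
	 * PA of our L1 page table: 0x10200000 - 0x4000 = 0x101fc000,
	 * i.e. the top 16KB (L1_TABLE_SIZE) of the first 2MB of SDRAM,
	 * which also satisfies the 16KB alignment the table requires.
	 */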
Ltable:
	.word	0x10200000 - 0x4000

Lstart:
	.word	start			/* kernel entry point (linked VA) */