xref: /netbsd-src/sys/arch/arm/arm/cpufunc_asm_arm10.S (revision f648d12d47727113ad5330b0753bb2f2ef8e1045)
/*	$NetBSD: cpufunc_asm_arm10.S,v 1.1 2003/09/06 09:12:29 rearnsha Exp $	*/

/*
 * Copyright (c) 2002 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARM10 assembly functions for CPU / MMU / TLB specific operations
 */

#include <machine/cpu.h>
#include <machine/asm.h>

/*
 * Functions to set the MMU Translation Table Base register.
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
ENTRY(arm10_setttb)
	/*
	 * Load a new Translation Table Base.
	 *
	 * r0 = address of the new L1 translation table.
	 *
	 * The caches hold data under the old translations, so write
	 * back and invalidate both caches before switching, then
	 * invalidate the TLBs.  r0 is preserved across the helper call.
	 */
	stmfd	sp!, {r0, lr}
	bl	_C_LABEL(arm10_idcache_wbinv_all)
	ldmfd	sp!, {r0, lr}

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	bx	lr

/*
 * TLB functions
 */
ENTRY(arm10_tlb_flushID_SE)
	/*
	 * Invalidate a single entry, identified by virtual address in
	 * r0, from both the instruction and data TLBs.
	 */
	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
	mcr	p15, 0, r0, c8, c5, 1	/* flush I tlb single entry */
	bx	lr

ENTRY(arm10_tlb_flushI_SE)
	/*
	 * Invalidate a single entry, identified by virtual address in
	 * r0, from the instruction TLB only.
	 */
	mcr	p15, 0, r0, c8, c5, 1	/* flush I tlb single entry */
	bx	lr


/*
 * Cache operations.  For the entire cache we use the set/index
 * operations.
 */
	/*
	 * Symbolic names for the registers used by the whole-cache
	 * set/index loops below.  The four values are loaded together
	 * (ldmia) from the arm10_dcache_* variables declared in .bss at
	 * the end of this file, so the order here must match the order
	 * of those declarations.
	 */
	s_max	.req r0			/* highest set value */
	i_max	.req r1			/* highest index value */
	s_inc	.req r2			/* set increment */
	i_inc	.req r3			/* index increment */

ENTRY_NP(arm10_icache_sync_range)
	/*
	 * Synchronize the I-cache with memory for the range
	 * [r0, r0 + r1): clean each D-cache line (write back) and
	 * invalidate the matching I-cache line, a line at a time.
	 *
	 * r0 = start virtual address, r1 = length in bytes.
	 *
	 * Ranges of 16KB or more are handled by the whole-cache
	 * routine instead, which is cheaper than walking the range.
	 */
	ldr	ip, .Larm10_line_size
	cmp	r1, #0x4000		/* >= 16KB: do the whole cache */
	bcs	.Larm10_icache_sync_all
	ldr	ip, [ip]		/* ip = D-cache line size */
	sub	r3, ip, #1		/* r3 = line offset mask */
	and	r2, r0, r3		/* r2 = start's offset into its line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, r3		/* align start down to a line */
.Larm10_sync_next:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip		/* next line */
	subs	r1, r1, ip
	bpl	.Larm10_sync_next
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	bx	lr

ENTRY_NP(arm10_icache_sync_all)
.Larm10_icache_sync_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to clean Dcache. */

.Larm10_dcache_wb:
	/*
	 * Clean (write back, without invalidating) the entire D-cache
	 * using set/index operations.  Also entered directly from
	 * arm10_dcache_wb_range when the range is >= 16KB.
	 *
	 * The inner loop walks the index field from i_max down to 0
	 * within one set; the outer loop steps the set field from
	 * s_max down to 0.
	 */
	ldr	ip, .Larm10_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set:
	orr	ip, s_max, i_max	/* ip = current set | highest index */
.Lnext_index:
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	.Lnext_index		/* Next index */
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	.Lnext_set		/* Next set */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	bx	lr

.Larm10_line_size:
	/* Address of the primary D-cache line size variable. */
	.word	_C_LABEL(arm_pdcache_line_size)

ENTRY(arm10_dcache_wb_range)
	/*
	 * Clean (write back, without invalidating) the D-cache for the
	 * range [r0, r0 + r1), one line at a time.
	 *
	 * r0 = start virtual address, r1 = length in bytes.
	 *
	 * Ranges of 16KB or more are handled by cleaning the whole
	 * cache instead.
	 */
	ldr	ip, .Larm10_line_size
	cmp	r1, #0x4000		/* >= 16KB: clean the whole cache */
	bcs	.Larm10_dcache_wb
	ldr	ip, [ip]		/* ip = D-cache line size */
	sub	r3, ip, #1		/* r3 = line offset mask */
	and	r2, r0, r3		/* r2 = start's offset into its line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, r3		/* align start down to a line */
.Larm10_wb_next:
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip		/* next line */
	subs	r1, r1, ip
	bpl	.Larm10_wb_next
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	bx	lr

ENTRY(arm10_dcache_wbinv_range)
	/*
	 * Write back and invalidate (purge) the D-cache for the range
	 * [r0, r0 + r1), one line at a time.
	 *
	 * r0 = start virtual address, r1 = length in bytes.
	 *
	 * Ranges of 16KB or more are handled by purging the whole
	 * cache instead.
	 */
	ldr	ip, .Larm10_line_size
	cmp	r1, #0x4000		/* >= 16KB: purge the whole cache */
	bcs	.Larm10_dcache_wbinv_all
	ldr	ip, [ip]		/* ip = D-cache line size */
	sub	r3, ip, #1		/* r3 = line offset mask */
	and	r2, r0, r3		/* r2 = start's offset into its line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, r3		/* align start down to a line */
.Larm10_wbinv_next:
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip		/* next line */
	subs	r1, r1, ip
	bpl	.Larm10_wbinv_next
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	bx	lr

/*
 * Note, we must not invalidate everything.  If the range is too big we
 * must use wb-inv of the entire cache.
 */
ENTRY(arm10_dcache_inv_range)
	/*
	 * Invalidate (discard, without writing back) D-cache lines for
	 * the range [r0, r0 + r1).
	 *
	 * r0 = start virtual address, r1 = length in bytes.
	 *
	 * For ranges of 16KB or more we fall back to write-back-and-
	 * invalidate of the whole cache: discarding everything could
	 * throw away unrelated dirty data outside the range.
	 */
	ldr	ip, .Larm10_line_size
	cmp	r1, #0x4000		/* >= 16KB: wb-inv the whole cache */
	bcs	.Larm10_dcache_wbinv_all
	ldr	ip, [ip]		/* ip = D-cache line size */
	sub	r3, ip, #1		/* r3 = line offset mask */
	and	r2, r0, r3		/* r2 = start's offset into its line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, r3		/* align start down to a line */
.Larm10_inv_next:
	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
	add	r0, r0, ip		/* next line */
	subs	r1, r1, ip
	bpl	.Larm10_inv_next
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	bx	lr

176
ENTRY(arm10_idcache_wbinv_range)
	/*
	 * Purge the D-cache and invalidate the I-cache for the range
	 * [r0, r0 + r1), one line at a time.
	 *
	 * r0 = start virtual address, r1 = length in bytes.
	 *
	 * Ranges of 16KB or more are handled by the whole-cache
	 * routine instead.
	 */
	ldr	ip, .Larm10_line_size
	cmp	r1, #0x4000		/* >= 16KB: do the whole cache */
	bcs	.Larm10_idcache_wbinv_all
	ldr	ip, [ip]		/* ip = D-cache line size */
	sub	r3, ip, #1		/* r3 = line offset mask */
	and	r2, r0, r3		/* r2 = start's offset into its line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, r3		/* align start down to a line */
.Larm10_id_wbinv_next:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip		/* next line */
	subs	r1, r1, ip
	bpl	.Larm10_id_wbinv_next
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	bx	lr

ENTRY_NP(arm10_idcache_wbinv_all)
.Larm10_idcache_wbinv_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to purge Dcache. */

ENTRY(arm10_dcache_wbinv_all)
.Larm10_dcache_wbinv_all:
	/*
	 * Write back and invalidate (purge) the entire D-cache using
	 * set/index operations; same loop structure as the clean-only
	 * version above.  Also entered directly from the range
	 * routines when the range is >= 16KB.
	 */
	ldr	ip, .Larm10_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set_inv:
	orr	ip, s_max, i_max	/* ip = current set | highest index */
.Lnext_index_inv:
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	.Lnext_index_inv		/* Next index */
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	.Lnext_set_inv		/* Next set */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	bx	lr

.Larm10_cache_data:
	/* Address of the 4-word arm10_dcache_* parameter block. */
	.word	_C_LABEL(arm10_dcache_sets_max)

/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher cpu_switch().
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 *	r1, r4-r13 must be preserved
 */
ENTRY(arm10_context_switch)
	/*
	 * Reload the Translation Table Base for a context switch.
	 *
	 * r0 = new TTB value.  Special calling convention: r1 and
	 * r4-r13 must be (and are) left untouched.
	 *
	 * We can assume that the caches will only contain kernel addresses
	 * at this point.  So no need to flush them again.
	 */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	mcr	p15, 0, r0, c2, c0, 0	/* set the new TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* and flush the I+D tlbs */

	/* Paranoia -- make sure the pipeline is empty. */
	nop
	nop
	nop
	bx	lr

	.bss

/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x)	_DATA_OBJECT(_C_LABEL(x))

/*
 * Parameters for the cache cleaning code.  Note that the order of these
 * four variables is assumed in the code above (they are fetched with a
 * single ldmia into s_max/i_max/s_inc/i_inc).  Hence the reason for
 * declaring them in the assembler file.
 *
 * NOTE(review): these words start out zero; presumably the CPU setup
 * code initializes them from the probed cache geometry before any of
 * the set/index loops run -- confirm against cpufunc.c.  Also, the
 * ldmia above needs word-aligned data; ".align 0" alone does not
 * request that -- verify nothing unaligned precedes these in .bss.
 */
	.align 0
C_OBJECT(arm10_dcache_sets_max)
	.space	4
C_OBJECT(arm10_dcache_index_max)
	.space	4
C_OBJECT(arm10_dcache_sets_inc)
	.space	4
C_OBJECT(arm10_dcache_index_inc)
	.space	4

269