/*	$NetBSD: cpufunc_asm_armv5.S,v 1.9 2022/10/20 06:58:38 skrll Exp $	*/

/*
 * Copyright (c) 2002, 2005 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARMv5 assembly functions for manipulating caches.
 * These routines can be used by any core that supports the set/index
 * operations.
 */

#include "assym.h"
#include <machine/asm.h>
#include <arm/locore.h>

/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
ENTRY(armv5_setttb)
	cmp	r1, #0
	beq	1f

	stmfd	sp!, {r0, lr}
	bl	_C_LABEL(armv5_idcache_wbinv_all)
	ldmfd	sp!, {r0, lr}
	cmp	r0, #1

1:	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */
	mcrne	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	RET
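
/*
 * From C this is expected to be reached via the cpufunc switch.  A
 * minimal sketch of the assumed prototype (parameter names are
 * illustrative only):
 *
 *	void armv5_setttb(u_int ttb, bool flush);
 *
 * r0 carries the new translation table base and r1 the flush flag;
 * when the flag is zero both the cache clean and the TLB invalidate
 * above are skipped, on the assumption that the caller performs its
 * own cache and TLB maintenance.
 */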

/*
 * Cache operations.  For the entire cache we use the set/index
 * operations.
 */
	s_max	.req r0
	i_max	.req r1
	s_inc	.req r2
	i_inc	.req r3
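
/*
 * The set/index loops below rely on the operand format of the MCR c7
 * clean/invalidate "D cache entry (Set/Index)" operations: the way
 * number lives in the top bits of the operand and the set number sits
 * just above the line-offset bits.  A hedged sketch of one operand,
 * with illustrative names only:
 *
 *	op = (way << (32 - way_bits)) | (set << line_shift);
 *
 * The four words loaded from .Larmv5_cache_data are therefore expected
 * to be pre-shifted so the loops only ever add, subtract and OR.
 */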

ENTRY_NP(armv5_icache_sync_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_icache_sync_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
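
/*
 * All of the *_range entry points share the prologue above.  Roughly,
 * in C (a sketch of the arithmetic, not the exact code), with va in
 * r0, len in r1 and "line" the D-cache line size:
 *
 *	end = va + len - 1;		// last byte actually touched
 *	va &= ~(line - 1);		// round start down to a line
 *	left = end - va;		// loop while left >= 0
 *
 * Ranges of 16KB (0x4000) and up branch to the whole-cache variant
 * instead, which is cheaper than walking the range line by line.
 */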

ENTRY_NP(armv5_icache_sync_all)
.Larmv5_icache_sync_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to clean Dcache. */

.Larmv5_dcache_wb:
	ldr	ip, .Larmv5_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
1:
	orr	ip, s_max, i_max
2:
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	2b			/* Next index */
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	1b			/* Next set */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

.Larmv5_line_size:
	.word	_C_LABEL(arm_pcache) + DCACHE_LINE_SIZE
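
/*
 * The word above is the address of the D-cache line size field inside
 * the global arm_pcache structure; DCACHE_LINE_SIZE is assumed to be
 * the member offset generated into assym.h.  The range routines fetch
 * the line size with two loads: first the pointer, then the value.
 */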

ENTRY(armv5_dcache_wb_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_dcache_wb
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

ENTRY(armv5_dcache_wbinv_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

/*
 * Note, we must not invalidate everything.  If the range is too big we
 * must use wb-inv of the entire cache.
 */
ENTRY(armv5_dcache_inv_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
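
/*
 * Invalidate-only is typically requested by callers (bus_dma-style
 * post-read syncs, for example) that must not keep stale lines around.
 * Falling back to write-back-invalidate of the whole cache for large
 * ranges, as noted above, avoids discarding dirty lines that lie
 * outside the requested range.
 */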

ENTRY(armv5_idcache_wbinv_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_idcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

ENTRY_NP(armv5_idcache_wbinv_all)
.Larmv5_idcache_wbinv_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to purge Dcache. */

ENTRY(armv5_dcache_wbinv_all)
.Larmv5_dcache_wbinv_all:
	ldr	ip, .Larmv5_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
1:
	orr	ip, s_max, i_max
2:
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	2b			/* Next index */
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	1b			/* Next set */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
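
/*
 * A rough C rendering of the set/index walk above, assuming the four
 * parameters are pre-shifted as described at .Larmv5_cache_data
 * (names are illustrative; set must be signed so the bpl-style
 * termination works):
 *
 *	for (set = sets_max; set >= 0; set -= sets_inc) {
 *		for (way = index_max; way != 0; way -= index_inc)
 *			purge_dcache_se(set | way);
 *		purge_dcache_se(set);
 *	}
 *
 * The extra purge after the inner loop handles way 0, which the
 * tst/bne pair deliberately leaves for last.
 */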

.Larmv5_cache_data:
	.word	_C_LABEL(armv5_dcache_sets_max)

	.bss

/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x)	_DATA_OBJECT(_C_LABEL(x))

/*
 * Parameters for the cache cleaning code.  Note that the order of these
 * four variables is assumed in the code above.  Hence the reason for
 * declaring them in the assembler file.
 */
	.align 0
C_OBJECT(armv5_dcache_sets_max)
	.space	4
C_OBJECT(armv5_dcache_index_max)
	.space	4
C_OBJECT(armv5_dcache_sets_inc)
	.space	4
C_OBJECT(armv5_dcache_index_inc)
	.space	4
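
/*
 * The four words above are normally filled in from C during cache
 * detection (e.g. the MD cpufunc setup).  A hedged sketch of the
 * values the loops expect, for a D-cache with nsets sets, nways ways
 * and a (1 << line_shift)-byte line:
 *
 *	armv5_dcache_sets_inc  = 1U << line_shift;
 *	armv5_dcache_sets_max  = (nsets - 1) << line_shift;
 *	armv5_dcache_index_inc = 1U << (32 - way_bits);
 *	armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
 *
 * where way_bits = log2(nways), placing the way number in the top bits
 * of the set/index operand as required.
 */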