/*	$NetBSD: cpufunc_asm_armv5_ec.S,v 1.7 2021/10/07 09:58:27 rin Exp $	*/

/*
 * Copyright (c) 2002, 2005 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARMv5 assembly functions for manipulating caches.
 * These routines can be used by any core that supports both the set/index
 * operations and the test and clean operations for efficiently cleaning the
 * entire DCache.  If a core does not have the test and clean operations, but
 * does have the set/index operations, use the routines in cpufunc_asm_armv5.S.
 * This source was derived from that file.
 */

#include "assym.h"
#include <machine/asm.h>
#include <arm/locore.h>

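/*
 * A reader's key to the CP15 c7 (cache operations) encodings used in
 * this file, per the ARMv5 architecture and the ARM9E-family TRMs:
 *
 *	mcr p15, 0, rN, c7, c5, 0	invalidate entire ICache
 *	mcr p15, 0, rN, c7, c5, 1	invalidate ICache line by VA
 *	mcr p15, 0, rN, c7, c6, 1	invalidate DCache line by VA
 *	mcr p15, 0, rN, c7, c10, 1	clean DCache line by VA
 *	mcr p15, 0, rN, c7, c14, 1	clean+invalidate DCache line by VA
 *	mrc p15, 0, APSR_nzcv, c7, c10, 3	test and clean DCache
 *	mrc p15, 0, APSR_nzcv, c7, c14, 3	test, clean and invalidate DCache
 *	mcr p15, 0, rN, c7, c10, 4	drain write buffer
 *
 * The "test" forms set the condition flags (Z stays clear while dirty
 * lines remain), which is why they are read with mrc into APSR_nzcv
 * and looped on with bne.
 */
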
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
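/*
 * Register usage, as the code below implies: r0 holds the new
 * translation table base to load into the TTB (CP15 c2), and r1 is a
 * flush flag -- if it is non-zero, the caches are cleaned and
 * invalidated and the TLBs flushed before the switch; if zero, the
 * TTB is simply reloaded.
 */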
ENTRY(armv5_ec_setttb)
	/*
	 * Some other ARM ports save registers on the stack, call the
	 * idcache_wbinv_all function and then restore the registers from the
	 * stack before setting the TTB.  I observed that this caused a
	 * problem when the old and new translation table entries' buffering
	 * bits were different.  If I saved the registers in other registers
	 * or invalidated the caches when I returned from idcache_wbinv_all,
	 * it worked fine.  If not, I ended up executing at an invalid PC.
	 * For armv5_ec_setttb, the idcache_wbinv_all is simple enough, I just
	 * do it directly and entirely avoid the problem.
	 */
	cmp	r1, #0			/* flush requested? */
	beq	1f			/* no: just reload the TTB */

	mcr	p15, 0, r0, c7, c5, 0	/* Invalidate ICache */
2:	mrc	p15, 0, APSR_nzcv, c7, c14, 3	/* Test, clean and invalidate DCache */
	bne	2b			/* More to do? */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	cmp	r0, #1			/* r0 is a table address, never 1: force NE */

1:	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */
	mcrne	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs (flush path only) */
	RET

/*
 * Cache operations.  For the entire cache we use the enhanced cache
 * operations.
 */
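
/*
 * Each _range function below shares the same prologue.  If the length
 * in r1 is 0x4000 (16KB) or more, walking the range line by line is
 * assumed to cost more than operating on the whole cache, so we branch
 * to a whole-cache variant.  Otherwise r0 is rounded down to a cache
 * line boundary and the length biased to match, so that partial lines
 * at both ends are covered.  For example, with a 32-byte line size,
 * r0 = 0x1018 and r1 = 0x10:
 *
 *	r3 = 0x1f (line mask), r2 = r0 & r3 = 0x18
 *	r1 = (0x10 - 1) + 0x18 = 0x27, r0 = 0x1000
 *
 * and the subs/bpl loop then visits the two lines at 0x1000 and
 * 0x1020, which together cover the requested bytes 0x1018-0x1027.
 */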

ENTRY_NP(armv5_ec_icache_sync_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_icache_sync_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

ENTRY_NP(armv5_ec_icache_sync_all)
.Larmv5_ec_icache_sync_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to clean Dcache. */

.Larmv5_ec_dcache_wb:
1:
	mrc	p15, 0, APSR_nzcv, c7, c10, 3	/* Test and clean (don't invalidate) */
	bne	1b			/* More to do? */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

.Larmv5_ec_line_size:
	.word	_C_LABEL(arm_pcache) + DCACHE_LINE_SIZE
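/*
 * This literal holds the address of the D-cache line size field of the
 * global arm_pcache cache-information structure (DCACHE_LINE_SIZE
 * appears to be that field's offset, generated into assym.h), which is
 * why the _range functions do two loads: one for the address from the
 * literal pool, then one for the line size itself.
 */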

ENTRY(armv5_ec_dcache_wb_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_dcache_wb
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

ENTRY(armv5_ec_dcache_wbinv_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

/*
 * Note, we must not invalidate the entire cache here: a pure
 * invalidate would discard dirty lines belonging to other data.  So
 * when the range is too big to walk line by line, we fall back to a
 * write-back and invalidate of the entire cache instead.
 */
ENTRY(armv5_ec_dcache_inv_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

ENTRY(armv5_ec_idcache_wbinv_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_idcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

ENTRY_NP(armv5_ec_idcache_wbinv_all)
.Larmv5_ec_idcache_wbinv_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to purge Dcache. */

ENTRY(armv5_ec_dcache_wbinv_all)
.Larmv5_ec_dcache_wbinv_all:
1:	mrc	p15, 0, APSR_nzcv, c7, c14, 3	/* Test, clean and invalidate DCache */
	bne	1b			/* More to do? */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET