/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.h"
#include <arm/asm.h>
#include <arm/locore.h>

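/*
 * All of the range operations below share the same structure: the start
 * address is first rounded down to a cache line boundary and the length
 * rounded up to match, then the range is walked in chunks that never
 * cross a page boundary (so each chunk is at most PAGE_SIZE bytes).
 * Each chunk is handed to the cache controller as a start/end address
 * pair with interrupts disabled around the MCRs, and the write buffer
 * is drained once the whole range has been covered.
 */
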
.Lsheeva_cache_line_size:
	.word	_C_LABEL(arm_pcache) + DCACHE_LINE_SIZE
.Lsheeva_asm_page_mask:
	.word	_C_LABEL(PAGE_MASK)

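/*
 * sheeva_dcache_wbinv_range: write back and invalidate the D-cache
 * over a virtual address range (r0 = start address, r1 = length in
 * bytes).
 */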
ENTRY(sheeva_dcache_wbinv_range)
	push	{r4,r5}
	mrs	r4, cpsr
	orr	r5, r4, #I32_bit | F32_bit

	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	r3, [ip]
	sub	r3, r3, #1
	and	r2, r0, r3
	add	r1, r1, r2
	add	r1, r1, r3
	bic	r1, r1, r3
	bic	r0, r0, r3

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE
	cmp	r1, r2
	movcc	ip, r1
	movcs	ip, r2
	sub	r2, r0, #1
1:
	add	r2, r2, ip
	msr	cpsr_c, r5		/* Disable irqs */
	mcr	p15, 5, r0, c15, c15, 0	/* Clean and inv zone start address */
	mcr	p15, 5, r2, c15, c15, 1	/* Clean and inv zone end address */
	msr	cpsr_c, r4		/* Enable irqs */

	add	r0, r0, ip
	sub	r1, r1, ip
	cmp	r1, #PAGE_SIZE
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	pop	{r4, r5}
	RET
END(sheeva_dcache_wbinv_range)

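/*
 * sheeva_dcache_inv_range: invalidate, without writing back, the
 * D-cache over a virtual address range (r0 = start, r1 = length).
 */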
ENTRY(sheeva_dcache_inv_range)
	push	{r4,r5}
	mrs	r4, cpsr
	orr	r5, r4, #I32_bit | F32_bit

	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	r3, [ip]
	sub	r3, r3, #1
	and	r2, r0, r3
	add	r1, r1, r2
	add	r1, r1, r3
	bic	r1, r1, r3
	bic	r0, r0, r3

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE
	cmp	r1, r2
	movcc	ip, r1
	movcs	ip, r2
	sub	r2, r0, #1
1:
	add	r2, r2, ip
	msr	cpsr_c, r5		/* Disable irqs */
	mcr	p15, 5, r0, c15, c14, 0	/* Inv zone start address */
	mcr	p15, 5, r2, c15, c14, 1	/* Inv zone end address */
	msr	cpsr_c, r4		/* Enable irqs */

	add	r0, r0, ip
	sub	r1, r1, ip
	cmp	r1, #PAGE_SIZE
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	pop	{r4, r5}
	RET
END(sheeva_dcache_inv_range)

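/*
 * sheeva_dcache_wb_range: write back (clean) the D-cache over a
 * virtual address range (r0 = start, r1 = length) without
 * invalidating it.
 */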
ENTRY(sheeva_dcache_wb_range)
	push	{r4,r5}
	mrs	r4, cpsr
	orr	r5, r4, #I32_bit | F32_bit

	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	r3, [ip]
	sub	r3, r3, #1
	and	r2, r0, r3
	add	r1, r1, r2
	add	r1, r1, r3
	bic	r1, r1, r3
	bic	r0, r0, r3

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE
	cmp	r1, r2
	movcc	ip, r1
	movcs	ip, r2
	sub	r2, r0, #1
1:
	add	r2, r2, ip
	msr	cpsr_c, r5		/* Disable irqs */
	mcr	p15, 5, r0, c15, c13, 0	/* Clean zone start address */
	mcr	p15, 5, r2, c15, c13, 1	/* Clean zone end address */
	msr	cpsr_c, r4		/* Enable irqs */

	add	r0, r0, ip
	sub	r1, r1, ip
	cmp	r1, #PAGE_SIZE
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	pop	{r4, r5}
	RET
END(sheeva_dcache_wb_range)

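/*
 * sheeva_idcache_wbinv_range: write back and invalidate the D-cache
 * over a virtual address range (r0 = start, r1 = length), then
 * invalidate the corresponding I-cache lines one line at a time.
 */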
ENTRY(sheeva_idcache_wbinv_range)
	push	{r4,r5}
	mrs	r4, cpsr
	orr	r5, r4, #I32_bit | F32_bit

	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	r3, [ip]
	sub	r3, r3, #1
	and	r2, r0, r3
	add	r1, r1, r2
	add	r1, r1, r3
	bic	r1, r1, r3
	bic	r0, r0, r3

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE
	cmp	r1, r2
	movcc	ip, r1
	movcs	ip, r2
	sub	r2, r0, #1
1:
	add	r2, r2, ip
	msr	cpsr_c, r5		/* Disable irqs */
	mcr	p15, 5, r0, c15, c15, 0	/* Clean and inv zone start address */
	mcr	p15, 5, r2, c15, c15, 1	/* Clean and inv zone end address */
	msr	cpsr_c, r4		/* Enable irqs */

	/* Invalidate icache line by line */
2:
	mcr	p15, 0, r0, c7, c5, 1
	add	r0, r0, r3
	cmp	r2, r0
	bhi	2b

	add	r0, r2, #1		/* icache loop advanced r0; next chunk starts after r2 */
	sub	r1, r1, ip
	cmp	r1, #PAGE_SIZE
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	pop	{r4, r5}
	RET
END(sheeva_idcache_wbinv_range)

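/*
 * The sdcache (L2 cache) range hooks below are entered with the
 * virtual start address in r0, a physical address in r1 and the
 * length in r2.  The physical address is not used: the length is
 * moved into r1 and the range is processed by virtual address with
 * the same page-sized chunking as the L1 routines above.
 *
 * sheeva_sdcache_wbinv_range: write back and invalidate the L2 cache
 * over the given range.
 */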
ENTRY(sheeva_sdcache_wbinv_range)
	push	{r4,r5}
	mrs	r4, cpsr
	orr	r5, r4, #I32_bit | F32_bit

	mov	r1, r2		/* ignore paddr_t argument */

	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]
	sub	ip, ip, #1
	and	r2, r0, ip
	add	r1, r1, r2
	add	r1, r1, ip
	bic	r1, r1, ip
	bic	r0, r0, ip

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE
	cmp	r1, r2
	movcc	ip, r1
	movcs	ip, r2
	sub	r2, r0, #1
1:
	add	r2, r2, ip
	msr	cpsr_c, r5		/* Disable irqs */
	mcr	p15, 1, r0, c15, c9, 4	/* Clean L2 zone start address */
	mcr	p15, 1, r2, c15, c9, 5	/* Clean L2 zone end address */
	mcr	p15, 1, r0, c15, c11, 4	/* Inv L2 zone start address */
	mcr	p15, 1, r2, c15, c11, 5	/* Inv L2 zone end address */
	msr	cpsr_c, r4		/* Enable irqs */

	add	r0, r0, ip
	sub	r1, r1, ip
	cmp	r1, #PAGE_SIZE
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	pop	{r4, r5}
	RET
END(sheeva_sdcache_wbinv_range)

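/*
 * sheeva_sdcache_inv_range: invalidate, without writing back, the L2
 * cache over the given range.
 */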
ENTRY(sheeva_sdcache_inv_range)
	push	{r4,r5}
	mrs	r4, cpsr
	orr	r5, r4, #I32_bit | F32_bit

	mov	r1, r2		/* ignore paddr_t argument */

	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	r3, [ip]
	sub	r3, r3, #1
	and	r2, r0, r3
	add	r1, r1, r2
	add	r1, r1, r3
	bic	r1, r1, r3
	bic	r0, r0, r3

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE
	cmp	r1, r2
	movcc	ip, r1
	movcs	ip, r2
	sub	r2, r0, #1
1:
	add	r2, r2, ip
	msr	cpsr_c, r5		/* Disable irqs */
	mcr	p15, 1, r0, c15, c11, 4	/* Inv L2 zone start address */
	mcr	p15, 1, r2, c15, c11, 5	/* Inv L2 zone end address */
	msr	cpsr_c, r4		/* Enable irqs */

	add	r0, r0, ip
	sub	r1, r1, ip
	cmp	r1, #PAGE_SIZE
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	pop	{r4, r5}
	RET
END(sheeva_sdcache_inv_range)

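/*
 * sheeva_sdcache_wb_range: write back (clean) the L2 cache over the
 * given range without invalidating it.
 */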
ENTRY(sheeva_sdcache_wb_range)
	push	{r4,r5}
	mrs	r4, cpsr
	orr	r5, r4, #I32_bit | F32_bit

	mov	r1, r2		/* ignore paddr_t argument */

	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	r3, [ip]
	sub	r3, r3, #1
	and	r2, r0, r3
	add	r1, r1, r2
	add	r1, r1, r3
	bic	r1, r1, r3
	bic	r0, r0, r3

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE
	cmp	r1, r2
	movcc	ip, r1
	movcs	ip, r2
	sub	r2, r0, #1
1:
	add	r2, r2, ip
	msr	cpsr_c, r5		/* Disable irqs */
	mcr	p15, 1, r0, c15, c9, 4	/* Clean L2 zone start address */
	mcr	p15, 1, r2, c15, c9, 5	/* Clean L2 zone end address */
	msr	cpsr_c, r4		/* Enable irqs */

	add	r0, r0, ip
	sub	r1, r1, ip
	cmp	r1, #PAGE_SIZE
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	pop	{r4, r5}
	RET
END(sheeva_sdcache_wb_range)

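/*
 * sheeva_sdcache_wbinv_all: clean and invalidate the entire L2 cache,
 * then drain the write buffer.
 */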
ENTRY(sheeva_sdcache_wbinv_all)
	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0	/* Clean L2 */
	mcr	p15, 1, r0, c15, c11, 0	/* Invalidate L2 */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(sheeva_sdcache_wbinv_all)

/*
 * CPU sleep
 */
ENTRY_NP(sheeva_cpu_sleep)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c0, 4	/* wait for interrupt */
	RET
END(sheeva_cpu_sleep)