/*	cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef _ARM32_CPUFUNC_H_
#define _ARM32_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <arm/armreg.h>
#include <arm/cpuconf.h>

struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		(void);
	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int, u_int);
	void	(*cf_domains)		(u_int);
	void	(*cf_setttb)		(u_int);
	u_int	(*cf_faultstatus)	(void);
	u_int	(*cf_faultaddress)	(void);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(u_int);
	void	(*cf_tlb_flushI)	(void);
	void	(*cf_tlb_flushI_SE)	(u_int);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(u_int);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	(void);
	void	(*cf_icache_sync_range)	(vaddr_t, vsize_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*cf_dcache_inv_range)	(vaddr_t, vsize_t);
	void	(*cf_dcache_wb_range)	(vaddr_t, vsize_t);

	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range)(vaddr_t, vsize_t);

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	(void);
	void	(*cf_drain_writebuf)	(void);
	void	(*cf_flush_brnchtgt_C)	(void);
	void	(*cf_flush_brnchtgt_E)	(u_int);

	void	(*cf_sleep)		(int mode);

	/* Soft functions */

	int	(*cf_dataabt_fixup)	(void *);
	int	(*cf_prefetchabt_fixup)	(void *);

	void	(*cf_context_switch)	(u_int);

	void	(*cf_setup)		(char *);
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define cpu_id()		cpufuncs.cf_id()

#define cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define cpu_domains(d)		cpufuncs.cf_domains(d)
#define cpu_setttb(t)		cpufuncs.cf_setttb(t)
#define cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))

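/*
 * Illustrative sketch (not part of this interface): a driver handing a
 * buffer to a bus-master device might use the wrappers above roughly as
 * follows; "buf" and "len" are hypothetical names.
 *
 *	cpu_dcache_wb_range((vaddr_t)buf, len);	   push dirty data to RAM
 *		... the device reads the buffer ...
 *	cpu_dcache_inv_range((vaddr_t)buf, len);   discard stale lines
 *		... before the CPU reads data the device wrote ...
 *
 * Code that writes instructions (e.g. a trampoline) would instead call
 * cpu_icache_sync_range() on the written range so the I-cache picks up
 * the new instruction stream.
 */
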
#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define ABORT_FIXUP_OK		0	/* fixup succeeded */
#define ABORT_FIXUP_FAILED	1	/* fixup failed */
#define ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define cpu_context_switch(a)		cpufuncs.cf_context_switch(a)
#define cpu_setup(a)			cpufuncs.cf_setup(a)

int	set_cpufuncs		(void);
int	set_cpufuncs_id		(u_int);
#define ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define ARCHITECTURE_NOT_SUPPORTED	2	/* not known */

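/*
 * Illustrative sketch (hypothetical call site): machine-dependent early
 * boot code installs the per-CPU vector before using any cpu_*() wrapper,
 * and may later hand an option string to the selected setup routine.
 *
 *	if (set_cpufuncs() != 0)
 *		panic("unrecognized CPU type");
 *	...
 *	cpu_setup(args);	   args is a hypothetical option string
 */
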
void	cpufunc_nullop		(void);
int	cpufunc_null_fixup	(void *);
int	early_abort_fixup	(void *);
int	late_abort_fixup	(void *);
u_int	cpufunc_id		(void);
u_int	cpufunc_control		(u_int, u_int);
void	cpufunc_domains		(u_int);
u_int	cpufunc_faultstatus	(void);
u_int	cpufunc_faultaddress	(void);

#ifdef CPU_ARM2
u_int	arm2_id			(void);
#endif /* CPU_ARM2 */

#ifdef CPU_ARM250
u_int	arm250_id		(void);
#endif

#ifdef CPU_ARM3
u_int	arm3_control		(u_int, u_int);
void	arm3_cache_flush	(void);
#endif	/* CPU_ARM3 */

#if defined(CPU_ARM6) || defined(CPU_ARM7)
void	arm67_setttb		(u_int);
void	arm67_tlb_flush		(void);
void	arm67_tlb_purge		(u_int);
void	arm67_cache_flush	(void);
void	arm67_context_switch	(u_int);
#endif	/* CPU_ARM6 || CPU_ARM7 */

#ifdef CPU_ARM6
void	arm6_setup		(char *);
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
void	arm7_setup		(char *);
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
int	arm7_dataabt_fixup	(void *);
void	arm7tdmi_setup		(char *);
void	arm7tdmi_setttb		(u_int);
void	arm7tdmi_tlb_flushID	(void);
void	arm7tdmi_tlb_flushID_SE	(u_int);
void	arm7tdmi_cache_flushID	(void);
void	arm7tdmi_context_switch	(u_int);
#endif /* CPU_ARM7TDMI */

#ifdef CPU_ARM8
void	arm8_setttb		(u_int);
void	arm8_tlb_flushID	(void);
void	arm8_tlb_flushID_SE	(u_int);
void	arm8_cache_flushID	(void);
void	arm8_cache_flushID_E	(u_int);
void	arm8_cache_cleanID	(void);
void	arm8_cache_cleanID_E	(u_int);
void	arm8_cache_purgeID	(void);
void	arm8_cache_purgeID_E	(u_int entry);

void	arm8_cache_syncI	(void);
void	arm8_cache_cleanID_rng	(vaddr_t, vsize_t);
void	arm8_cache_cleanD_rng	(vaddr_t, vsize_t);
void	arm8_cache_purgeID_rng	(vaddr_t, vsize_t);
void	arm8_cache_purgeD_rng	(vaddr_t, vsize_t);
void	arm8_cache_syncI_rng	(vaddr_t, vsize_t);

void	arm8_context_switch	(u_int);

void	arm8_setup		(char *);

u_int	arm8_clock_config	(u_int, u_int);
#endif

#ifdef CPU_FA526
void	fa526_setup		(char *);
void	fa526_setttb		(u_int);
void	fa526_context_switch	(u_int);
void	fa526_cpu_sleep		(int);
void	fa526_tlb_flushI_SE	(u_int);
void	fa526_tlb_flushID_SE	(u_int);
void	fa526_flush_prefetchbuf	(void);
void	fa526_flush_brnchtgt_E	(u_int);

void	fa526_icache_sync_all	(void);
void	fa526_icache_sync_range(vaddr_t, vsize_t);
void	fa526_dcache_wbinv_all	(void);
void	fa526_dcache_wbinv_range(vaddr_t, vsize_t);
void	fa526_dcache_inv_range	(vaddr_t, vsize_t);
void	fa526_dcache_wb_range	(vaddr_t, vsize_t);
void	fa526_idcache_wbinv_all(void);
void	fa526_idcache_wbinv_range(vaddr_t, vsize_t);
#endif

#ifdef CPU_SA110
void	sa110_setup		(char *);
void	sa110_context_switch	(u_int);
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa11x0_drain_readbuf	(void);

void	sa11x0_context_switch	(u_int);
void	sa11x0_cpu_sleep	(int);

void	sa11x0_setup		(char *);
#endif

#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa1_setttb		(u_int);

void	sa1_tlb_flushID_SE	(u_int);

void	sa1_cache_flushID	(void);
void	sa1_cache_flushI	(void);
void	sa1_cache_flushD	(void);
void	sa1_cache_flushD_SE	(u_int);

void	sa1_cache_cleanID	(void);
void	sa1_cache_cleanD	(void);
void	sa1_cache_cleanD_E	(u_int);

void	sa1_cache_purgeID	(void);
void	sa1_cache_purgeID_E	(u_int);
void	sa1_cache_purgeD	(void);
void	sa1_cache_purgeD_E	(u_int);

void	sa1_cache_syncI		(void);
void	sa1_cache_cleanID_rng	(vaddr_t, vsize_t);
void	sa1_cache_cleanD_rng	(vaddr_t, vsize_t);
void	sa1_cache_purgeID_rng	(vaddr_t, vsize_t);
void	sa1_cache_purgeD_rng	(vaddr_t, vsize_t);
void	sa1_cache_syncI_rng	(vaddr_t, vsize_t);

#endif

#ifdef CPU_ARM9
void	arm9_setttb		(u_int);

void	arm9_tlb_flushID_SE	(u_int);

void	arm9_icache_sync_all	(void);
void	arm9_icache_sync_range	(vaddr_t, vsize_t);

void	arm9_dcache_wbinv_all	(void);
void	arm9_dcache_wbinv_range (vaddr_t, vsize_t);
void	arm9_dcache_inv_range	(vaddr_t, vsize_t);
void	arm9_dcache_wb_range	(vaddr_t, vsize_t);

void	arm9_idcache_wbinv_all	(void);
void	arm9_idcache_wbinv_range (vaddr_t, vsize_t);

void	arm9_context_switch	(u_int);

void	arm9_setup		(char *);

extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif

#if defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_SHEEVA)
void	arm10_tlb_flushID_SE	(u_int);
void	arm10_tlb_flushI_SE	(u_int);

void	arm10_context_switch	(u_int);

void	arm10_setup		(char *);
#endif

#if defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_SHEEVA)
void	armv5_ec_setttb			(u_int);

void	armv5_ec_icache_sync_all	(void);
void	armv5_ec_icache_sync_range	(vaddr_t, vsize_t);

void	armv5_ec_dcache_wbinv_all	(void);
void	armv5_ec_dcache_wbinv_range	(vaddr_t, vsize_t);
void	armv5_ec_dcache_inv_range	(vaddr_t, vsize_t);
void	armv5_ec_dcache_wb_range	(vaddr_t, vsize_t);

void	armv5_ec_idcache_wbinv_all	(void);
void	armv5_ec_idcache_wbinv_range	(vaddr_t, vsize_t);
#endif

#if defined(CPU_ARM10) || defined(CPU_ARM11MPCORE)
void	armv5_setttb		(u_int);

void	armv5_icache_sync_all	(void);
void	armv5_icache_sync_range	(vaddr_t, vsize_t);

void	armv5_dcache_wbinv_all	(void);
void	armv5_dcache_wbinv_range (vaddr_t, vsize_t);
void	armv5_dcache_inv_range	(vaddr_t, vsize_t);
void	armv5_dcache_wb_range	(vaddr_t, vsize_t);

void	armv5_idcache_wbinv_all	(void);
void	armv5_idcache_wbinv_range (vaddr_t, vsize_t);

extern unsigned armv5_dcache_sets_max;
extern unsigned armv5_dcache_sets_inc;
extern unsigned armv5_dcache_index_max;
extern unsigned armv5_dcache_index_inc;
#endif

#if defined(CPU_ARM11MPCORE)
void	arm11mpcore_setup		(char *);
#endif

#if defined(CPU_ARM11) || defined(CPU_CORTEX)
void	arm11_setttb		(u_int);

void	arm11_tlb_flushID_SE	(u_int);
void	arm11_tlb_flushI_SE	(u_int);

void	arm11_context_switch	(u_int);

void	arm11_cpu_sleep		(int);
void	arm11_setup		(char *string);
void	arm11_tlb_flushID	(void);
void	arm11_tlb_flushI	(void);
void	arm11_tlb_flushD	(void);
void	arm11_tlb_flushD_SE	(u_int va);

void	armv11_dcache_wbinv_all	(void);
void	armv11_idcache_wbinv_all(void);

void	arm11_drain_writebuf	(void);
void	arm11_sleep		(int);

void	armv6_setttb		(u_int);

void	armv6_icache_sync_all	(void);
void	armv6_icache_sync_range	(vaddr_t, vsize_t);

void	armv6_dcache_wbinv_all	(void);
void	armv6_dcache_wbinv_range (vaddr_t, vsize_t);
void	armv6_dcache_inv_range	(vaddr_t, vsize_t);
void	armv6_dcache_wb_range	(vaddr_t, vsize_t);

void	armv6_idcache_wbinv_all	(void);
void	armv6_idcache_wbinv_range (vaddr_t, vsize_t);
#endif

#if defined(CPU_CORTEX)
void	armv7_setttb		(u_int);

void	armv7_icache_sync_range	(vaddr_t, vsize_t);
void	armv7_dcache_wb_range	(vaddr_t, vsize_t);
void	armv7_dcache_wbinv_range(vaddr_t, vsize_t);
void	armv7_dcache_inv_range	(vaddr_t, vsize_t);
void	armv7_idcache_wbinv_range(vaddr_t, vsize_t);

void	armv7_dcache_wbinv_all	(void);
void	armv7_idcache_wbinv_all	(void);
void	armv7_icache_sync_all	(void);
void	armv7_cpu_sleep		(int);
void	armv7_context_switch	(u_int);
void	armv7_tlb_flushID_SE	(u_int);
void	armv7_setup		(char *string);
#endif

#if defined(CPU_ARM1136)
void	arm1136_setttb			(u_int);
void	arm1136_idcache_wbinv_all	(void);
void	arm1136_dcache_wbinv_all	(void);
void	arm1136_icache_sync_all		(void);
void	arm1136_flush_prefetchbuf	(void);
void	arm1136_icache_sync_range	(vaddr_t, vsize_t);
void	arm1136_idcache_wbinv_range	(vaddr_t, vsize_t);
void	arm1136_setup			(char *string);
void	arm1136_sleep_rev0		(int);	/* for errata 336501 */
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
    defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX) || defined(CPU_SHEEVA)

void	armv4_tlb_flushID	(void);
void	armv4_tlb_flushI	(void);
void	armv4_tlb_flushD	(void);
void	armv4_tlb_flushD_SE	(u_int);

void	armv4_drain_writebuf	(void);
#endif

#if defined(CPU_IXP12X0)
void	ixp12x0_drain_readbuf	(void);
void	ixp12x0_context_switch	(u_int);
void	ixp12x0_setup		(char *);
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX)

void	xscale_cpwait		(void);
#define	cpu_cpwait()		cpufuncs.cf_cpwait()

void	xscale_cpu_sleep	(int);

u_int	xscale_control		(u_int, u_int);

void	xscale_setttb		(u_int);

void	xscale_tlb_flushID_SE	(u_int);

void	xscale_cache_flushID	(void);
void	xscale_cache_flushI	(void);
void	xscale_cache_flushD	(void);
void	xscale_cache_flushD_SE	(u_int);

void	xscale_cache_cleanID	(void);
void	xscale_cache_cleanD	(void);
void	xscale_cache_cleanD_E	(u_int);

void	xscale_cache_clean_minidata (void);

void	xscale_cache_purgeID	(void);
void	xscale_cache_purgeID_E	(u_int);
void	xscale_cache_purgeD	(void);
void	xscale_cache_purgeD_E	(u_int);

void	xscale_cache_syncI	(void);
void	xscale_cache_cleanID_rng (vaddr_t, vsize_t);
void	xscale_cache_cleanD_rng	(vaddr_t, vsize_t);
void	xscale_cache_purgeID_rng (vaddr_t, vsize_t);
void	xscale_cache_purgeD_rng	(vaddr_t, vsize_t);
void	xscale_cache_syncI_rng	(vaddr_t, vsize_t);
void	xscale_cache_flushD_rng	(vaddr_t, vsize_t);

void	xscale_context_switch	(u_int);

void	xscale_setup		(char *);
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 || CPU_CORTEX */

#if defined(CPU_SHEEVA)
void	sheeva_dcache_wbinv_range (vaddr_t, vsize_t);
void	sheeva_dcache_inv_range	(vaddr_t, vsize_t);
void	sheeva_dcache_wb_range	(vaddr_t, vsize_t);
void	sheeva_idcache_wbinv_range (vaddr_t, vsize_t);
void	sheeva_setup		(char *);
#endif

#define tlb_flush	cpu_tlb_flushID
#define setttb		cpu_setttb
#define drain_writebuf	cpu_drain_writebuf

#ifndef cpu_cpwait
#define	cpu_cpwait()
#endif

/*
 * Macros and inline functions for manipulating CPU interrupts
 */
#ifdef __PROG32
static __inline uint32_t __set_cpsr_c(uint32_t bic, uint32_t eor) __attribute__((__unused__));
static __inline uint32_t disable_interrupts(uint32_t mask) __attribute__((__unused__));
static __inline uint32_t enable_interrupts(uint32_t mask) __attribute__((__unused__));

static __inline uint32_t
__set_cpsr_c(uint32_t bic, uint32_t eor)
{
	uint32_t	tmp, ret;

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic	 %1, %0, %2\n"	/* Clear bits */
		"eor	 %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

static __inline uint32_t
disable_interrupts(uint32_t mask)
{
	uint32_t	tmp, ret;
	mask &= (I32_bit | F32_bit);

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"orr	 %1, %0, %2\n"	/* Set bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (mask)
	: "memory");

	return ret;
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{
	uint32_t	ret, tmp;
	mask &= (I32_bit | F32_bit);

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic	 %1, %0, %2\n"	/* Clear bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (mask)
	: "memory");

	return ret;
}

#define restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))

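/*
 * Illustrative sketch: the usual pattern is to save the CPSR value that
 * disable_interrupts() returns and hand it back to restore_interrupts(),
 * rather than unconditionally re-enabling, so nested critical sections
 * preserve the caller's original I/F state.
 *
 *	uint32_t savedints = disable_interrupts(I32_bit);
 *		... code that must not be preempted by IRQs ...
 *	restore_interrupts(savedints);
 */
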
static inline void cpsie(register_t psw) __attribute__((__unused__));
static inline register_t cpsid(register_t psw) __attribute__((__unused__));

static inline void
cpsie(register_t psw)
{
#ifdef _ARM_ARCH_6
	if (!__builtin_constant_p(psw)) {
		enable_interrupts(psw);
		return;
	}
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsie\ti"); break;
	case F32_bit:		__asm("cpsie\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsie\tif"); break;
	}
#else
	enable_interrupts(psw);
#endif
}

static inline register_t
cpsid(register_t psw)
{
#ifdef _ARM_ARCH_6
	register_t oldpsw;
	if (!__builtin_constant_p(psw))
		return disable_interrupts(psw);

	__asm("mrs	%0, cpsr" : "=r"(oldpsw));
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsid\ti"); break;
	case F32_bit:		__asm("cpsid\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsid\tif"); break;
	}
	return oldpsw;
#else
	return disable_interrupts(psw);
#endif
}

#else /* ! __PROG32 */
#define	disable_interrupts(mask)					\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE),		\
		 (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))

#define	enable_interrupts(mask)						\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))

#define	restore_interrupts(old_r15)					\
	(set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE),			\
		 (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
#endif /* __PROG32 */

#ifdef __PROG32
/* Functions to manipulate the CPSR. */
u_int	SetCPSR(u_int, u_int);
u_int	GetCPSR(void);
#else
/* Functions to manipulate the processor control bits in r15. */
u_int	set_r15(u_int, u_int);
u_int	get_r15(void);
#endif /* __PROG32 */

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void set_stackptr	(u_int, u_int);
u_int get_stackptr	(u_int);

/*
 * Miscellany
 */

int get_pc_str_offset	(void);

/*
 * CPU functions from locore.S
 */

void cpu_reset		(void) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
extern int	arm_picache_size;
extern int	arm_picache_line_size;
extern int	arm_picache_ways;

extern int	arm_pdcache_size;	/* and unified */
extern int	arm_pdcache_line_size;
extern int	arm_pdcache_ways;
extern int	arm_cache_prefer_mask;

extern int	arm_pcache_type;
extern int	arm_pcache_unified;

extern int	arm_dcache_align;
extern int	arm_dcache_align_mask;

#endif	/* _KERNEL */
#endif	/* _ARM32_CPUFUNC_H_ */

/* End of cpufunc.h */