/*	cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef _ARM_CPUFUNC_H_
#define _ARM_CPUFUNC_H_

#ifdef _ARM_ARCH_7
/*
 * Options for DMB and DSB:
 *	oshld	Outer Shareable, load
 *	oshst	Outer Shareable, store
 *	osh	Outer Shareable, all
 *	nshld	Non-shareable, load
 *	nshst	Non-shareable, store
 *	nsh	Non-shareable, all
 *	ishld	Inner Shareable, load
 *	ishst	Inner Shareable, store
 *	ish	Inner Shareable, all
 *	ld	Full system, load
 *	st	Full system, store
 *	sy	Full system, all
 */
#define	dsb(opt)	__asm __volatile("dsb " __STRING(opt) : : : "memory")
#define	dmb(opt)	__asm __volatile("dmb " __STRING(opt) : : : "memory")
#define	isb()		__asm __volatile("isb" : : : "memory")
#define	sev()		__asm __volatile("sev" : : : "memory")

#else

#define dsb(opt)	\
	__asm __volatile("mcr p15, 0, %0, c7, c10, 4" :: "r" (0) : "memory")
#define dmb(opt)	\
	__asm __volatile("mcr p15, 0, %0, c7, c10, 5" :: "r" (0) : "memory")
#define isb()		\
	__asm __volatile("mcr p15, 0, %0, c7, c5, 4" :: "r" (0) : "memory")
#define sev()		__nothing

#endif
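
/*
 * Illustrative sketch (not part of this header): a typical producer
 * side of a shared-memory handoff using the barrier macros above.  The
 * structure and field names are hypothetical.
 *
 *	desc->data = value;	// fill in the payload
 *	dmb(ishst);		// order the payload store before...
 *	desc->ready = 1;	// ...publishing it (Inner Shareable stores)
 *
 * dsb() is the heavier barrier, used e.g. around cache and TLB
 * maintenance or before WFI; isb() flushes the pipeline so that later
 * instructions see the effect of earlier context-changing operations
 * such as CP15 writes.  On pre-ARMv7 kernels the option argument is
 * ignored and the equivalent CP15 c7 operations are issued instead.
 */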

#ifdef __arm__

#ifdef _KERNEL

#include <sys/types.h>

#include <arm/armreg.h>
#include <arm/cpuconf.h>
#include <arm/cpufunc_proto.h>

struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		(void);
	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int, u_int);
	void	(*cf_domains)		(u_int);
#if defined(ARM_MMU_EXTENDED)
	void	(*cf_setttb)		(u_int, tlb_asid_t);
#else
	void	(*cf_setttb)		(u_int, bool);
#endif
	u_int	(*cf_faultstatus)	(void);
	u_int	(*cf_faultaddress)	(void);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(vaddr_t);
	void	(*cf_tlb_flushI)	(void);
	void	(*cf_tlb_flushI_SE)	(vaddr_t);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(vaddr_t);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	(void);
	void	(*cf_icache_sync_range)	(vaddr_t, vsize_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*cf_dcache_inv_range)	(vaddr_t, vsize_t);
	void	(*cf_dcache_wb_range)	(vaddr_t, vsize_t);

	void	(*cf_sdcache_wbinv_range)(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_inv_range)	(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_wb_range)	(vaddr_t, paddr_t, psize_t);

	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range)(vaddr_t, vsize_t);

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	(void);
	void	(*cf_drain_writebuf)	(void);
	void	(*cf_flush_brnchtgt_C)	(void);
	void	(*cf_flush_brnchtgt_E)	(u_int);

	void	(*cf_sleep)		(int mode);

	/* Soft functions */

	int	(*cf_dataabt_fixup)	(void *);
	int	(*cf_prefetchabt_fixup)	(void *);

#if defined(ARM_MMU_EXTENDED)
	void	(*cf_context_switch)	(u_int, tlb_asid_t);
#else
	void	(*cf_context_switch)	(u_int);
#endif

	void	(*cf_setup)		(char *);
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define cpu_idnum()		cpufuncs.cf_id()

#define cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define cpu_domains(d)		cpufuncs.cf_domains(d)
#define cpu_setttb(t, f)	cpufuncs.cf_setttb(t, f)
#define cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_sdcache_wbinv_range(a, b, s) cpufuncs.cf_sdcache_wbinv_range((a), (b), (s))
#define	cpu_sdcache_inv_range(a, b, s) cpufuncs.cf_sdcache_inv_range((a), (b), (s))
#define	cpu_sdcache_wb_range(a, b, s) cpufuncs.cf_sdcache_wb_range((a), (b), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
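
/*
 * Illustrative sketch (not part of this header): a caller that has just
 * written instructions into the kernel-virtual range [va, va + len)
 * (e.g. a module loader or breakpoint code) makes them visible with:
 *
 *	cpu_icache_sync_range(va, len);
 *	cpu_cpwait();		// no-op except where CPU_XSCALE defines it
 *
 * Per the rules documented in struct cpu_functions above, the
 * implementation writes back any dirty D-cache lines covering the range
 * before synchronizing the I-cache, and may widen the operation to the
 * whole cache if it cannot operate on just a range.  "va" and "len" are
 * hypothetical names here.
 */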

#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define ABORT_FIXUP_OK		0	/* fixup succeeded */
#define ABORT_FIXUP_FAILED	1	/* fixup failed */
#define ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define cpu_context_switch(a)		cpufuncs.cf_context_switch(a)
#define cpu_setup(a)			cpufuncs.cf_setup(a)

int	set_cpufuncs		(void);
int	set_cpufuncs_id		(u_int);
#define ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define ARCHITECTURE_NOT_SUPPORTED	2	/* not known */
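
/*
 * Illustrative sketch (hypothetical early-boot code): set_cpufuncs()
 * identifies the CPU and fills in the cpufuncs vector; the
 * ARCHITECTURE_NOT_* values above describe why a non-zero return can
 * occur.  cpu_setup() is then invoked, via the macro above, to perform
 * CPU-specific control-register setup:
 *
 *	if (set_cpufuncs() != 0)
 *		panic("cpu not recognised");
 *	...
 *	cpu_setup(args);	// "args" is whatever string the port passes
 */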

void	cpufunc_nullop		(void);
int	cpufunc_null_fixup	(void *);
int	early_abort_fixup	(void *);
int	late_abort_fixup	(void *);
u_int	cpufunc_id		(void);
u_int	cpufunc_control		(u_int, u_int);
void	cpufunc_domains		(u_int);
u_int	cpufunc_faultstatus	(void);
u_int	cpufunc_faultaddress	(void);

#define setttb		cpu_setttb
#define drain_writebuf	cpu_drain_writebuf


#if defined(CPU_XSCALE)
#define	cpu_cpwait()		cpufuncs.cf_cpwait()
#endif

#ifndef cpu_cpwait
#define	cpu_cpwait()
#endif

/*
 * Macros for manipulating CPU interrupts
 */
static __inline uint32_t __set_cpsr_c(uint32_t bic, uint32_t eor) __attribute__((__unused__));
static __inline uint32_t disable_interrupts(uint32_t mask) __attribute__((__unused__));
static __inline uint32_t enable_interrupts(uint32_t mask) __attribute__((__unused__));

static __inline uint32_t
__set_cpsr_c(uint32_t bic, uint32_t eor)
{
	uint32_t	tmp, ret;

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic	 %1, %0, %2\n"	/* Clear bits */
		"eor	 %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

static __inline uint32_t
disable_interrupts(uint32_t mask)
{
	uint32_t	tmp, ret;
	mask &= (I32_bit | F32_bit);

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"orr	 %1, %0, %2\n"	/* set bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (mask)
	: "memory");

	return ret;
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{
	uint32_t	ret;
	mask &= (I32_bit | F32_bit);

	/* Get the CPSR */
	__asm __volatile("mrs\t%0, cpsr\n" : "=r"(ret));
#ifdef _ARM_ARCH_6
	if (__builtin_constant_p(mask)) {
		switch (mask) {
		case I32_bit | F32_bit:
			__asm __volatile("cpsie\tif");
			break;
		case I32_bit:
			__asm __volatile("cpsie\ti");
			break;
		case F32_bit:
			__asm __volatile("cpsie\tf");
			break;
		default:
			break;
		}
		return ret;
	}
#endif /* _ARM_ARCH_6 */

	/* Set the control field of CPSR */
	__asm volatile("msr\tcpsr_c, %0" :: "r"(ret & ~mask));

	return ret;
}

#define restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
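
/*
 * Illustrative sketch (not part of this header): a short critical
 * section that masks IRQs and then restores the previous state, so it
 * nests correctly whether or not IRQs were already disabled:
 *
 *	uint32_t opsr = disable_interrupts(I32_bit);
 *	... code that must not be interrupted by IRQs ...
 *	restore_interrupts(opsr);
 *
 * disable_interrupts()/enable_interrupts() return the previous CPSR,
 * and restore_interrupts() only rewrites the I and F bits of that
 * value, so the remaining CPSR fields in it are ignored.
 */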

#define	ENABLE_INTERRUPT()		cpsie(I32_bit)
#define	DISABLE_INTERRUPT()		cpsid(I32_bit)
#define	DISABLE_INTERRUPT_SAVE()	cpsid(I32_bit)

static inline void cpsie(register_t psw) __attribute__((__unused__));
static inline register_t cpsid(register_t psw) __attribute__((__unused__));

static inline void
cpsie(register_t psw)
{
#ifdef _ARM_ARCH_6
	if (!__builtin_constant_p(psw)) {
		enable_interrupts(psw);
		return;
	}
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsie\ti"); break;
	case F32_bit:		__asm("cpsie\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsie\tif"); break;
	}
#else
	enable_interrupts(psw);
#endif
}

static inline register_t
cpsid(register_t psw)
{
#ifdef _ARM_ARCH_6
	register_t oldpsw;
	if (!__builtin_constant_p(psw))
		return disable_interrupts(psw);

	__asm("mrs	%0, cpsr" : "=r"(oldpsw));
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsid\ti"); break;
	case F32_bit:		__asm("cpsid\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsid\tif"); break;
	}
	return oldpsw;
#else
	return disable_interrupts(psw);
#endif
}


/* Functions to manipulate the CPSR. */
u_int	SetCPSR(u_int, u_int);
u_int	GetCPSR(void);


/*
 * CPU functions from locore.S
 */

void cpu_reset		(void) __dead;

/*
 * Cache info variables.
 */
#define	CACHE_TYPE_VIVT		0
#define	CACHE_TYPE_xxPT		1
#define	CACHE_TYPE_VIPT		1
#define	CACHE_TYPE_PIxx		2
#define	CACHE_TYPE_PIPT		3

/* PRIMARY CACHE VARIABLES */
struct arm_cache_info {
	u_int icache_size;
	u_int icache_line_size;
	u_int icache_ways;
	u_int icache_way_size;
	u_int icache_sets;

	u_int dcache_size;
	u_int dcache_line_size;
	u_int dcache_ways;
	u_int dcache_way_size;
	u_int dcache_sets;

	uint8_t cache_type;
	bool cache_unified;
	uint8_t icache_type;
	uint8_t dcache_type;
};

#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
extern u_int arm_cache_prefer_mask;
#endif
extern u_int arm_dcache_align;
extern u_int arm_dcache_align_mask;

extern struct arm_cache_info arm_pcache;
extern struct arm_cache_info arm_scache;
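
/*
 * Illustrative sketch (hypothetical helper): arm_dcache_align is the
 * primary D-cache line size and arm_dcache_align_mask is that value
 * minus one, so a virtual range can be rounded out to whole cache lines
 * before per-line maintenance:
 *
 *	vaddr_t start = va & ~arm_dcache_align_mask;
 *	vsize_t size = ((va & arm_dcache_align_mask) + len +
 *	    arm_dcache_align_mask) & ~arm_dcache_align_mask;
 *	cpu_dcache_wbinv_range(start, size);
 *
 * The detailed geometry (line size, ways, sets, per-way size and the
 * CACHE_TYPE_* classification) of the primary and secondary caches is
 * exported in arm_pcache and arm_scache.  "va" and "len" are
 * hypothetical names here.
 */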

extern uint32_t cpu_ttb;

#endif	/* _KERNEL */

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Miscellany
 */

int get_pc_str_offset	(void);

bool cpu_gtmr_exists_p(void);
u_int cpu_clusterid(void);
bool cpu_earlydevice_va_p(void);

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void set_stackptr	(u_int, u_int);
u_int get_stackptr	(u_int);

#endif /* _KERNEL || _KMEMUSER */

#elif defined(__aarch64__)

#include <aarch64/cpufunc.h>

#endif /* __arm__/__aarch64__ */

#endif	/* _ARM_CPUFUNC_H_ */

/* End of cpufunc.h */