/*	$NetBSD: cpu.h,v 1.100 2019/01/03 10:26:41 skrll Exp $	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_CPU_H_
#define _ARM_CPU_H_

#ifdef __arm__

/*
 * User-visible definitions
 */

/*  CTL_MACHDEP definitions. */
#define	CPU_DEBUG		1	/* int: misc kernel debug control */
#define	CPU_BOOTED_DEVICE	2	/* string: device we booted from */
#define	CPU_BOOTED_KERNEL	3	/* string: kernel we booted */
#define	CPU_CONSDEV		4	/* struct: dev_t of our console */
#define	CPU_POWERSAVE		5	/* int: use CPU powersave mode */

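/*
 * Example (illustrative only): from userland these nodes are reached
 * via sysctl(3) with a { CTL_MACHDEP, CPU_* } MIB.  Assuming
 * <sys/param.h>, <sys/sysctl.h> and <stdio.h> are included, the path
 * of the booted kernel could be read like this:
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[MAXPATHLEN];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 */
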
#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Kernel-only definitions
 */

#if !defined(_MODULE) && defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"
#include "opt_cputypes.h"
#endif /* !_MODULE && _KERNEL_OPT */

#ifndef _LOCORE
#if defined(TPIDRPRW_IS_CURLWP) || defined(TPIDRPRW_IS_CURCPU)
#include <arm/armreg.h>
#endif

/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_fpu_present;

/* All the CLKF_* macros take a struct clockframe * as an argument. */

/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from USR mode or not.
 */
#define CLKF_USERMODE(cf) (((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler.
 */
#if !defined(__ARM_EABI__)
/* Hack to treat FPE time as interrupt time so we can measure it */
#define CLKF_INTR(cf)						\
	((curcpu()->ci_intr_depth > 1) ||			\
	    ((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
#else
#define CLKF_INTR(cf)	((void)(cf), curcpu()->ci_intr_depth > 1)
#endif

/*
 * CLKF_PC: Extract the program counter from a clockframe
 */
#define CLKF_PC(frame)		((frame)->cf_tf.tf_pc)

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define LWP_PC(l)		(lwp_trapframe(l)->tf_pc)

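/*
 * Illustrative sketch (not part of this header) of how an MI clock
 * interrupt consumer such as statclock() typically uses the macros
 * above; charge_user(), charge_intr() and charge_system() are
 * hypothetical helpers standing in for the real accounting code:
 *
 *	void
 *	clock_tick(struct clockframe *cf)
 *	{
 *		if (CLKF_USERMODE(cf))
 *			charge_user(curlwp);		// tick hit user code
 *		else if (CLKF_INTR(cf))
 *			charge_intr(curcpu());		// nested interrupt
 *		else
 *			charge_system(curlwp, CLKF_PC(cf)); // kernel, profile PC
 *	}
 */
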
/*
 * Per-CPU information.  For now we assume one CPU.
 */
#ifdef _KERNEL
static inline int curcpl(void);
static inline void set_curcpl(int);
static inline void cpu_dosoftints(void);
#endif

#ifdef _KMEMUSER
#include <sys/intr.h>
#endif
#include <sys/atomic.h>
#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>

struct cpu_info {
	struct cpu_data	ci_data;	/* MI per-cpu data */
	device_t	ci_dev;		/* Device corresponding to this CPU */
	cpuid_t		ci_cpuid;
	uint32_t	ci_arm_cpuid;	/* aggregate CPU id */
	uint32_t	ci_arm_cputype;	/* CPU type */
	uint32_t	ci_arm_cpurev;	/* CPU revision */
	uint32_t	ci_ctrl;	/* The CPU control register */
	int		ci_cpl;		/* current processor level (spl) */
	volatile int	ci_astpending;	/* AST pending for this CPU */
	int		ci_want_resched;/* resched() was called */
	int		ci_intr_depth;	/* current interrupt nesting depth */

	struct cpu_softc *
			ci_softc;	/* platform softc */

	lwp_t *		ci_softlwps[SOFTINT_COUNT];
	volatile uint32_t
			ci_softints;

	lwp_t *		ci_curlwp;	/* current lwp */
	lwp_t *		ci_lastlwp;	/* last lwp */

	struct evcnt	ci_arm700bugcount;
	int32_t		ci_mtx_count;
	int		ci_mtx_oldspl;
	register_t	ci_undefsave[3];
	uint32_t	ci_vfp_id;
	uint64_t	ci_lastintr;

	struct pmap_tlb_info *
			ci_tlb_info;
	struct pmap *	ci_pmap_lastuser;
	struct pmap *	ci_pmap_cur;
	tlb_asid_t	ci_pmap_asid_cur;

	struct trapframe *
			ci_ddb_regs;

	struct evcnt	ci_abt_evs[16];
	struct evcnt	ci_und_ev;
	struct evcnt	ci_und_cp15_ev;
	struct evcnt	ci_vfp_evs[3];

	uint32_t	ci_midr;
	uint32_t	ci_mpidr;

	struct arm_cache_info *
			ci_cacheinfo;

#if defined(MP_CPU_INFO_MEMBERS)
	MP_CPU_INFO_MEMBERS
#endif
};

extern struct cpu_info cpu_info_store;

struct lwp *arm_curlwp(void);
struct cpu_info *arm_curcpu(void);

#if defined(_MODULE)

#define	curlwp		arm_curlwp()
#define curcpu()	arm_curcpu()

#elif defined(TPIDRPRW_IS_CURLWP)
static inline struct lwp *
_curlwp(void)
{
	return (struct lwp *) armreg_tpidrprw_read();
}

static inline void
_curlwp_set(struct lwp *l)
{
	armreg_tpidrprw_write((uintptr_t)l);
}

// Also declared in <sys/lwp.h>, but repeated here in case this header
// was included before <sys/lwp.h>.
static inline struct cpu_info *lwp_getcpu(struct lwp *);

#define	curlwp		_curlwp()
// curcpu() expands into two instructions: an mrc and an ldr.
#define	curcpu()	lwp_getcpu(_curlwp())
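// Illustrative expansion (a sketch, not compiler output): the mrc reads
// TPIDRPRW to recover the current lwp and the ldr then fetches its
// l_cpu pointer, i.e. roughly:
//
//	struct lwp *l = (struct lwp *)armreg_tpidrprw_read();	/* mrc */
//	struct cpu_info *ci = l->l_cpu;				/* ldr */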
#elif defined(TPIDRPRW_IS_CURCPU)
#ifdef __HAVE_PREEMPTION
#error __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif
static inline struct cpu_info *
curcpu(void)
{
	return (struct cpu_info *) armreg_tpidrprw_read();
}
#elif !defined(MULTIPROCESSOR)
#define	curcpu()	(&cpu_info_store)
#elif !defined(__HAVE_PREEMPTION)
#error MULTIPROCESSOR && !__HAVE_PREEMPTION requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP
#else
#error MULTIPROCESSOR && __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */

#ifndef curlwp
#define	curlwp		(curcpu()->ci_curlwp)
#endif

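/*
 * Whichever variant is selected above, the expected invariant is that
 * the current lwp runs on the current CPU; an illustrative assertion
 * would be:
 *
 *	KASSERT(curlwp->l_cpu == curcpu());
 */
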
#define CPU_INFO_ITERATOR	int
#if defined(_MODULE) || defined(MULTIPROCESSOR)
extern struct cpu_info *cpu_info[];
#define cpu_number()		(curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
#define CPU_INFO_FOREACH(cii, ci)			\
	cii = 0, ci = cpu_info[0]; cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL; cii++
#else
#define cpu_number()		0

#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci)			\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif
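
/*
 * Example (illustrative only) of walking the configured CPUs with the
 * iterator macros above:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		if (!CPU_IS_PRIMARY(ci))
 *			continue;	// e.g. skip the secondary CPUs
 *		// ... per-CPU work using ci ...
 *	}
 */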

#if defined(MULTIPROCESSOR)

extern volatile u_int arm_cpu_hatched;
extern uint32_t cpu_mpidr[];

void cpu_mpstart(void);
void cpu_init_secondary_processor(int);
void cpu_boot_secondary_processors(void);
#endif

#define	LWP0_CPU_INFO	(&cpu_info_store)

static inline int
curcpl(void)
{
	return curcpu()->ci_cpl;
}

static inline void
set_curcpl(int pri)
{
	curcpu()->ci_cpl = pri;
}

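/*
 * cpu_dosoftints(): with fast soft interrupts configured (and not
 * dispatched through the PIC code), run pending soft interrupts when
 * we are not already inside an interrupt handler.  The shift checks
 * the pending bits in ci_softints against the current ci_cpl, so
 * dosoftints() is only called when a pending soft interrupt can
 * actually run at the current priority level.
 */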
static inline void
cpu_dosoftints(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	void	dosoftints(void);
#ifndef __HAVE_PIC_FAST_SOFTINTS
	struct cpu_info * const ci = curcpu();
	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
		dosoftints();
#endif
#endif
}

void	cpu_proc_fork(struct proc *, struct proc *);

/*
 * Scheduling glue
 */

#ifdef __HAVE_PREEMPTION
#define setsoftast(ci)		atomic_or_uint(&(ci)->ci_astpending, __BIT(0))
#else
#define setsoftast(ci)		((ci)->ci_astpending = __BIT(0))
#endif
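
/*
 * A note on the two setsoftast() variants (an observation rather than
 * a guarantee): with __HAVE_PREEMPTION, ci_astpending may be updated
 * concurrently from other contexts, so the AST bit is set with an
 * atomic OR to avoid losing other updates; without preemption a plain
 * store of the single flag suffices.
 */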

/*
 * Notify the given lwp (l) that it has a signal pending, to be
 * processed as soon as possible.
 */

#define cpu_signotify(l)		setsoftast((l)->l_cpu)

/*
 * Give a profiling tick to the current lwp when the user profiling
 * buffer pages are invalid.  On arm, request an AST to send us
 * through trap(), marking the lwp as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, \
				 setsoftast((l)->l_cpu))

/* for preemption. */
void	cpu_set_curpri(int);

/*
 * The stacks for the idle lwps of the additional CPUs have already been
 * preallocated; this hook returns them.
 */
vaddr_t cpu_uarea_alloc_idlelwp(struct cpu_info *);

/*
 * cpu device glue (belongs in cpuvar.h)
 */
void	cpu_attach(device_t, cpuid_t);

#endif /* !_LOCORE */

#endif /* _KERNEL || _KMEMUSER */

#elif defined(__aarch64__)

#include <aarch64/cpu.h>

#endif /* __arm__/__aarch64__ */

#endif /* !_ARM_CPU_H_ */