xref: /netbsd-src/sys/arch/arm/include/cpu.h (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: cpu.h,v 1.96 2018/04/01 04:35:04 ryo Exp $	*/
2 
3 /*
4  * Copyright (c) 1994-1996 Mark Brinicombe.
5  * Copyright (c) 1994 Brini.
6  * All rights reserved.
7  *
8  * This code is derived from software written for Brini by Mark Brinicombe
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by Brini.
21  * 4. The name of the company nor the name of the author may be used to
22  *    endorse or promote products derived from this software without specific
23  *    prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
29  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
30  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
31  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  * RiscBSD kernel project
38  *
39  * cpu.h
40  *
41  * CPU specific symbols
42  *
43  * Created      : 18/09/94
44  *
45  * Based on kate/katelib/arm6.h
46  */
47 
48 #ifndef _ARM_CPU_H_
49 #define _ARM_CPU_H_
50 
51 #ifdef __arm__
52 
53 /*
54  * User-visible definitions
55  */
56 
57 /*  CTL_MACHDEP definitions. */
58 #define	CPU_DEBUG		1	/* int: misc kernel debug control */
59 #define	CPU_BOOTED_DEVICE	2	/* string: device we booted from */
60 #define	CPU_BOOTED_KERNEL	3	/* string: kernel we booted */
61 #define	CPU_CONSDEV		4	/* struct: dev_t of our console */
62 #define	CPU_POWERSAVE		5	/* int: use CPU powersave mode */
63 #define	CPU_MAXID		6	/* number of valid machdep ids */
64 
65 #if defined(_KERNEL) || defined(_KMEMUSER)
66 
67 /*
68  * Kernel-only definitions
69  */
70 
71 #if !defined(_MODULE) && defined(_KERNEL_OPT)
72 #include "opt_multiprocessor.h"
73 #include "opt_cpuoptions.h"
74 #include "opt_lockdebug.h"
75 #include "opt_cputypes.h"
76 #endif /* !_MODULE && _KERNEL_OPT */
77 
78 #ifndef _LOCORE
79 #if defined(TPIDRPRW_IS_CURLWP) || defined(TPIDRPRW_IS_CURCPU)
80 #include <arm/armreg.h>
81 #endif
82 
83 /* 1 == use cpu_sleep(), 0 == don't */
84 extern int cpu_do_powersave;
85 extern int cpu_fpu_present;
86 
87 /* All the CLKF_* macros take a struct clockframe * as an argument. */
88 
89 /*
90  * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
91  * frame came from USR mode or not.
92  */
93 #define CLKF_USERMODE(cf) (((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)
94 
95 /*
96  * CLKF_INTR: True if we took the interrupt from inside another
97  * interrupt handler.
98  */
99 #if !defined(__ARM_EABI__)
100 /* Hack to treat FPE time as interrupt time so we can measure it */
101 #define CLKF_INTR(cf)						\
102 	((curcpu()->ci_intr_depth > 1) ||			\
103 	    ((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
104 #else
105 #define CLKF_INTR(cf)	((void)(cf), curcpu()->ci_intr_depth > 1)
106 #endif
107 
108 /*
109  * CLKF_PC: Extract the program counter from a clockframe
110  */
111 #define CLKF_PC(frame)		(frame->cf_tf.tf_pc)
112 
113 /*
114  * LWP_PC: Find out the program counter for the given lwp.
115  */
116 #define LWP_PC(l)		(lwp_trapframe(l)->tf_pc)
117 
118 /*
119  * Per-CPU information.  For now we assume one CPU.
120  */
121 #ifdef _KERNEL
122 static inline int curcpl(void);
123 static inline void set_curcpl(int);
124 static inline void cpu_dosoftints(void);
125 #endif
126 
127 #ifdef _KMEMUSER
128 #include <sys/intr.h>
129 #endif
130 #include <sys/atomic.h>
131 #include <sys/cpu_data.h>
132 #include <sys/device_if.h>
133 #include <sys/evcnt.h>
134 
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	device_t ci_dev;		/* Device corresponding to this CPU */
	cpuid_t ci_cpuid;		/* MI CPU identifier */
	uint32_t ci_arm_cpuid;		/* aggregate CPU id */
	uint32_t ci_arm_cputype;	/* CPU type */
	uint32_t ci_arm_cpurev;		/* CPU revision */
	uint32_t ci_ctrl;		/* The CPU control register */
	int ci_cpl;			/* current processor level (spl) */
	volatile int ci_astpending;	/* AST pending; set by setsoftast() */
	int ci_want_resched;		/* resched() was called */
	int ci_intr_depth;		/* interrupt nesting depth; >1 in
					 * CLKF_INTR() means a nested interrupt */
	struct cpu_softc *ci_softc;	/* platform softc */
	lwp_t *ci_softlwps[SOFTINT_COUNT]; /* lwps for fast softint levels */
	volatile uint32_t ci_softints;	/* pending softints, one bit per level
					 * (tested against ci_cpl, see
					 * cpu_dosoftints()) */
	lwp_t *ci_curlwp;		/* current lwp */
	lwp_t *ci_lastlwp;		/* last lwp */
	struct evcnt ci_arm700bugcount;	/* event counter (ARM700 bug workaround) */
	int32_t ci_mtx_count;		/* mutex count (MI spin-mutex support) */
	int ci_mtx_oldspl;		/* saved spl for mutex release */
	register_t ci_undefsave[3];	/* NOTE(review): scratch save area for
					 * undefined-instruction handling —
					 * confirm against exception code */
	uint32_t ci_vfp_id;		/* VFP identification (see ci_vfp_evs) */
	uint64_t ci_lastintr;		/* presumably timestamp of last
					 * interrupt — verify against users */
	struct pmap_tlb_info *ci_tlb_info; /* per-CPU TLB state */
	struct pmap *ci_pmap_lastuser;	/* last user pmap on this CPU */
	struct pmap *ci_pmap_cur;	/* currently active pmap */
	tlb_asid_t ci_pmap_asid_cur;	/* ASID of the active pmap */
	struct trapframe *ci_ddb_regs;	/* register state for DDB */
	struct evcnt ci_abt_evs[16];	/* abort event counters */
	struct evcnt ci_und_ev;		/* undefined-instruction events */
	struct evcnt ci_und_cp15_ev;	/* undefined cp15 access events */
	struct evcnt ci_vfp_evs[3];	/* VFP event counters */
#if defined(MP_CPU_INFO_MEMBERS)
	MP_CPU_INFO_MEMBERS
#endif
};
171 
172 extern struct cpu_info cpu_info_store;
173 
174 struct lwp *arm_curlwp(void);
175 struct cpu_info *arm_curcpu(void);
176 
177 #if defined(_MODULE)
178 
179 #define	curlwp		arm_curlwp()
180 #define curcpu()	arm_curcpu()
181 
182 #elif defined(TPIDRPRW_IS_CURLWP)
183 static inline struct lwp *
184 _curlwp(void)
185 {
186 	return (struct lwp *) armreg_tpidrprw_read();
187 }
188 
189 static inline void
190 _curlwp_set(struct lwp *l)
191 {
192 	armreg_tpidrprw_write((uintptr_t)l);
193 }
194 
195 // Also in <sys/lwp.h> but also here if this was included before <sys/lwp.h>
196 static inline struct cpu_info *lwp_getcpu(struct lwp *);
197 
198 #define	curlwp		_curlwp()
199 // curcpu() expands into two instructions: a mrc and a ldr
200 #define	curcpu()	lwp_getcpu(_curlwp())
201 #elif defined(TPIDRPRW_IS_CURCPU)
202 #ifdef __HAVE_PREEMPTION
203 #error __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
204 #endif
205 static inline struct cpu_info *
206 curcpu(void)
207 {
208 	return (struct cpu_info *) armreg_tpidrprw_read();
209 }
210 #elif !defined(MULTIPROCESSOR)
211 #define	curcpu()	(&cpu_info_store)
212 #elif !defined(__HAVE_PREEMPTION)
213 #error MULTIPROCESSOR && !__HAVE_PREEMPTION requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP
214 #else
215 #error MULTIPROCESSOR && __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
216 #endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */
217 
218 #ifndef curlwp
219 #define	curlwp		(curcpu()->ci_curlwp)
220 #endif
221 
222 #define CPU_INFO_ITERATOR	int
223 #if defined(_MODULE) || defined(MULTIPROCESSOR)
224 extern struct cpu_info *cpu_info[];
225 #define cpu_number()		(curcpu()->ci_index)
226 #define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
227 #define CPU_INFO_FOREACH(cii, ci)			\
228 	cii = 0, ci = cpu_info[0]; cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL; cii++
229 #else
230 #define cpu_number()            0
231 
232 #define CPU_IS_PRIMARY(ci)	true
233 #define CPU_INFO_FOREACH(cii, ci)			\
234 	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
235 #endif
236 
237 #if defined(MULTIPROCESSOR)
238 void cpu_boot_secondary_processors(void);
239 #endif
240 
241 #define	LWP0_CPU_INFO	(&cpu_info_store)
242 
243 static inline int
244 curcpl(void)
245 {
246 	return curcpu()->ci_cpl;
247 }
248 
249 static inline void
250 set_curcpl(int pri)
251 {
252 	curcpu()->ci_cpl = pri;
253 }
254 
/*
 * Run pending fast soft interrupts, if it is safe to do so: only from
 * the base (non-interrupt) context and only when a softint at a level
 * above the current spl is pending.  Compiles to a no-op unless
 * __HAVE_FAST_SOFTINTS; with __HAVE_PIC_FAST_SOFTINTS the dispatch is
 * presumably done by the interrupt-controller code instead — verify.
 */
static inline void
cpu_dosoftints(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	void	dosoftints(void);
#ifndef __HAVE_PIC_FAST_SOFTINTS
	struct cpu_info * const ci = curcpu();
	/* Shifting out the bits at or below ci_cpl leaves only the
	 * pending softints above the current priority level. */
	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
		dosoftints();
#endif
#endif
}
267 
268 void	cpu_proc_fork(struct proc *, struct proc *);
269 
270 /*
271  * Scheduling glue
272  */
273 
274 #ifdef __HAVE_PREEMPTION
275 #define setsoftast(ci)		atomic_or_uint(&(ci)->ci_astpending, __BIT(0))
276 #else
277 #define setsoftast(ci)		((ci)->ci_astpending = __BIT(0))
278 #endif
279 
280 /*
281  * Notify the current process (p) that it has a signal pending,
282  * process as soon as possible.
283  */
284 
285 #define cpu_signotify(l)		setsoftast((l)->l_cpu)
286 
287 /*
288  * Give a profiling tick to the current process when the user profiling
289  * buffer pages are invalid.  On the i386, request an ast to send us
290  * through trap(), marking the proc as needing a profiling tick.
291  */
292 #define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, \
293 				 setsoftast((l)->l_cpu))
294 
/* for preemption. */
296 void	cpu_set_curpri(int);
297 
298 /*
299  * We've already preallocated the stack for the idlelwps for additional CPUs.
300  * This hook allows to return them.
301  */
302 vaddr_t cpu_uarea_alloc_idlelwp(struct cpu_info *);
303 
304 /*
305  * cpu device glue (belongs in cpuvar.h)
306  */
307 void	cpu_attach(device_t, cpuid_t);
308 
309 #endif /* !_LOCORE */
310 
311 #endif /* _KERNEL */
312 
313 #elif defined(__aarch64__)
314 
315 #include <aarch64/cpu.h>
316 
317 #endif /* __arm__/__aarch64__ */
318 
319 #endif /* !_ARM_CPU_H_ */
320