/*	$NetBSD: cpu.h,v 1.121 2021/11/01 14:45:24 skrll Exp $	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_CPU_H_
#define _ARM_CPU_H_

#ifdef _KERNEL
#ifndef _LOCORE

/* Value of the ARM Multiprocessor Affinity Register (MPIDR). */
typedef unsigned long mpidr_t;

#ifdef MULTIPROCESSOR
extern u_int arm_cpu_max;
extern mpidr_t cpu_mpidr[];

void cpu_init_secondary_processor(int);
void cpu_boot_secondary_processors(void);
void cpu_mpstart(void);
bool cpu_hatched_p(u_int);

void cpu_clr_mbox(int);
void cpu_set_hatched(int);

#endif

void	cpu_proc_fork(struct proc *, struct proc *);

#endif /* !_LOCORE */
#endif /* _KERNEL */

#ifdef __arm__

/*
 * User-visible definitions
 */

/* CTL_MACHDEP definitions. */
#define	CPU_DEBUG		1	/* int: misc kernel debug control */
#define	CPU_BOOTED_DEVICE	2	/* string: device we booted from */
#define	CPU_BOOTED_KERNEL	3	/* string: kernel we booted */
#define	CPU_CONSDEV		4	/* struct: dev_t of our console */
#define	CPU_POWERSAVE		5	/* int: use CPU powersave mode */

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Kernel-only definitions
 */

#if !defined(_MODULE) && defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"
#include "opt_cputypes.h"
#endif /* !_MODULE && _KERNEL_OPT */

#ifndef _LOCORE
#if defined(TPIDRPRW_IS_CURLWP) || defined(TPIDRPRW_IS_CURCPU)
#include <arm/armreg.h>
#endif /* TPIDRPRW_IS_CURLWP || TPIDRPRW_IS_CURCPU */

/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_fpu_present;

/* All the CLKF_* macros take a struct clockframe * as an argument. */

/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from USR mode or not.
 */
#define CLKF_USERMODE(cf) (((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler.
 */
#if !defined(__ARM_EABI__)
/* Hack to treat FPE time as interrupt time so we can measure it */
#define CLKF_INTR(cf)						\
	((curcpu()->ci_intr_depth > 1) ||			\
	    ((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
#else
/* (void)(cf) keeps the argument "used" so callers get no unused warnings. */
#define CLKF_INTR(cf)	((void)(cf), curcpu()->ci_intr_depth > 1)
#endif

/*
 * CLKF_PC: Extract the program counter from a clockframe
 */
#define CLKF_PC(frame)		(frame->cf_tf.tf_pc)

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define LWP_PC(l)		(lwp_trapframe(l)->tf_pc)

/*
 * Per-CPU information.  For now we assume one CPU.
 */
#ifdef _KERNEL
static inline int curcpl(void);
static inline void set_curcpl(int);
static inline void cpu_dosoftints(void);
#endif

#ifdef _KMEMUSER
#include <sys/intr.h>
#endif
#include <sys/atomic.h>
#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>

#include <machine/param.h>

/*
 * Cache info variables.
 */
/*
 * Cache architecture codes: bit 0 set => physically-tagged (xxPT),
 * bit 1 set => physically-indexed (PIxx).
 */
#define	CACHE_TYPE_VIVT		0
#define	CACHE_TYPE_xxPT		1
#define	CACHE_TYPE_VIPT		1
#define	CACHE_TYPE_PIxx		2
#define	CACHE_TYPE_PIPT		3

/* PRIMARY CACHE VARIABLES */
struct arm_cache_info {
	u_int icache_size;		/* I-cache geometry (bytes/lines/ways) */
	u_int icache_line_size;
	u_int icache_ways;
	u_int icache_way_size;
	u_int icache_sets;

	u_int dcache_size;		/* D-cache geometry (bytes/lines/ways) */
	u_int dcache_line_size;
	u_int dcache_ways;
	u_int dcache_way_size;
	u_int dcache_sets;

	uint8_t cache_type;		/* one of CACHE_TYPE_* above */
	bool cache_unified;		/* true if I and D caches are unified */
	uint8_t icache_type;
	uint8_t dcache_type;
};

struct cpu_info {
	struct cpu_data	ci_data;	/* MI per-cpu data */
	device_t	ci_dev;		/* Device corresponding to this CPU */
	cpuid_t		ci_cpuid;
	uint32_t	ci_arm_cpuid;	/* aggregate CPU id */
	uint32_t	ci_arm_cputype;	/* CPU type */
	uint32_t	ci_arm_cpurev;	/* CPU revision */
	uint32_t	ci_ctrl;	/* The CPU control register */

	/*
	 * the following are in their own cache line, as they are stored to
	 * regularly by remote CPUs; when they were mixed with other fields
	 * we observed frequent cache misses.
	 */
	int		ci_want_resched __aligned(COHERENCY_UNIT);
					/* resched() was called */
	lwp_t *		ci_curlwp __aligned(COHERENCY_UNIT);
					/* current lwp */
	lwp_t *		ci_onproc;	/* current user LWP / kthread */

	/*
	 * largely CPU-private.
	 */
	lwp_t *		ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);

	struct cpu_softc *
			ci_softc;	/* platform softc */

	int		ci_cpl;		/* current processor level (spl) */
	int		ci_hwpl;	/* current hardware priority */
	int		ci_kfpu_spl;	/* NOTE(review): presumably spl saved
					 * around in-kernel FPU use — confirm */

	volatile u_int	ci_intr_depth;	/* interrupt nesting depth; >1 means
					 * nested (see CLKF_INTR above) */
	volatile u_int	ci_softints;	/* pending soft interrupts, indexed by
					 * ipl (see cpu_dosoftints below) */
	volatile uint32_t ci_blocked_pics;
	volatile uint32_t ci_pending_pics;
	volatile uint32_t ci_pending_ipls;

	lwp_t *		ci_lastlwp;	/* last lwp */

	struct evcnt	ci_arm700bugcount;
	int32_t		ci_mtx_count;
	int		ci_mtx_oldspl;
	register_t	ci_undefsave[3];
	uint32_t	ci_vfp_id;
	uint64_t	ci_lastintr;

	struct pmap_tlb_info *
			ci_tlb_info;
	struct pmap *	ci_pmap_lastuser;
	struct pmap *	ci_pmap_cur;
	tlb_asid_t	ci_pmap_asid_cur;

	struct trapframe *
			ci_ddb_regs;	/* saved registers for the debugger */

	struct evcnt	ci_abt_evs[16];	/* abort-handler event counters */
	struct evcnt	ci_und_ev;
	struct evcnt	ci_und_cp15_ev;
	struct evcnt	ci_vfp_evs[3];

	/* Raw copies of CPU identification registers. */
	uint32_t	ci_midr;
	uint32_t	ci_actlr;
	uint32_t	ci_revidr;
	uint32_t	ci_mpidr;
	uint32_t	ci_mvfr[2];

	uint32_t	ci_capacity_dmips_mhz;

	struct arm_cache_info
			ci_cacheinfo;

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};

extern struct cpu_info cpu_info_store[];

struct lwp *arm_curlwp(void);
struct cpu_info *arm_curcpu(void);

#ifdef _KERNEL
/*
 * curlwp/curcpu() selection.  Modules always go through the out-of-line
 * accessors; in-kernel code uses the TPIDRPRW register when configured to
 * hold either the current lwp or the current cpu_info pointer.
 */
#if defined(_MODULE)

#define	curlwp arm_curlwp()
#define curcpu() arm_curcpu()

#elif defined(TPIDRPRW_IS_CURLWP)
static inline struct lwp *
_curlwp(void)
{
	return (struct lwp *) armreg_tpidrprw_read();
}

static inline void
_curlwp_set(struct lwp *l)
{
	armreg_tpidrprw_write((uintptr_t)l);
}

// Also in <sys/lwp.h> but also here if this was included before <sys/lwp.h>
static inline struct cpu_info *lwp_getcpu(struct lwp *);

#define	curlwp		_curlwp()
// curcpu() expands into two instructions: a mrc and a ldr
#define curcpu()	lwp_getcpu(_curlwp())
#elif defined(TPIDRPRW_IS_CURCPU)
#ifdef __HAVE_PREEMPTION
#error __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif
static inline struct cpu_info *
curcpu(void)
{
	return (struct cpu_info *) armreg_tpidrprw_read();
}
#elif !defined(MULTIPROCESSOR)
#define	curcpu()	(&cpu_info_store[0])
#elif !defined(__HAVE_PREEMPTION)
#error MULTIPROCESSOR && !__HAVE_PREEMPTION requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP
#else
#error MULTIPROCESSOR && __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */

#ifndef curlwp
#define	curlwp		(curcpu()->ci_curlwp)
#endif
#define curpcb		((struct pcb *)lwp_getpcb(curlwp))

#define CPU_INFO_ITERATOR	int
#if defined(_MODULE) || defined(MULTIPROCESSOR)
extern struct cpu_info *cpu_info[];
#define cpu_number()		(curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
/* Iterate at least once even before ncpu is set during early boot. */
#define CPU_INFO_FOREACH(cii, ci)			\
	cii = 0, ci = cpu_info[0]; cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL; cii++
#else
#define cpu_number()		0

#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci)			\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif

#define	LWP0_CPU_INFO	(&cpu_info_store[0])

/* Return the current processor level (spl) of this CPU. */
static inline int
curcpl(void)
{
	return curcpu()->ci_cpl;
}

/* Record a new processor level (spl) for this CPU. */
static inline void
set_curcpl(int pri)
{
	curcpu()->ci_cpl = pri;
}

/*
 * Run pending soft interrupts, but only from base (non-interrupt) context
 * and only those above the current spl.  With PIC fast softints the PIC
 * code dispatches them instead, so this is a no-op.
 */
static inline void
cpu_dosoftints(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	void	dosoftints(void);
#ifndef __HAVE_PIC_FAST_SOFTINTS
	struct cpu_info * const ci = curcpu();
	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
		dosoftints();
#endif
#endif
}

/*
 * Scheduling glue
 */
void cpu_signotify(struct lwp *);
#define setsoftast(ci)		(cpu_signotify((ci)->ci_onproc))

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the i386, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, \
				 setsoftast(lwp_getcpu(l)))

/*
 * We've already preallocated the stack for the idlelwps for additional CPUs.
 * This hook allows to return them.
 */
vaddr_t cpu_uarea_alloc_idlelwp(struct cpu_info *);

#ifdef _ARM_ARCH_6
int	cpu_maxproc_hook(int);
#endif

#endif /* _KERNEL */

#endif /* !_LOCORE */

#endif /* _KERNEL || _KMEMUSER */

#elif defined(__aarch64__)

#include <aarch64/cpu.h>

#endif /* __arm__/__aarch64__ */

#endif /* !_ARM_CPU_H_ */