/*	$NetBSD: cpu.h,v 1.48 2022/11/03 09:04:56 skrll Exp $	*/

/*-
 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AARCH64_CPU_H_
#define _AARCH64_CPU_H_

#include <arm/cpu.h>

#ifdef __aarch64__

#ifdef _KERNEL_OPT
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"
#endif

#include <sys/param.h>

#if defined(_KERNEL) || defined(_KMEMUSER)
#include <sys/evcnt.h>

#include <aarch64/armreg.h>
#include <aarch64/frame.h>

struct clockframe {
	struct trapframe cf_tf;
};

/* (spsr & 15) == SPSR_M_EL0T (64-bit, 0) or USER (32-bit, 0) */
#define CLKF_USERMODE(cf)	((((cf)->cf_tf.tf_spsr) & 0x0f) == 0)
#define CLKF_PC(cf)		((cf)->cf_tf.tf_pc)
#define CLKF_INTR(cf)		((void)(cf), curcpu()->ci_intr_depth > 1)
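
/*
 * Illustrative use of the clockframe accessors.  This is a minimal
 * sketch only: the handler below is hypothetical and not part of this
 * file, but it shows how a clock interrupt handler can classify the
 * interrupted context before charging time to it.
 *
 *	void
 *	example_clockintr(struct clockframe *cf)
 *	{
 *		if (CLKF_INTR(cf)) {
 *			// interrupted another interrupt handler
 *		} else if (CLKF_USERMODE(cf)) {
 *			// interrupted user code; CLKF_PC(cf) is a user PC
 *		} else {
 *			// interrupted kernel code at CLKF_PC(cf)
 *		}
 *	}
 */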

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define LWP_PC(l)		((l)->l_md.md_utf->tf_pc)

#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/intr.h>

struct aarch64_cpufuncs {
	void (*cf_set_ttbr0)(uint64_t);
	void (*cf_icache_sync_range)(vaddr_t, vsize_t);
};

#define MAX_CACHE_LEVEL	8	/* ARMv8 supports up to 8 levels of cache */

struct aarch64_cache_unit {
	u_int cache_type;
#define CACHE_TYPE_VPIPT	0	/* VMID-aware PIPT */
#define CACHE_TYPE_VIVT		1	/* ASID-tagged VIVT */
#define CACHE_TYPE_VIPT		2
#define CACHE_TYPE_PIPT		3
	u_int cache_line_size;
	u_int cache_ways;
	u_int cache_sets;
	u_int cache_way_size;
	u_int cache_size;
};

struct aarch64_cache_info {
	u_int cacheable;
#define CACHE_CACHEABLE_NONE	0
#define CACHE_CACHEABLE_ICACHE	1	/* instruction cache only */
#define CACHE_CACHEABLE_DCACHE	2	/* data cache only */
#define CACHE_CACHEABLE_IDCACHE	3	/* instruction and data caches */
#define CACHE_CACHEABLE_UNIFIED	4	/* unified cache */
	struct aarch64_cache_unit icache;
	struct aarch64_cache_unit dcache;
};

struct cpu_info {
	struct cpu_data ci_data;
	device_t ci_dev;
	cpuid_t ci_cpuid;

	/*
	 * the following are in their own cache line, as they are stored to
	 * regularly by remote CPUs; when they were mixed with other fields
	 * we observed frequent cache misses.
	 */
	int ci_want_resched __aligned(COHERENCY_UNIT);
	/* XXX pending IPIs? */

	/*
	 * this is stored frequently, and is fetched by remote CPUs.
	 */
	struct lwp *ci_curlwp __aligned(COHERENCY_UNIT);
	struct lwp *ci_onproc;

	/*
	 * largely CPU-private.
	 */
	struct lwp *ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);

	uint64_t ci_lastintr;

	int ci_mtx_oldspl;
	int ci_mtx_count;

	int ci_cpl;		/* current processor level (spl) */
	volatile int ci_hwpl;	/* current hardware priority */
	volatile u_int ci_softints;
	volatile u_int ci_intr_depth;
	volatile uint32_t ci_blocked_pics;
	volatile uint32_t ci_pending_pics;
	volatile uint32_t ci_pending_ipls;

	int ci_kfpu_spl;

#if defined(PMAP_MI)
	struct pmap_tlb_info *ci_tlb_info;
	struct pmap *ci_pmap_lastuser;
	struct pmap *ci_pmap_cur;
#endif

	/* ASID of current pmap */
	tlb_asid_t ci_pmap_asid_cur;

	/* event counters */
	struct evcnt ci_vfp_use;
	struct evcnt ci_vfp_reuse;
	struct evcnt ci_vfp_save;
	struct evcnt ci_vfp_release;
	struct evcnt ci_uct_trap;
	struct evcnt ci_intr_preempt;

	/* FDT or similar supplied "cpu capacity" */
	uint32_t ci_capacity_dmips_mhz;

	/* interrupt controller */
	u_int ci_gic_redist;	/* GICv3 redistributor index */
	uint64_t ci_gic_sgir;	/* GICv3 SGIR target */

	/* ACPI */
	uint32_t ci_acpiid;	/* ACPI Processor Unique ID */

	/* cached system registers */
	uint64_t ci_sctlr_el1;
	uint64_t ci_sctlr_el2;

	/* sysctl(9) exposed system registers */
	struct aarch64_sysctl_cpu_id ci_id;

	/* cache information and function pointers */
	struct aarch64_cache_info ci_cacheinfo[MAX_CACHE_LEVEL];
	struct aarch64_cpufuncs ci_cpufuncs;

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
} __aligned(COHERENCY_UNIT);

#ifdef _KERNEL

/*
 * The current lwp is kept in the TPIDR_EL1 system register, so it can be
 * fetched with a single "mrs" instruction.
 */
static inline __always_inline struct lwp * __attribute__ ((const))
aarch64_curlwp(void)
{
	struct lwp *l;
	__asm("mrs %0, tpidr_el1" : "=r"(l));
	return l;
}

/* forward declaration; defined in sys/lwp.h. */
static __inline struct cpu_info *lwp_getcpu(struct lwp *);

#define curcpu()	(lwp_getcpu(aarch64_curlwp()))
#define setsoftast(ci)	(cpu_signotify((ci)->ci_onproc))
#undef curlwp
#define curlwp		(aarch64_curlwp())
#define curpcb		((struct pcb *)lwp_getpcb(curlwp))

void cpu_signotify(struct lwp *l);
void cpu_need_proftick(struct lwp *l);

void cpu_hatch(struct cpu_info *);

extern struct cpu_info *cpu_info[];
extern struct cpu_info cpu_info_store[];

#define CPU_INFO_ITERATOR	int
#if defined(MULTIPROCESSOR) || defined(_MODULE)
#define cpu_number()		(curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = cpu_info[0];					\
	cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL;	\
	cii++
#else /* MULTIPROCESSOR */
#define cpu_number()		0
#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif /* MULTIPROCESSOR */
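
/*
 * Typical iteration over all attached CPUs (a minimal usage sketch;
 * the surrounding declarations are what a caller would write, the
 * loop body here is hypothetical):
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		// visits every attached CPU; on non-MULTIPROCESSOR
 *		// kernels the body runs once with ci == curcpu()
 *	}
 */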

#define LWP0_CPU_INFO	(&cpu_info_store[0])

#define __HAVE_CPU_DOSOFTINTS_CI

static inline void
cpu_dosoftints_ci(struct cpu_info *ci)
{
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
	void dosoftints(void);

	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0) {
		dosoftints();
	}
#endif
}

static inline void
cpu_dosoftints(void)
{
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
	cpu_dosoftints_ci(curcpu());
#endif
}

#endif /* _KERNEL */

#endif /* _KERNEL || _KMEMUSER */

#endif /* __aarch64__ */

#endif /* _AARCH64_CPU_H_ */