/* $NetBSD: cpu.h,v 1.53 2024/12/30 19:17:21 jmcneill Exp $ */

/*-
 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AARCH64_CPU_H_
#define _AARCH64_CPU_H_

#include <arm/cpu.h>

#ifdef __aarch64__

#ifdef _KERNEL_OPT
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"
#endif

#include <sys/param.h>

#if defined(_KERNEL) || defined(_KMEMUSER)
#include <sys/evcnt.h>

#include <aarch64/armreg.h>
#include <aarch64/frame.h>

struct clockframe {
	struct trapframe cf_tf;
};

/*
 * (spsr & 0x0f) == 0 catches both SPSR_M_EL0T (64-bit EL0, M[3:0] == 0)
 * and 32-bit user mode (M[4:0] == 0b10000, so M[3:0] == 0 as well).
 */
#define CLKF_USERMODE(cf)	((((cf)->cf_tf.tf_spsr) & 0x0f) == 0)
#define CLKF_PC(cf)		((cf)->cf_tf.tf_pc)
/* the clock interrupt itself is one level of depth, so >1 means nested */
#define CLKF_INTR(cf)		((void)(cf), curcpu()->ci_intr_depth > 1)

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define LWP_PC(l)		((l)->l_md.md_utf->tf_pc)

#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/intr.h>

struct aarch64_cpufuncs {
	void (*cf_set_ttbr0)(uint64_t);
	void (*cf_icache_sync_range)(vaddr_t, vsize_t);
};
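
/*
 * These hooks allow CPU- or errata-specific implementations to be
 * selected at attach time.  A minimal sketch of a call through the
 * current CPU's table follows; the wrapper name is hypothetical, and
 * only ci_cpufuncs and its two members come from this header:
 *
 *	static inline void
 *	cpu_icache_sync_range(vaddr_t va, vsize_t len)
 *	{
 *		curcpu()->ci_cpufuncs.cf_icache_sync_range(va, len);
 *	}
 */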

#define MAX_CACHE_LEVEL	8		/* ARMv8 supports at most 8 cache levels */

struct aarch64_cache_unit {
	u_int cache_type;
#define CACHE_TYPE_VPIPT	0	/* VMID-aware PIPT */
#define CACHE_TYPE_VIVT		1	/* ASID-tagged VIVT */
#define CACHE_TYPE_VIPT		2
#define CACHE_TYPE_PIPT		3
	u_int cache_line_size;
	u_int cache_ways;
	u_int cache_sets;
	u_int cache_way_size;
	u_int cache_size;
};

struct aarch64_cache_info {
	u_int cacheable;
#define CACHE_CACHEABLE_NONE	0
#define CACHE_CACHEABLE_ICACHE	1	/* instruction cache only */
#define CACHE_CACHEABLE_DCACHE	2	/* data cache only */
#define CACHE_CACHEABLE_IDCACHE	3	/* instruction and data caches */
#define CACHE_CACHEABLE_UNIFIED	4	/* unified cache */
	struct aarch64_cache_unit icache;
	struct aarch64_cache_unit dcache;
};
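
/*
 * The geometry fields of a cache unit are related as follows (implied
 * by the field names; stated here for clarity, not taken from this
 * file):
 *
 *	cache_way_size = cache_line_size * cache_sets
 *	cache_size     = cache_way_size  * cache_ways
 */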

struct aarch64_low_power_idle {
	uint32_t min_res;		/* minimum residency (us) */
	uint32_t wakeup_latency;	/* worst-case wakeup latency (us) */
	uint32_t save_restore_flags;
#define LPI_SAVE_RESTORE_CORE	__BIT(0)
#define LPI_SAVE_RESTORE_TRACE	__BIT(1)
#define LPI_SAVE_RESTORE_GICR	__BIT(2)
#define LPI_SAVE_RESTORE_GICD	__BIT(3)
	uint32_t reg_addr;
#define LPI_REG_ADDR_WFI	0xffffffff	/* sentinel: plain WFI entry */

	char *name;
	struct evcnt events;
};

struct cpu_info {
	struct cpu_data ci_data;
	device_t ci_dev;
	cpuid_t ci_cpuid;

	/*
	 * the following are in their own cache line, as they are stored to
	 * regularly by remote CPUs; when they were mixed with other fields
	 * we observed frequent cache misses.
	 */
	int ci_want_resched __aligned(COHERENCY_UNIT);
	/* XXX pending IPIs? */

	/*
	 * this is stored frequently, and is fetched by remote CPUs.
	 */
	struct lwp *ci_curlwp __aligned(COHERENCY_UNIT);
	struct lwp *ci_onproc;

	/*
	 * largely CPU-private.
	 */
	struct lwp *ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);

	uint64_t ci_lastintr;

	int ci_mtx_oldspl;
	int ci_mtx_count;

	int ci_cpl;		/* current processor level (spl) */
	volatile int ci_hwpl;	/* current hardware priority */
	volatile u_int ci_softints;
	volatile u_int ci_intr_depth;
	volatile uint32_t ci_blocked_pics;
	volatile uint32_t ci_pending_pics;
	volatile uint32_t ci_pending_ipls;

	int ci_kfpu_spl;

#if defined(PMAP_MI)
	struct pmap_tlb_info *ci_tlb_info;
	struct pmap *ci_pmap_lastuser;
	struct pmap *ci_pmap_cur;
#endif /* PMAP_MI */

	/* ASID of current pmap */
	tlb_asid_t ci_pmap_asid_cur;

	/* event counters */
	struct evcnt ci_vfp_use;
	struct evcnt ci_vfp_reuse;
	struct evcnt ci_vfp_save;
	struct evcnt ci_vfp_release;
	struct evcnt ci_uct_trap;
	struct evcnt ci_intr_preempt;
	struct evcnt ci_rndrrs_fail;

	/* FDT or similar supplied "cpu capacity" */
	uint32_t ci_capacity_dmips_mhz;

	/* interrupt controller */
	u_int ci_gic_redist;	/* GICv3 redistributor index */
	uint64_t ci_gic_sgir;	/* GICv3 SGIR target */

	/* ACPI */
	uint32_t ci_acpiid;	/* ACPI Processor Unique ID */

	/* ACPI low power idle */
	uint32_t ci_nlpi;
	struct aarch64_low_power_idle *ci_lpi;
	uint64_t ci_last_idle;

	/* cached system registers */
	uint64_t ci_sctlr_el1;
	uint64_t ci_sctlr_el2;

	/* sysctl(9) exposed system registers */
	struct aarch64_sysctl_cpu_id ci_id;
#define ci_midr		ci_id.ac_midr

	/* cache information and function pointers */
	struct aarch64_cache_info ci_cacheinfo[MAX_CACHE_LEVEL];
	struct aarch64_cpufuncs ci_cpufuncs;

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
} __aligned(COHERENCY_UNIT);
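
/*
 * The __aligned(COHERENCY_UNIT) annotations above keep remotely-written
 * fields on their own cache lines.  A build-time check of that contract
 * could look like the following sketch (illustrative only; no such
 * assertion exists in this file):
 *
 *	CTASSERT((offsetof(struct cpu_info, ci_want_resched)
 *	    % COHERENCY_UNIT) == 0);
 */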

#ifdef _KERNEL
static inline __always_inline struct lwp * __attribute__ ((const))
aarch64_curlwp(void)
{
	struct lwp *l;
	__asm("mrs %0, tpidr_el1" : "=r"(l));
	return l;
}
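
/*
 * curlwp is kept in the TPIDR_EL1 software thread-ID register, so the
 * read above is a single MRS with no memory access.  The "const"
 * attribute lets the compiler fold repeated curlwp/curcpu() lookups
 * within one function; this is safe only because the register changes
 * under a thread exclusively at context-switch time (the switch path is
 * assumed to load TPIDR_EL1 with the incoming lwp).
 */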

/* forward declaration; defined in sys/lwp.h. */
static __inline struct cpu_info *lwp_getcpu(struct lwp *);

#define	curcpu()		(lwp_getcpu(aarch64_curlwp()))
#define	setsoftast(ci)		(cpu_signotify((ci)->ci_onproc))
#undef curlwp
#define	curlwp			(aarch64_curlwp())
#define	curpcb			((struct pcb *)lwp_getpcb(curlwp))

void	cpu_signotify(struct lwp *l);
void	cpu_need_proftick(struct lwp *l);

void	cpu_hatch(struct cpu_info *);

extern struct cpu_info *cpu_info[];
extern struct cpu_info cpu_info_store[];

#define CPU_INFO_ITERATOR	int
#if defined(MULTIPROCESSOR) || defined(_MODULE)
#define cpu_number()		(curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = cpu_info[0];					\
	cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL;	\
	cii++
#else /* !MULTIPROCESSOR && !_MODULE */
#define cpu_number()		0
#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif /* MULTIPROCESSOR || _MODULE */
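
/*
 * Typical use of the iterator pair (the standard NetBSD MI idiom, shown
 * here for illustration):
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		... per-CPU work using ci ...
 *	}
 */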

#define	LWP0_CPU_INFO	(&cpu_info_store[0])

#define	__HAVE_CPU_DOSOFTINTS_CI

static inline void
cpu_dosoftints_ci(struct cpu_info *ci)
{
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
	void dosoftints(void);

	/*
	 * Run soft interrupts only from thread context (intr_depth == 0)
	 * and only when one is pending above the current spl.
	 */
	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0) {
		dosoftints();
	}
#endif
}

static inline void
cpu_dosoftints(void)
{
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
	KDASSERT(kpreempt_disabled());
	cpu_dosoftints_ci(curcpu());
#endif
}

struct cpufeature_attach_args {
	struct cpu_info *ci;
};

#endif /* _KERNEL */

#endif /* _KERNEL || _KMEMUSER */

#endif /* __aarch64__ */

#endif /* _AARCH64_CPU_H_ */