/* $NetBSD: cpu.c,v 1.73 2023/02/03 08:05:27 skrll Exp $ */

/*
 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.73 2023/02/03 08:05:27 skrll Exp $");

#include "locators.h"
#include "opt_arm_debug.h"
#include "opt_ddb.h"
#include "opt_fdt.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/reboot.h>
#include <sys/rndsource.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <crypto/aes/aes_impl.h>
#include <crypto/aes/arch/arm/aes_armv8.h>
#include <crypto/aes/arch/arm/aes_neon.h>
#include <crypto/chacha/chacha_impl.h>
#include <crypto/chacha/arch/arm/chacha_neon.h>

#include <aarch64/armreg.h>
#include <aarch64/cpu.h>
#include <aarch64/cpu_counter.h>
#ifdef DDB
#include <aarch64/db_machdep.h>
#endif
#include <aarch64/machdep.h>

#include <arm/cpufunc.h>
#include <arm/cpu_topology.h>
#ifdef FDT
#include <arm/fdt/arm_fdtvar.h>
#endif

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

void cpu_attach(device_t, cpuid_t);
void cpu_setup_id(struct cpu_info *);

static void identify_aarch64_model(uint32_t, char *, size_t);
static void cpu_identify(device_t self, struct cpu_info *);
static void cpu_identify1(device_t self, struct cpu_info *);
static void cpu_identify2(device_t self, struct cpu_info *);
static void cpu_init_counter(struct cpu_info *);
static void cpu_setup_sysctl(device_t, struct cpu_info *);
static void cpu_setup_rng(device_t, struct cpu_info *);
static void cpu_setup_aes(device_t, struct cpu_info *);
static void cpu_setup_chacha(device_t, struct cpu_info *);

#ifdef MULTIPROCESSOR
#define NCPUINFO	MAXCPUS
#else
#define NCPUINFO	1
#endif /* MULTIPROCESSOR */

/*
 * Our exported cpu_info structs; these will first be used by the
 * secondary CPUs as part of cpu_mpstart and the hatching process.
 */
struct cpu_info cpu_info_store[NCPUINFO] = {
	[0] = {
		.ci_cpl = IPL_HIGH,
		.ci_curlwp = &lwp0
	}
};
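
/*
 * Slot 0 is initialized statically so that the primary CPU has a valid
 * curlwp and spl level before cpu_attach() runs.
 */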

void
cpu_attach(device_t dv, cpuid_t id)
{
	struct cpu_info *ci;
	const int unit = device_unit(dv);

	if (unit == 0) {
		ci = curcpu();
		ci->ci_cpuid = id;
	} else {
#ifdef MULTIPROCESSOR
		if ((boothowto & RB_MD1) != 0) {
			aprint_naive("\n");
			aprint_normal(": multiprocessor boot disabled\n");
			return;
		}

		KASSERT(unit < MAXCPUS);
		ci = &cpu_info_store[unit];

		ci->ci_cpl = IPL_HIGH;
		ci->ci_cpuid = id;
		/* ci_id is filled in by each CPU itself while hatching */

		cpu_info[ncpu] = ci;
		if (cpu_hatched_p(unit) == 0) {
			ci->ci_dev = dv;
			device_set_private(dv, ci);
			ci->ci_index = -1;

			aprint_naive(": disabled\n");
			aprint_normal(": disabled (unresponsive)\n");
			return;
		}
#else /* MULTIPROCESSOR */
		aprint_naive(": disabled\n");
		aprint_normal(": disabled (uniprocessor kernel)\n");
		return;
#endif /* MULTIPROCESSOR */
	}

	ci->ci_dev = dv;
	device_set_private(dv, ci);

	ci->ci_kfpu_spl = -1;

	arm_cpu_do_topology(ci);	// XXXNH move this after mi_cpu_attach
	cpu_identify(dv, ci);

	cpu_setup_sysctl(dv, ci);

#ifdef MULTIPROCESSOR
	if (unit != 0) {
		mi_cpu_attach(ci);
		pmap_tlb_info_attach(&pmap_tlb0_info, ci);
		aarch64_parsecacheinfo(ci);
	}
#endif /* MULTIPROCESSOR */

	fpu_attach(ci);

	cpu_identify1(dv, ci);
	aarch64_printcacheinfo(dv, ci);
	cpu_identify2(dv, ci);

	if (unit != 0) {
		return;
	}

#ifdef DDB
	db_machdep_init(ci);
#endif

	cpu_init_counter(ci);

	/* These currently only check the boot processor (BP). */
	cpu_setup_rng(dv, ci);
	cpu_setup_aes(dv, ci);
	cpu_setup_chacha(dv, ci);
}

struct cpuidtab {
	uint32_t cpu_partnum;
	const char *cpu_name;
	const char *cpu_vendor;
	const char *cpu_architecture;
};

#define CPU_PARTMASK	(CPU_ID_IMPLEMENTOR_MASK | CPU_ID_PARTNO_MASK)
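
/*
 * Matching on implementer and part number only: the variant and
 * revision fields of MIDR are masked off, so a single table entry
 * covers every stepping of a given part.
 */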

const struct cpuidtab cpuids[] = {
	{ CPU_ID_CORTEXA35R0 & CPU_PARTMASK, "Cortex-A35", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA53R0 & CPU_PARTMASK, "Cortex-A53", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA57R0 & CPU_PARTMASK, "Cortex-A57", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA55R1 & CPU_PARTMASK, "Cortex-A55", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA65R0 & CPU_PARTMASK, "Cortex-A65", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA72R0 & CPU_PARTMASK, "Cortex-A72", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA73R0 & CPU_PARTMASK, "Cortex-A73", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA75R2 & CPU_PARTMASK, "Cortex-A75", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA76R3 & CPU_PARTMASK, "Cortex-A76", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA76AER1 & CPU_PARTMASK, "Cortex-A76AE", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA77R0 & CPU_PARTMASK, "Cortex-A77", "Arm", "v8.2-A+" },
	{ CPU_ID_NVIDIADENVER2 & CPU_PARTMASK, "Denver2", "NVIDIA", "v8-A" },
	{ CPU_ID_EMAG8180 & CPU_PARTMASK, "eMAG", "Ampere", "v8-A" },
	{ CPU_ID_NEOVERSEE1R1 & CPU_PARTMASK, "Neoverse E1", "Arm", "v8.2-A+" },
	{ CPU_ID_NEOVERSEN1R3 & CPU_PARTMASK, "Neoverse N1", "Arm", "v8.2-A+" },
	{ CPU_ID_THUNDERXRX, "ThunderX", "Cavium", "v8-A" },
	{ CPU_ID_THUNDERX81XXRX, "ThunderX CN81XX", "Cavium", "v8-A" },
	{ CPU_ID_THUNDERX83XXRX, "ThunderX CN83XX", "Cavium", "v8-A" },
	{ CPU_ID_THUNDERX2RX, "ThunderX2", "Marvell", "v8.1-A" },
	{ CPU_ID_APPLE_M1_ICESTORM & CPU_PARTMASK, "M1 Icestorm", "Apple", "Apple Silicon" },
	{ CPU_ID_APPLE_M1_FIRESTORM & CPU_PARTMASK, "M1 Firestorm", "Apple", "Apple Silicon" },
};

static void
identify_aarch64_model(uint32_t cpuid, char *buf, size_t len)
{
	int i;
	uint32_t cpupart, variant, revision;

	cpupart = cpuid & CPU_PARTMASK;
	variant = __SHIFTOUT(cpuid, CPU_ID_VARIANT_MASK);
	revision = __SHIFTOUT(cpuid, CPU_ID_REVISION_MASK);

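	/*
	 * e.g. MIDR 0x410fd083 decodes as implementer 0x41 (Arm),
	 * part number 0xd08 (Cortex-A72), variant 0 and revision 3,
	 * and is reported below as "Arm Cortex-A72 r0p3 (v8-A)".
	 */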
	for (i = 0; i < __arraycount(cpuids); i++) {
		if (cpupart == cpuids[i].cpu_partnum) {
			snprintf(buf, len, "%s %s r%dp%d (%s)",
			    cpuids[i].cpu_vendor, cpuids[i].cpu_name,
			    variant, revision,
			    cpuids[i].cpu_architecture);
			return;
		}
	}

	snprintf(buf, len, "unknown CPU (ID = 0x%08x)", cpuid);
}

static void
cpu_identify(device_t self, struct cpu_info *ci)
{
	char model[128];
	const char *m;

	identify_aarch64_model(ci->ci_id.ac_midr, model, sizeof(model));

	aprint_naive("\n");
	aprint_normal(": %s, id 0x%lx\n", model, ci->ci_cpuid);
	aprint_normal_dev(ci->ci_dev, "package %u, core %u, smt %u\n",
	    ci->ci_package_id, ci->ci_core_id, ci->ci_smt_id);

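	/* The boot CPU's model string becomes the machine model (hw.model). */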
	if (ci->ci_index == 0) {
		m = cpu_getmodel();
		if (m == NULL || *m == 0)
			cpu_setmodel("%s", model);

		if (CPU_ID_ERRATA_CAVIUM_THUNDERX_1_1_P(ci->ci_id.ac_midr))
			aprint_normal("WARNING: ThunderX Pass 1.1 detected.\n"
			    "This has known hardware bugs that may cause the "
			    "incorrect operation of atomic operations.\n");
	}
}

static void
cpu_identify1(device_t self, struct cpu_info *ci)
{
	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;
	uint64_t sctlr = ci->ci_sctlr_el1;

	if (sctlr & SCTLR_I)
		aprint_verbose_dev(self, "IC enabled");
	else
		aprint_verbose_dev(self, "IC disabled");

	if (sctlr & SCTLR_C)
		aprint_verbose(", DC enabled");
	else
		aprint_verbose(", DC disabled");

	if (sctlr & SCTLR_A)
		aprint_verbose(", Alignment check enabled\n");
	else {
		switch (sctlr & (SCTLR_SA | SCTLR_SA0)) {
		case SCTLR_SA | SCTLR_SA0:
			aprint_verbose(
			    ", EL0/EL1 stack Alignment check enabled\n");
			break;
		case SCTLR_SA:
			aprint_verbose(", EL1 stack Alignment check enabled\n");
			break;
		case SCTLR_SA0:
			aprint_verbose(", EL0 stack Alignment check enabled\n");
			break;
		case 0:
			aprint_verbose(", Alignment check disabled\n");
			break;
		}
	}

	/*
	 * CTR - Cache Type Register
	 */
	const uint64_t ctr = id->ac_ctr;
	const uint64_t clidr = id->ac_clidr;
	aprint_verbose_dev(self, "Cache Writeback Granule %" PRIu64 "B,"
	    " Exclusives Reservation Granule %" PRIu64 "B\n",
	    __SHIFTOUT(ctr, CTR_EL0_CWG_LINE) * 4,
	    __SHIFTOUT(ctr, CTR_EL0_ERG_LINE) * 4);

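	/*
	 * DminLine/IminLine encode log2(line size in 4-byte words),
	 * hence the "sizeof(int) <<" conversions to bytes below.
	 */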
	aprint_verbose_dev(self, "Dcache line %ld, Icache line %ld"
	    ", DIC=%lu, IDC=%lu, LoUU=%lu, LoC=%lu, LoUIS=%lu\n",
	    sizeof(int) << __SHIFTOUT(ctr, CTR_EL0_DMIN_LINE),
	    sizeof(int) << __SHIFTOUT(ctr, CTR_EL0_IMIN_LINE),
	    __SHIFTOUT(ctr, CTR_EL0_DIC),
	    __SHIFTOUT(ctr, CTR_EL0_IDC),
	    __SHIFTOUT(clidr, CLIDR_LOUU),
	    __SHIFTOUT(clidr, CLIDR_LOC),
	    __SHIFTOUT(clidr, CLIDR_LOUIS));
}


/*
 * Identify VFP/SIMD and other optional CPU features.
 */
static void
cpu_identify2(device_t self, struct cpu_info *ci)
{
	struct aarch64_sysctl_cpu_id * const id = &ci->ci_id;

	aprint_debug_dev(self, "midr=0x%" PRIx64 " mpidr=0x%" PRIx64 "\n",
	    id->ac_midr, id->ac_mpidr);
	aprint_verbose_dev(self, "revID=0x%" PRIx64, id->ac_revidr);

	/* ID_AA64DFR0_EL1 */
	switch (__SHIFTOUT(id->ac_aa64dfr0, ID_AA64DFR0_EL1_PMUVER)) {
	case ID_AA64DFR0_EL1_PMUVER_V3:
		aprint_verbose(", PMCv3");
		break;
	case ID_AA64DFR0_EL1_PMUVER_NOV3:
		aprint_verbose(", PMC");
		break;
	}

	/* ID_AA64MMFR0_EL1 */
	switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN4)) {
	case ID_AA64MMFR0_EL1_TGRAN4_4KB:
		aprint_verbose(", 4k table");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN16)) {
	case ID_AA64MMFR0_EL1_TGRAN16_16KB:
		aprint_verbose(", 16k table");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN64)) {
	case ID_AA64MMFR0_EL1_TGRAN64_64KB:
		aprint_verbose(", 64k table");
		break;
	}

	switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_ASIDBITS)) {
	case ID_AA64MMFR0_EL1_ASIDBITS_8BIT:
		aprint_verbose(", 8bit ASID");
		break;
	case ID_AA64MMFR0_EL1_ASIDBITS_16BIT:
		aprint_verbose(", 16bit ASID");
		break;
	}
	aprint_verbose("\n");

	aprint_verbose_dev(self, "auxID=0x%" PRIx64, ci->ci_id.ac_aa64isar0);

	/* PFR0 */
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_CSV3)) {
	case ID_AA64PFR0_EL1_CSV3_IMPL:
		aprint_verbose(", CSV3");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_CSV2)) {
	case ID_AA64PFR0_EL1_CSV2_IMPL:
		aprint_verbose(", CSV2");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_GIC)) {
	case ID_AA64PFR0_EL1_GIC_CPUIF_EN:
		aprint_verbose(", GICv3");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_FP)) {
	case ID_AA64PFR0_EL1_FP_NONE:
		break;
	default:
		aprint_verbose(", FP");
		break;
	}

	/* ISAR0 */
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_CRC32)) {
	case ID_AA64ISAR0_EL1_CRC32_CRC32X:
		aprint_verbose(", CRC32");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_SHA1)) {
	case ID_AA64ISAR0_EL1_SHA1_SHA1CPMHSU:
		aprint_verbose(", SHA1");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_SHA2)) {
	case ID_AA64ISAR0_EL1_SHA2_SHA256HSU:
		aprint_verbose(", SHA256");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_AES)) {
	case ID_AA64ISAR0_EL1_AES_AES:
		aprint_verbose(", AES");
		break;
	case ID_AA64ISAR0_EL1_AES_PMUL:
		aprint_verbose(", AES+PMULL");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_RNDR)) {
	case ID_AA64ISAR0_EL1_RNDR_RNDRRS:
		aprint_verbose(", RNDRRS");
		break;
	}

	/* PFR0:DIT -- data-independent timing support */
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_DIT)) {
	case ID_AA64PFR0_EL1_DIT_IMPL:
		aprint_verbose(", DIT");
		break;
	}

	/* PFR0:AdvSIMD */
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_ADVSIMD)) {
	case ID_AA64PFR0_EL1_ADV_SIMD_NONE:
		break;
	default:
		aprint_verbose(", NEON");
		break;
	}

	/* MVFR0/MVFR1 */
	switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_FPROUND)) {
	case MVFR0_FPROUND_ALL:
		aprint_verbose(", rounding");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_FPTRAP)) {
	case MVFR0_FPTRAP_TRAP:
		aprint_verbose(", exceptions");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_FPDNAN)) {
	case MVFR1_FPDNAN_NAN:
		aprint_verbose(", NaN propagation");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_FPFTZ)) {
	case MVFR1_FPFTZ_DENORMAL:
		aprint_verbose(", denormals");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_SIMDREG)) {
	case MVFR0_SIMDREG_16x64:
		aprint_verbose(", 16x64bitRegs");
		break;
	case MVFR0_SIMDREG_32x64:
		aprint_verbose(", 32x64bitRegs");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_SIMDFMAC)) {
	case MVFR1_SIMDFMAC_FMAC:
		aprint_verbose(", Fused Multiply-Add");
		break;
	}

	aprint_verbose("\n");
}

/*
 * Enable the performance counter, then estimate frequency for
 * the current PE and store the result in cpu_cc_freq.
 */
static void
cpu_init_counter(struct cpu_info *ci)
{
	const uint64_t dfr0 = reg_id_aa64dfr0_el1_read();
	const u_int pmuver = __SHIFTOUT(dfr0, ID_AA64DFR0_EL1_PMUVER);
	if (pmuver == ID_AA64DFR0_EL1_PMUVER_NONE) {
		/* Performance Monitors Extension not implemented. */
		return;
	}
	if (pmuver == ID_AA64DFR0_EL1_PMUVER_IMPL) {
		/* Non-standard Performance Monitors are not supported. */
		return;
	}

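	/*
	 * PMCR_E enables the counters, PMCR_C resets the cycle counter,
	 * and PMCR_LC selects full 64-bit cycle counting.  Overflow
	 * interrupts are masked and only the cycle counter is enabled.
	 */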
	reg_pmcr_el0_write(PMCR_E | PMCR_C | PMCR_LC);
	reg_pmintenclr_el1_write(PMINTEN_C | PMINTEN_P);
	reg_pmcntenset_el0_write(PMCNTEN_C);

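	/*
	 * Sample the cycle counter across a 100000us (0.1s) delay;
	 * scaling the delta by 10 gives an estimate in cycles per second.
	 */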
	const uint32_t prev = cpu_counter32();
	delay(100000);
	ci->ci_data.cpu_cc_freq = (cpu_counter32() - prev) * 10;
}

/*
 * Fill in this CPU's id data.  Must be called on all CPUs.
 */
void __noasan
cpu_setup_id(struct cpu_info *ci)
{
	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;

	/* SCTLR - System Control Register */
	ci->ci_sctlr_el1 = reg_sctlr_el1_read();

	memset(id, 0, sizeof *id);

	id->ac_midr      = reg_midr_el1_read();
	id->ac_revidr    = reg_revidr_el1_read();
	id->ac_mpidr     = reg_mpidr_el1_read();

	id->ac_aa64dfr0  = reg_id_aa64dfr0_el1_read();
	id->ac_aa64dfr1  = reg_id_aa64dfr1_el1_read();

	id->ac_aa64isar0 = reg_id_aa64isar0_el1_read();
	id->ac_aa64isar1 = reg_id_aa64isar1_el1_read();

	id->ac_aa64mmfr0 = reg_id_aa64mmfr0_el1_read();
	id->ac_aa64mmfr1 = reg_id_aa64mmfr1_el1_read();
	id->ac_aa64mmfr2 = reg_id_aa64mmfr2_el1_read();

	id->ac_mvfr0     = reg_mvfr0_el1_read();
	id->ac_mvfr1     = reg_mvfr1_el1_read();
	id->ac_mvfr2     = reg_mvfr2_el1_read();

	id->ac_clidr     = reg_clidr_el1_read();
	id->ac_ctr       = reg_ctr_el0_read();

	/* Only in ARMv8.2. */
	id->ac_aa64zfr0  = 0 /* reg_id_aa64zfr0_el1_read() */;

	id->ac_aa64pfr0  = reg_id_aa64pfr0_el1_read();
	id->ac_aa64pfr1  = reg_id_aa64pfr1_el1_read();
}

/*
 * Set up the per-CPU sysctl tree.
 */
static void
cpu_setup_sysctl(device_t dv, struct cpu_info *ci)
{
	const struct sysctlnode *cpunode = NULL;

	sysctl_createv(NULL, 0, NULL, &cpunode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, device_xname(dv), NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP,
		       CTL_CREATE, CTL_EOL);

	if (cpunode == NULL)
		return;

	sysctl_createv(NULL, 0, &cpunode, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "cpu_id", NULL,
		       NULL, 0, &ci->ci_id, sizeof(ci->ci_id),
		       CTL_CREATE, CTL_EOL);
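
	/*
	 * The result is machdep.<cpuN>.cpu_id, a struct
	 * aarch64_sysctl_cpu_id snapshot that userland (e.g. cpuctl(8))
	 * can read via sysctl(3).
	 */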
}

static struct krndsource rndrrs_source;

static void
rndrrs_get(size_t nbytes, void *cookie)
{
	/* Entropy bits per data byte, wild-arse guess.  */
	const unsigned bpb = 4;
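	/* 4 bits/byte * 8 bytes = 32 bits credited per 64-bit sample. */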
	size_t nbits = nbytes*NBBY;
	uint64_t x;
	int error;

	while (nbits) {
		/*
		 * x := random 64-bit sample
		 * error := Z bit, set to 1 if sample is bad
		 *
		 * XXX This should be done by marking the function
		 * __attribute__((target("arch=armv8.5-a+rng"))) and
		 * using `mrs %0, rndrrs', but:
		 *
		 * (a) the version of gcc we use doesn't support that,
		 * and
		 * (b) clang doesn't seem to like `rndrrs' itself.
		 *
		 * So we use the numeric encoding for now.
		 */
		__asm __volatile(""
		    "mrs	%0, s3_3_c2_c4_1\n"
		    "cset	%w1, eq"
		    : "=r"(x), "=r"(error));
		if (error)
			break;
		rnd_add_data_sync(&rndrrs_source, &x, sizeof(x),
		    bpb*sizeof(x));
		nbits -= MIN(nbits, bpb*sizeof(x));
	}

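	/* Don't leave the last sample sitting on the stack. */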
	explicit_memset(&x, 0, sizeof x);
}

/*
 * Set up the RNDRRS entropy source.
 */
static void
cpu_setup_rng(device_t dv, struct cpu_info *ci)
{
	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;

	/* Verify that it is supported.  */
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_RNDR)) {
	case ID_AA64ISAR0_EL1_RNDR_RNDRRS:
		break;
	default:
		return;
	}

	/* Attach it.  */
	rndsource_setcb(&rndrrs_source, rndrrs_get, NULL);
	rnd_attach_source(&rndrrs_source, "rndrrs", RND_TYPE_RNG,
	    RND_FLAG_DEFAULT|RND_FLAG_HASCB);
}

/*
 * Set up the AES implementation.
 */
static void
cpu_setup_aes(device_t dv, struct cpu_info *ci)
{
	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;

	/* Check for ARMv8.0-AES support.  */
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_AES)) {
	case ID_AA64ISAR0_EL1_AES_AES:
	case ID_AA64ISAR0_EL1_AES_PMUL:
		aes_md_init(&aes_armv8_impl);
		return;
	default:
		break;
	}

	/* Failing that, check for SIMD support.  */
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_ADVSIMD)) {
	case ID_AA64PFR0_EL1_ADV_SIMD_IMPL:
		aes_md_init(&aes_neon_impl);
		return;
	default:
		break;
	}
}

/*
 * Set up the ChaCha implementation.
 */
static void
cpu_setup_chacha(device_t dv, struct cpu_info *ci)
{
	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;

	/* Check for SIMD support.  */
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_ADVSIMD)) {
	case ID_AA64PFR0_EL1_ADV_SIMD_IMPL:
		chacha_md_init(&chacha_neon_impl);
		return;
	default:
		break;
	}
}

#ifdef MULTIPROCESSOR
/*
 * Initialise a secondary processor.
 *
 * printf isn't available, as kmutex(9) relies on curcpu(), which isn't
 * set up yet.
 */
void __noasan
cpu_init_secondary_processor(int cpuindex)
{
	struct cpu_info *ci = &cpu_info_store[cpuindex];
	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;

	aarch64_setcpufuncs(ci);

	/* Sets ci->ci_{sctlr,midr,mpidr}, etc */
	cpu_setup_id(ci);

	arm_cpu_topology_set(ci, id->ac_mpidr);
	aarch64_getcacheinfo(ci);

	cpu_set_hatched(cpuindex);

	/*
	 * return to assembly to wait for cpu_boot_secondary_processors
	 */
}


/*
 * When we are called, the MMU and caches are on, and we are running on
 * the stack of the idle lwp for this CPU.
 */
void
cpu_hatch(struct cpu_info *ci)
{
	KASSERT(curcpu() == ci);
	KASSERT((reg_tcr_el1_read() & TCR_EPD0) != 0);

#ifdef DDB
	db_machdep_cpu_init();
#endif

	cpu_init_counter(ci);

	intr_cpu_init(ci);

#ifdef FDT
	arm_fdt_cpu_hatch(ci);
#endif

	/*
	 * Clear this CPU's bit in arm_cpu_mbox to signal
	 * cpu_boot_secondary_processors().  With CPUs 0..3, if cpu2 is
	 * unresponsive, ci_index ends up cpu0=0, cpu1=1, cpu2=undefined,
	 * cpu3=2, so the mbox must be indexed by device_unit() rather
	 * than by ci_index.
	 */

	cpu_clr_mbox(device_unit(ci->ci_dev));
}
#endif /* MULTIPROCESSOR */
745