xref: /netbsd-src/sys/arch/aarch64/aarch64/cpu.c (revision 4d342c046e3288fb5a1edcd33cfec48c41c80664)
1 /* $NetBSD: cpu.c,v 1.54 2020/07/25 22:51:57 riastradh Exp $ */
2 
3 /*
4  * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(1, "$NetBSD: cpu.c,v 1.54 2020/07/25 22:51:57 riastradh Exp $");
31 
32 #include "locators.h"
33 #include "opt_arm_debug.h"
34 #include "opt_fdt.h"
35 #include "opt_multiprocessor.h"
36 
37 #include <sys/param.h>
38 #include <sys/atomic.h>
39 #include <sys/cpu.h>
40 #include <sys/device.h>
41 #include <sys/kmem.h>
42 #include <sys/reboot.h>
43 #include <sys/rndsource.h>
44 #include <sys/sysctl.h>
45 #include <sys/systm.h>
46 
47 #include <crypto/aes/aes_impl.h>
48 #include <crypto/aes/arch/arm/aes_armv8.h>
49 #include <crypto/aes/arch/arm/aes_neon.h>
50 #include <crypto/chacha/chacha_impl.h>
51 #include <crypto/chacha/arch/arm/chacha_neon.h>
52 
53 #include <aarch64/armreg.h>
54 #include <aarch64/cpu.h>
55 #include <aarch64/cpufunc.h>
56 #include <aarch64/cpu_counter.h>
57 #include <aarch64/machdep.h>
58 
59 #include <arm/cpu_topology.h>
60 #ifdef FDT
61 #include <arm/fdt/arm_fdtvar.h>
62 #endif
63 
64 #ifdef VERBOSE_INIT_ARM
65 #define VPRINTF(...)	printf(__VA_ARGS__)
66 #else
67 #define VPRINTF(...)	__nothing
68 #endif
69 
70 void cpu_attach(device_t, cpuid_t);
71 static void identify_aarch64_model(uint32_t, char *, size_t);
72 static void cpu_identify(device_t self, struct cpu_info *);
73 static void cpu_identify1(device_t self, struct cpu_info *);
74 static void cpu_identify2(device_t self, struct cpu_info *);
75 static void cpu_init_counter(struct cpu_info *);
76 static void cpu_setup_id(struct cpu_info *);
77 static void cpu_setup_sysctl(device_t, struct cpu_info *);
78 static void cpu_setup_rng(device_t, struct cpu_info *);
79 static void cpu_setup_aes(device_t, struct cpu_info *);
80 static void cpu_setup_chacha(device_t, struct cpu_info *);
81 
82 #ifdef MULTIPROCESSOR
83 #define NCPUINFO	MAXCPUS
84 #else
85 #define NCPUINFO	1
86 #endif /* MULTIPROCESSOR */
87 
/*
 * Our exported CPU info;
 * these will be referred to by secondary cpus in the middle of hatching.
 * Slot 0 is the boot CPU and must be usable before autoconfiguration,
 * so it statically starts at IPL_HIGH with lwp0 as its current lwp.
 */
struct cpu_info cpu_info_store[NCPUINFO] = {
	[0] = {
		.ci_cpl = IPL_HIGH,	/* boot at the highest IPL */
		.ci_curlwp = &lwp0	/* lwp0 runs on the boot cpu */
	}
};
98 
/*
 * Attach a cpu device.  Unit 0 is the boot CPU (already running here);
 * other units are secondary CPUs that should be waiting in their hatch
 * stubs.  Secondary CPUs finish their identification in cpu_hatch().
 */
void
cpu_attach(device_t dv, cpuid_t id)
{
	struct cpu_info *ci;
	const int unit = device_unit(dv);

	if (unit == 0) {
		/* Boot CPU: curcpu() was already set up by locore. */
		ci = curcpu();
		ci->ci_cpuid = id;
		cpu_setup_id(ci);
	} else {
#ifdef MULTIPROCESSOR
		if ((boothowto & RB_MD1) != 0) {
			/* Operator disabled MP boot via RB_MD1. */
			aprint_naive("\n");
			aprint_normal(": multiprocessor boot disabled\n");
			return;
		}

		KASSERT(unit < MAXCPUS);
		ci = &cpu_info_store[unit];

		ci->ci_cpl = IPL_HIGH;
		ci->ci_cpuid = id;
		/* ci_id is stored by own cpus when hatching */

		cpu_info[ncpu] = ci;
		if (cpu_hatched_p(unit) == 0) {
			/*
			 * The CPU never reached its hatch stub; record it
			 * as dead (ci_index = -1) and bail out.
			 */
			ci->ci_dev = dv;
			dv->dv_private = ci;
			ci->ci_index = -1;

			aprint_naive(": disabled\n");
			aprint_normal(": disabled (unresponsive)\n");
			return;
		}
#else /* MULTIPROCESSOR */
		aprint_naive(": disabled\n");
		aprint_normal(": disabled (uniprocessor kernel)\n");
		return;
#endif /* MULTIPROCESSOR */
	}

	ci->ci_dev = dv;
	dv->dv_private = ci;

	/* -1 presumably means "no in-kernel FPU section active" — see fpu code */
	ci->ci_kfpu_spl = -1;

	arm_cpu_do_topology(ci);
	cpu_identify(ci->ci_dev, ci);

#ifdef MULTIPROCESSOR
	if (unit != 0) {
		/*
		 * Secondary CPU: register with MI code now; the rest of
		 * the identification runs on the CPU itself in cpu_hatch().
		 */
		mi_cpu_attach(ci);
		return;
	}
#endif /* MULTIPROCESSOR */

	set_cpufuncs();
	fpu_attach(ci);

	cpu_identify1(dv, ci);

	/* aarch64_getcacheinfo(0) was called by locore.S */
	aarch64_printcacheinfo(dv);
	cpu_identify2(dv, ci);

	cpu_init_counter(ci);

	cpu_setup_sysctl(dv, ci);
	cpu_setup_rng(dv, ci);
	cpu_setup_aes(dv, ci);
	cpu_setup_chacha(dv, ci);
}
172 
/* Table entry mapping a MIDR implementer+part number to display names. */
struct cpuidtab {
	uint32_t cpu_partnum;		/* MIDR_EL1 & CPU_PARTMASK */
	const char *cpu_name;		/* part name, e.g. "Cortex-A53" */
	const char *cpu_vendor;		/* implementer name */
	const char *cpu_architecture;	/* architecture version string */
};

/* The implementer and part number fields together identify the model. */
#define CPU_PARTMASK	(CPU_ID_IMPLEMENTOR_MASK | CPU_ID_PARTNO_MASK)

/* Known CPU models; revision bits are masked off of the CPU_ID_* values. */
const struct cpuidtab cpuids[] = {
	{ CPU_ID_CORTEXA35R0 & CPU_PARTMASK, "Cortex-A35", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA53R0 & CPU_PARTMASK, "Cortex-A53", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA57R0 & CPU_PARTMASK, "Cortex-A57", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA55R1 & CPU_PARTMASK, "Cortex-A55", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA65R0 & CPU_PARTMASK, "Cortex-A65", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA72R0 & CPU_PARTMASK, "Cortex-A72", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA73R0 & CPU_PARTMASK, "Cortex-A73", "Arm", "v8-A" },
	{ CPU_ID_CORTEXA75R2 & CPU_PARTMASK, "Cortex-A75", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA76R3 & CPU_PARTMASK, "Cortex-A76", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA76AER1 & CPU_PARTMASK, "Cortex-A76AE", "Arm", "v8.2-A+" },
	{ CPU_ID_CORTEXA77R0 & CPU_PARTMASK, "Cortex-A77", "Arm", "v8.2-A+" },
	{ CPU_ID_NVIDIADENVER2 & CPU_PARTMASK, "Denver2", "NVIDIA", "v8-A" },
	{ CPU_ID_EMAG8180 & CPU_PARTMASK, "eMAG", "Ampere", "v8-A" },
	{ CPU_ID_NEOVERSEE1R1 & CPU_PARTMASK, "Neoverse E1", "Arm", "v8.2-A+" },
	{ CPU_ID_NEOVERSEN1R3 & CPU_PARTMASK, "Neoverse N1", "Arm", "v8.2-A+" },
	{ CPU_ID_THUNDERXRX, "ThunderX", "Cavium", "v8-A" },
	{ CPU_ID_THUNDERX81XXRX, "ThunderX CN81XX", "Cavium", "v8-A" },
	{ CPU_ID_THUNDERX83XXRX, "ThunderX CN83XX", "Cavium", "v8-A" },
	{ CPU_ID_THUNDERX2RX, "ThunderX2", "Marvell", "v8.1-A" },
};
203 
204 static void
205 identify_aarch64_model(uint32_t cpuid, char *buf, size_t len)
206 {
207 	int i;
208 	uint32_t cpupart, variant, revision;
209 
210 	cpupart = cpuid & CPU_PARTMASK;
211 	variant = __SHIFTOUT(cpuid, CPU_ID_VARIANT_MASK);
212 	revision = __SHIFTOUT(cpuid, CPU_ID_REVISION_MASK);
213 
214 	for (i = 0; i < __arraycount(cpuids); i++) {
215 		if (cpupart == cpuids[i].cpu_partnum) {
216 			snprintf(buf, len, "%s %s r%dp%d (%s)",
217 			    cpuids[i].cpu_vendor, cpuids[i].cpu_name,
218 			    variant, revision,
219 			    cpuids[i].cpu_architecture);
220 			return;
221 		}
222 	}
223 
224 	snprintf(buf, len, "unknown CPU (ID = 0x%08x)", cpuid);
225 }
226 
227 static void
228 cpu_identify(device_t self, struct cpu_info *ci)
229 {
230 	char model[128];
231 	const char *m;
232 
233 	identify_aarch64_model(ci->ci_id.ac_midr, model, sizeof(model));
234 	if (ci->ci_index == 0) {
235 		m = cpu_getmodel();
236 		if (m == NULL || *m == 0)
237 			cpu_setmodel("%s", model);
238 	}
239 
240 	aprint_naive("\n");
241 	aprint_normal(": %s, id 0x%lx\n", model, ci->ci_cpuid);
242 }
243 
/*
 * Print (verbose) cache-related configuration of the current PE:
 * SCTLR_EL1 I/D-cache and alignment-check enables, plus the granule
 * and cache-line geometry from CTR_EL0 and CLIDR_EL1.  Reads this
 * CPU's own system registers, so it must run on the CPU described.
 */
static void
cpu_identify1(device_t self, struct cpu_info *ci)
{
	uint64_t ctr, clidr, sctlr;	/* for cache */

	/* SCTLR - System Control Register */
	sctlr = reg_sctlr_el1_read();
	if (sctlr & SCTLR_I)
		aprint_verbose_dev(self, "IC enabled");
	else
		aprint_verbose_dev(self, "IC disabled");

	if (sctlr & SCTLR_C)
		aprint_verbose(", DC enabled");
	else
		aprint_verbose(", DC disabled");

	if (sctlr & SCTLR_A)
		aprint_verbose(", Alignment check enabled\n");
	else {
		/* SA/SA0: SP alignment checking at EL1/EL0 respectively */
		switch (sctlr & (SCTLR_SA | SCTLR_SA0)) {
		case SCTLR_SA | SCTLR_SA0:
			aprint_verbose(
			    ", EL0/EL1 stack Alignment check enabled\n");
			break;
		case SCTLR_SA:
			aprint_verbose(", EL1 stack Alignment check enabled\n");
			break;
		case SCTLR_SA0:
			aprint_verbose(", EL0 stack Alignment check enabled\n");
			break;
		case 0:
			aprint_verbose(", Alignment check disabled\n");
			break;
		}
	}

	/*
	 * CTR - Cache Type Register
	 */
	ctr = reg_ctr_el0_read();
	clidr = reg_clidr_el1_read();
	/*
	 * NOTE(review): CTR_EL0.CWG/ERG encode log2(words); "* 4" differs
	 * from the "4 << n" form used for DMIN/IMIN below — confirm against
	 * the Arm ARM whether "4 << n" was intended here.
	 */
	aprint_verbose_dev(self, "Cache Writeback Granule %" PRIu64 "B,"
	    " Exclusives Reservation Granule %" PRIu64 "B\n",
	    __SHIFTOUT(ctr, CTR_EL0_CWG_LINE) * 4,
	    __SHIFTOUT(ctr, CTR_EL0_ERG_LINE) * 4);

	/* Line sizes in bytes: 4 << log2(words).  LoUU/LoC/LoUIS from CLIDR. */
	aprint_verbose_dev(self, "Dcache line %ld, Icache line %ld"
	    ", DIC=%lu, IDC=%lu, LoUU=%lu, LoC=%lu, LoUIS=%lu\n",
	    sizeof(int) << __SHIFTOUT(ctr, CTR_EL0_DMIN_LINE),
	    sizeof(int) << __SHIFTOUT(ctr, CTR_EL0_IMIN_LINE),
	    __SHIFTOUT(ctr, CTR_EL0_DIC),
	    __SHIFTOUT(ctr, CTR_EL0_IDC),
	    __SHIFTOUT(clidr, CLIDR_LOUU),
	    __SHIFTOUT(clidr, CLIDR_LOC),
	    __SHIFTOUT(clidr, CLIDR_LOUIS));
}
301 
302 
/*
 * identify vfp, etc.
 *
 * Print (verbose) the feature bits cached in ci->ci_id: PMU version,
 * translation granule sizes, ASID width, and security/crypto/FP/SIMD
 * features.  For secondary CPUs this also fills in ci_id and creates
 * the per-cpu sysctl node, which could not be done at attach time.
 */
static void
cpu_identify2(device_t self, struct cpu_info *ci)
{
	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;
	uint64_t dfr0;

	if (!CPU_IS_PRIMARY(ci)) {
		/* Running on the secondary CPU itself: snapshot its IDs now. */
		cpu_setup_id(ci);
		cpu_setup_sysctl(self, ci);
	}

	dfr0 = reg_id_aa64dfr0_el1_read();

	aprint_debug_dev(self, "midr=0x%" PRIx32 " mpidr=0x%" PRIx32 "\n",
	    (uint32_t)ci->ci_id.ac_midr, (uint32_t)ci->ci_id.ac_mpidr);
	aprint_verbose_dev(self, "revID=0x%" PRIx64, id->ac_revidr);

	/* ID_AA64DFR0_EL1: performance monitor version */
	switch (__SHIFTOUT(dfr0, ID_AA64DFR0_EL1_PMUVER)) {
	case ID_AA64DFR0_EL1_PMUVER_V3:
		aprint_verbose(", PMCv3");
		break;
	case ID_AA64DFR0_EL1_PMUVER_NOV3:
		aprint_verbose(", PMC");
		break;
	}

	/* ID_AA64MMFR0_EL1: supported translation granule sizes */
	switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN4)) {
	case ID_AA64MMFR0_EL1_TGRAN4_4KB:
		aprint_verbose(", 4k table");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN16)) {
	case ID_AA64MMFR0_EL1_TGRAN16_16KB:
		aprint_verbose(", 16k table");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN64)) {
	case ID_AA64MMFR0_EL1_TGRAN64_64KB:
		aprint_verbose(", 64k table");
		break;
	}

	/* ASID field width */
	switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_ASIDBITS)) {
	case ID_AA64MMFR0_EL1_ASIDBITS_8BIT:
		aprint_verbose(", 8bit ASID");
		break;
	case ID_AA64MMFR0_EL1_ASIDBITS_16BIT:
		aprint_verbose(", 16bit ASID");
		break;
	}
	aprint_verbose("\n");



	aprint_verbose_dev(self, "auxID=0x%" PRIx64, ci->ci_id.ac_aa64isar0);

	/* PFR0: speculative-execution safety and FP/GIC presence */
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_CSV3)) {
	case ID_AA64PFR0_EL1_CSV3_IMPL:
		aprint_verbose(", CSV3");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_CSV2)) {
	case ID_AA64PFR0_EL1_CSV2_IMPL:
		aprint_verbose(", CSV2");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_GIC)) {
	case ID_AA64PFR0_EL1_GIC_CPUIF_EN:
		aprint_verbose(", GICv3");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_FP)) {
	case ID_AA64PFR0_EL1_FP_IMPL:
		aprint_verbose(", FP");
		break;
	}

	/* ISAR0: instruction set extensions (CRC32, SHA, AES, RNG) */
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_CRC32)) {
	case ID_AA64ISAR0_EL1_CRC32_CRC32X:
		aprint_verbose(", CRC32");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_SHA1)) {
	case ID_AA64ISAR0_EL1_SHA1_SHA1CPMHSU:
		aprint_verbose(", SHA1");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_SHA2)) {
	case ID_AA64ISAR0_EL1_SHA2_SHA256HSU:
		aprint_verbose(", SHA256");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_AES)) {
	case ID_AA64ISAR0_EL1_AES_AES:
		aprint_verbose(", AES");
		break;
	case ID_AA64ISAR0_EL1_AES_PMUL:
		aprint_verbose(", AES+PMULL");
		break;
	}
	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_RNDR)) {
	case ID_AA64ISAR0_EL1_RNDR_RNDRRS:
		aprint_verbose(", RNDRRS");
		break;
	}

	/* PFR0:DIT -- data-independent timing support */
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_DIT)) {
	case ID_AA64PFR0_EL1_DIT_IMPL:
		aprint_verbose(", DIT");
		break;
	}

	/* PFR0:AdvSIMD */
	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_ADVSIMD)) {
	case ID_AA64PFR0_EL1_ADV_SIMD_IMPL:
		aprint_verbose(", NEON");
		break;
	}

	/* MVFR0/MVFR1: AArch32 floating-point capability details */
	switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_FPROUND)) {
	case MVFR0_FPROUND_ALL:
		aprint_verbose(", rounding");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_FPTRAP)) {
	case MVFR0_FPTRAP_TRAP:
		aprint_verbose(", exceptions");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_FPDNAN)) {
	case MVFR1_FPDNAN_NAN:
		aprint_verbose(", NaN propagation");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_FPFTZ)) {
	case MVFR1_FPFTZ_DENORMAL:
		aprint_verbose(", denormals");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_SIMDREG)) {
	case MVFR0_SIMDREG_16x64:
		aprint_verbose(", 16x64bitRegs");
		break;
	case MVFR0_SIMDREG_32x64:
		aprint_verbose(", 32x64bitRegs");
		break;
	}
	switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_SIMDFMAC)) {
	case MVFR1_SIMDFMAC_FMAC:
		aprint_verbose(", Fused Multiply-Add");
		break;
	}

	aprint_verbose("\n");
}
467 
/*
 * Enable the performance counter, then estimate frequency for
 * the current PE and store the result in cpu_cc_freq.
 */
static void
cpu_init_counter(struct cpu_info *ci)
{
	/* Enable (PMCR_E) and reset (PMCR_C) the cycle counter, then
	 * turn on counting of PMCCNTR. */
	reg_pmcr_el0_write(PMCR_E | PMCR_C);
	reg_pmcntenset_el0_write(PMCNTEN_C);

	/* Sample the counter over a 100ms busy-wait; x10 scales to Hz. */
	const uint32_t prev = cpu_counter32();
	delay(100000);
	ci->ci_data.cpu_cc_freq = (cpu_counter32() - prev) * 10;
}
482 
/*
 * Fill in this CPUs id data.  Must be called from hatched cpus.
 * Snapshots the identification system registers into ci->ci_id so
 * they can be exported via sysctl and inspected off-CPU later.
 */
static void
cpu_setup_id(struct cpu_info *ci)
{
	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;

	memset(id, 0, sizeof *id);

	/* Identification */
	id->ac_midr      = reg_midr_el1_read();
	id->ac_revidr    = reg_revidr_el1_read();
	id->ac_mpidr     = reg_mpidr_el1_read();

	/* Debug features */
	id->ac_aa64dfr0  = reg_id_aa64dfr0_el1_read();
	id->ac_aa64dfr1  = reg_id_aa64dfr1_el1_read();

	/* Instruction set attributes */
	id->ac_aa64isar0 = reg_id_aa64isar0_el1_read();
	id->ac_aa64isar1 = reg_id_aa64isar1_el1_read();

	/* Memory model features */
	id->ac_aa64mmfr0 = reg_id_aa64mmfr0_el1_read();
	id->ac_aa64mmfr1 = reg_id_aa64mmfr1_el1_read();
	id->ac_aa64mmfr2 = reg_id_aa64mmfr2_el1_read();

	/* AArch32 media/VFP features */
	id->ac_mvfr0     = reg_mvfr0_el1_read();
	id->ac_mvfr1     = reg_mvfr1_el1_read();
	id->ac_mvfr2     = reg_mvfr2_el1_read();

	/* Cache hierarchy */
	id->ac_clidr     = reg_clidr_el1_read();
	id->ac_ctr       = reg_ctr_el0_read();

	/* Only in ARMv8.2. */
	id->ac_aa64zfr0  = 0 /* reg_id_aa64zfr0_el1_read() */;

	/* Processor features */
	id->ac_aa64pfr0  = reg_id_aa64pfr0_el1_read();
	id->ac_aa64pfr1  = reg_id_aa64pfr1_el1_read();
}
520 
/*
 * setup the per-cpu sysctl tree.
 * Creates machdep.<cpuN> and under it a "cpu_id" struct node exporting
 * the raw ID register snapshot in ci->ci_id.
 */
static void
cpu_setup_sysctl(device_t dv, struct cpu_info *ci)
{
	const struct sysctlnode *cpunode = NULL;

	/* machdep.<cpuN> container node */
	sysctl_createv(NULL, 0, NULL, &cpunode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, device_xname(dv), NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP,
		       CTL_CREATE, CTL_EOL);

	/* Node creation failed; silently skip the child. */
	if (cpunode == NULL)
		return;

	/* machdep.<cpuN>.cpu_id: the struct aarch64_sysctl_cpu_id blob */
	sysctl_createv(NULL, 0, &cpunode, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "cpu_id", NULL,
		       NULL, 0, &ci->ci_id, sizeof(ci->ci_id),
		       CTL_CREATE, CTL_EOL);
}
545 
/* Entropy source backed by the RNDRRS register (ARMv8.5 FEAT_RNG). */
static struct krndsource rndrrs_source;

/*
 * Entropy-request callback: draw 64-bit samples from RNDRRS and feed
 * them into the pool until roughly nbytes worth of estimated entropy
 * has been gathered, or the hardware reports a failed sample.
 */
static void
rndrrs_get(size_t nbytes, void *cookie)
{
	/* Entropy bits per data byte, wild-arse guess.  */
	const unsigned bpb = 4;
	size_t nbits = nbytes*NBBY;
	uint64_t x;
	int error;

	while (nbits) {
		/*
		 * x := random 64-bit sample
		 * error := Z bit, set to 1 if sample is bad
		 *
		 * XXX This should be done by marking the function
		 * __attribute__((target("arch=armv8.5-a+rng"))) and
		 * using `mrs %0, rndrrs', but:
		 *
		 * (a) the version of gcc we use doesn't support that,
		 * and
		 * (b) clang doesn't seem to like `rndrrs' itself.
		 *
		 * So we use the numeric encoding for now.
		 */
		__asm __volatile(""
		    "mrs	%0, s3_3_c2_c4_1\n"
		    "cset	%w1, eq"
		    : "=r"(x), "=r"(error));
		if (error)
			break;
		rnd_add_data_sync(&rndrrs_source, &x, sizeof(x),
		    bpb*sizeof(x));
		nbits -= MIN(nbits, bpb*sizeof(x));
	}

	/* Don't leave the last sample lying around on the stack. */
	explicit_memset(&x, 0, sizeof x);
}
585 
586 /*
587  * setup the RNDRRS entropy source
588  */
589 static void
590 cpu_setup_rng(device_t dv, struct cpu_info *ci)
591 {
592 	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;
593 
594 	/* Probably shared between cores.  */
595 	if (!CPU_IS_PRIMARY(ci))
596 		return;
597 
598 	/* Verify that it is supported.  */
599 	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_RNDR)) {
600 	case ID_AA64ISAR0_EL1_RNDR_RNDRRS:
601 		break;
602 	default:
603 		return;
604 	}
605 
606 	/* Attach it.  */
607 	rndsource_setcb(&rndrrs_source, rndrrs_get, NULL);
608 	rnd_attach_source(&rndrrs_source, "rndrrs", RND_TYPE_RNG,
609 	    RND_FLAG_DEFAULT|RND_FLAG_HASCB);
610 }
611 
612 /*
613  * setup the AES implementation
614  */
615 static void
616 cpu_setup_aes(device_t dv, struct cpu_info *ci)
617 {
618 	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;
619 
620 	/* Check for ARMv8.0-AES support.  */
621 	switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_AES)) {
622 	case ID_AA64ISAR0_EL1_AES_AES:
623 	case ID_AA64ISAR0_EL1_AES_PMUL:
624 		aes_md_init(&aes_armv8_impl);
625 		return;
626 	default:
627 		break;
628 	}
629 
630 	/* Failing that, check for SIMD support.  */
631 	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_ADVSIMD)) {
632 	case ID_AA64PFR0_EL1_ADV_SIMD_IMPL:
633 		aes_md_init(&aes_neon_impl);
634 		return;
635 	default:
636 		break;
637 	}
638 }
639 
640 /*
641  * setup the ChaCha implementation
642  */
643 static void
644 cpu_setup_chacha(device_t dv, struct cpu_info *ci)
645 {
646 	struct aarch64_sysctl_cpu_id *id = &ci->ci_id;
647 
648 	/* Check for SIMD support.  */
649 	switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_ADVSIMD)) {
650 	case ID_AA64PFR0_EL1_ADV_SIMD_IMPL:
651 		chacha_md_init(&chacha_neon_impl);
652 		return;
653 	default:
654 		break;
655 	}
656 }
657 
658 #ifdef MULTIPROCESSOR
/*
 * Per-CPU hatch hook: runs on each secondary CPU once it comes out of
 * its spin stub.  Completes the identification and setup that the boot
 * CPU performed for itself in cpu_attach(), then signals completion.
 */
void
cpu_hatch(struct cpu_info *ci)
{
	KASSERT(curcpu() == ci);

	/* Serialize hatch-time setup and console output between CPUs. */
	mutex_enter(&cpu_hatch_lock);

	set_cpufuncs();
	fpu_attach(ci);

	cpu_identify1(ci->ci_dev, ci);
	aarch64_getcacheinfo(device_unit(ci->ci_dev));
	aarch64_printcacheinfo(ci->ci_dev);
	cpu_identify2(ci->ci_dev, ci);

	mutex_exit(&cpu_hatch_lock);

	cpu_init_counter(ci);

	intr_cpu_init(ci);

#ifdef FDT
	arm_fdt_cpu_hatch(ci);
#endif
#ifdef MD_CPU_HATCH
	MD_CPU_HATCH(ci);	/* for non-fdt arch? */
#endif

	/*
	 * clear my bit of arm_cpu_mbox to tell cpu_boot_secondary_processors().
	 * there are cpu0,1,2,3, and if cpu2 is unresponsive,
	 * ci_index are each cpu0=0, cpu1=1, cpu2=undef, cpu3=2.
	 * therefore we have to use device_unit instead of ci_index for mbox.
	 */

	cpu_clr_mbox(device_unit(ci->ci_dev));
}
696 #endif /* MULTIPROCESSOR */
697