xref: /freebsd-src/sys/arm64/arm64/machdep.c (revision 10ff414c14eef433d8157f0c17904d740693933b)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include "opt_acpi.h"
29 #include "opt_platform.h"
30 #include "opt_ddb.h"
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/buf.h>
38 #include <sys/bus.h>
39 #include <sys/cons.h>
40 #include <sys/cpu.h>
41 #include <sys/csan.h>
42 #include <sys/devmap.h>
43 #include <sys/efi.h>
44 #include <sys/exec.h>
45 #include <sys/imgact.h>
46 #include <sys/kdb.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/limits.h>
50 #include <sys/linker.h>
51 #include <sys/msgbuf.h>
52 #include <sys/pcpu.h>
53 #include <sys/physmem.h>
54 #include <sys/proc.h>
55 #include <sys/ptrace.h>
56 #include <sys/reboot.h>
57 #include <sys/reg.h>
58 #include <sys/rwlock.h>
59 #include <sys/sched.h>
60 #include <sys/signalvar.h>
61 #include <sys/syscallsubr.h>
62 #include <sys/sysent.h>
63 #include <sys/sysproto.h>
64 #include <sys/ucontext.h>
65 #include <sys/vdso.h>
66 #include <sys/vmmeter.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_param.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_object.h>
72 #include <vm/vm_page.h>
73 #include <vm/vm_phys.h>
74 #include <vm/pmap.h>
75 #include <vm/vm_map.h>
76 #include <vm/vm_pager.h>
77 
78 #include <machine/armreg.h>
79 #include <machine/cpu.h>
80 #include <machine/debug_monitor.h>
81 #include <machine/kdb.h>
82 #include <machine/machdep.h>
83 #include <machine/metadata.h>
84 #include <machine/md_var.h>
85 #include <machine/pcb.h>
86 #include <machine/undefined.h>
87 #include <machine/vmparam.h>
88 
89 #ifdef VFP
90 #include <machine/vfp.h>
91 #endif
92 
93 #ifdef DEV_ACPI
94 #include <contrib/dev/acpica/include/acpi.h>
95 #include <machine/acpica_machdep.h>
96 #endif
97 
98 #ifdef FDT
99 #include <dev/fdt/fdt_common.h>
100 #include <dev/ofw/openfirm.h>
101 #endif
102 
/* Helpers to copy the FP/SIMD portion of an mcontext in and out. */
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static void set_fpcontext(struct thread *td, mcontext_t *mcp);

/* Firmware enumeration method (FDT or ACPI) chosen by bus_probe(). */
enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;

/* Statically allocated per-CPU data; entry 0 is used for the boot CPU. */
struct pcpu __pcpu[MAXCPU];

/* Trapframe for thread0, installed by init_proc0(). */
static struct trapframe proc0_tf;

int early_boot = 1;	/* Non-zero during early boot; cleared elsewhere. */
int cold = 1;		/* Standard "still booting" flag. */
/* Exception level the kernel was entered at; 2 => EL2, see has_hyp(). */
static int boot_el;

struct kva_md_info kmi;

int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
int has_pan;		/* Non-zero if the CPU implements PAN (see pan_setup()). */

/*
 * Physical address of the EFI System Table. Stashed from the metadata hints
 * passed into the kernel and used by the EFI code to call runtime services.
 */
vm_paddr_t efi_systbl_phys;
/* UEFI memory map header, if the loader provided one; see memory_mapping_mode(). */
static struct efi_map_header *efihdr;

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero; cache_setup() may switch to _cache. */
void (*pagezero)(void *p) = pagezero_simple;

/* Optional hook for an ACPI APEI NMI handler. */
int (*apei_nmi)(void);
136 
137 static void
138 pan_setup(void)
139 {
140 	uint64_t id_aa64mfr1;
141 
142 	id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
143 	if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
144 		has_pan = 1;
145 }
146 
void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		/*
		 * Clear SCTLR_EL1.SPAN so PSTATE.PAN is set on exception
		 * entry, keeping PAN enabled inside the kernel.
		 */
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}
167 
168 bool
169 has_hyp(void)
170 {
171 
172 	return (boot_el == 2);
173 }
174 
/*
 * Late CPU startup: report memory totals, install undefined-instruction
 * handlers and errata workarounds, and bring up the buffer cache.
 * Run via SYSINIT at SI_SUB_CPU.
 */
static void
cpu_startup(void *dummy)
{
	vm_paddr_t size;
	int i;

	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / 1024 / 1024);

	if (bootverbose) {
		printf("Physical memory chunk(s):\n");
		/* phys_avail is a zero-terminated list of (start, end) pairs. */
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			size = phys_avail[i + 1] - phys_avail[i];
			printf("%#016jx - %#016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[i],
			    (uintmax_t)phys_avail[i + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1024 / 1024);

	undef_init();
	install_cpu_errata();

	/* Finish kernel submap setup and initialise the buffer cache. */
	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}
206 
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Perform any ifunc relocations deferred until after CPU identification. */
static void
late_ifunc_resolve(void *dummy __unused)
{
	link_elf_late_ireloc();
}
SYSINIT(late_ifunc_resolve, SI_SUB_CPU, SI_ORDER_ANY, late_ifunc_resolve, NULL);
215 
/*
 * Machine-dependent idle-CPU wakeup. Nothing extra is done here; report 0
 * so the scheduler uses its generic wakeup mechanism instead.
 */
int
cpu_idle_wakeup(int cpu)
{
	return (0);
}
222 
223 int
224 fill_regs(struct thread *td, struct reg *regs)
225 {
226 	struct trapframe *frame;
227 
228 	frame = td->td_frame;
229 	regs->sp = frame->tf_sp;
230 	regs->lr = frame->tf_lr;
231 	regs->elr = frame->tf_elr;
232 	regs->spsr = frame->tf_spsr;
233 
234 	memcpy(regs->x, frame->tf_x, sizeof(regs->x));
235 
236 #ifdef COMPAT_FREEBSD32
237 	/*
238 	 * We may be called here for a 32bits process, if we're using a
239 	 * 64bits debugger. If so, put PC and SPSR where it expects it.
240 	 */
241 	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
242 		regs->x[15] = frame->tf_elr;
243 		regs->x[16] = frame->tf_spsr;
244 	}
245 #endif
246 	return (0);
247 }
248 
249 int
250 set_regs(struct thread *td, struct reg *regs)
251 {
252 	struct trapframe *frame;
253 
254 	frame = td->td_frame;
255 	frame->tf_sp = regs->sp;
256 	frame->tf_lr = regs->lr;
257 	frame->tf_spsr &= ~PSR_FLAGS;
258 
259 	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
260 
261 #ifdef COMPAT_FREEBSD32
262 	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
263 		/*
264 		 * We may be called for a 32bits process if we're using
265 		 * a 64bits debugger. If so, get PC and SPSR from where
266 		 * it put it.
267 		 */
268 		frame->tf_elr = regs->x[15];
269 		frame->tf_spsr |= regs->x[16] & PSR_FLAGS;
270 	} else
271 #endif
272 	{
273 		frame->tf_elr = regs->elr;
274 		frame->tf_spsr |= regs->spsr & PSR_FLAGS;
275 	}
276 	return (0);
277 }
278 
/*
 * Copy the thread's saved FP/SIMD state into a struct fpreg. If the FP unit
 * was never started for this thread (or the kernel lacks VFP support) the
 * result is all zeroes.
 */
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);

		KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
		    ("Called fill_fpregs while the kernel is using the VFP"));
		memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
		    sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
		regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
	} else
#endif
		memset(regs, 0, sizeof(*regs));
	return (0);
}
305 
/* Install a ptrace-supplied FP/SIMD register set into the thread's PCB. */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called set_fpregs while the kernel is using the VFP"));
	/* The register file first, then the control/status words. */
	memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
	pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
	return (0);
}
321 
/*
 * Report the hardware debug capabilities (debug architecture version,
 * breakpoint/watchpoint counts) and, if the monitor is enabled for this
 * thread, the current breakpoint and watchpoint register values.
 */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	int i;
	uint8_t debug_ver, nbkpts, nwtpts;

	memset(regs, 0, sizeof(*regs));

	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_DebugVer_SHIFT,
	    &debug_ver);
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_BRPs_SHIFT,
	    &nbkpts);
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_WRPs_SHIFT,
	    &nwtpts);

	/*
	 * The BRPs field contains the number of breakpoints - 1. Armv8-A
	 * allows the hardware to provide 2-16 breakpoints so this won't
	 * overflow an 8 bit value. The same applies to the WRPs field.
	 */
	nbkpts++;
	nwtpts++;

	regs->db_debug_ver = debug_ver;
	regs->db_nbkpts = nbkpts;
	regs->db_nwtpts = nwtpts;

	monitor = &td->td_pcb->pcb_dbg_regs;
	if ((monitor->dbg_flags & DBGMON_ENABLED) != 0) {
		for (i = 0; i < nbkpts; i++) {
			regs->db_breakregs[i].dbr_addr = monitor->dbg_bvr[i];
			regs->db_breakregs[i].dbr_ctrl = monitor->dbg_bcr[i];
		}
		for (i = 0; i < nwtpts; i++) {
			regs->db_watchregs[i].dbw_addr = monitor->dbg_wvr[i];
			regs->db_watchregs[i].dbw_ctrl = monitor->dbg_wcr[i];
		}
	}

	return (0);
}
364 
/*
 * Validate and install user-supplied hardware breakpoint/watchpoint
 * registers for a thread. Returns EINVAL for kernel addresses, non-EL0
 * targets, or otherwise malformed control words. On success the monitor
 * is flagged enabled if any break/watchpoint is armed.
 */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	uint64_t addr;
	uint32_t ctrl;
	int count;
	int i;

	monitor = &td->td_pcb->pcb_dbg_regs;
	count = 0;
	monitor->dbg_enable_count = 0;

	for (i = 0; i < DBG_BRP_MAX; i++) {
		addr = regs->db_breakregs[i].dbr_addr;
		ctrl = regs->db_breakregs[i].dbr_ctrl;

		/*
		 * Don't let the user set a breakpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * The lowest 2 bits are ignored, so record the effective
		 * address.
		 */
		addr = rounddown2(addr, 4);

		/*
		 * Some control fields are ignored, and other bits reserved.
		 * Only unlinked, address-matching breakpoints are supported.
		 *
		 * XXX: fields that appear unvalidated, such as BAS, have
		 * constrained undefined behaviour. If the user mis-programs
		 * these, there is no risk to the system.
		 */
		ctrl &= DBG_BCR_EN | DBG_BCR_PMC | DBG_BCR_BAS;
		if ((ctrl & DBG_BCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBG_BCR_PMC) != DBG_BCR_PMC_EL0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}

		monitor->dbg_bvr[i] = addr;
		monitor->dbg_bcr[i] = ctrl;
	}

	for (i = 0; i < DBG_WRP_MAX; i++) {
		addr = regs->db_watchregs[i].dbw_addr;
		ctrl = regs->db_watchregs[i].dbw_ctrl;

		/*
		 * Don't let the user set a watchpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * Some control fields are ignored, and other bits reserved.
		 * Only unlinked watchpoints are supported.
		 */
		ctrl &= DBG_WCR_EN | DBG_WCR_PAC | DBG_WCR_LSC | DBG_WCR_BAS |
		    DBG_WCR_MASK;

		if ((ctrl & DBG_WCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBG_WCR_PAC) != DBG_WCR_PAC_EL0)
				return (EINVAL);

			/* Must set at least one of the load/store bits. */
			if ((ctrl & DBG_WCR_LSC) == 0)
				return (EINVAL);

			/*
			 * When specifying the address range with BAS, the MASK
			 * field must be zero.
			 */
			if ((ctrl & DBG_WCR_BAS) != DBG_WCR_BAS_MASK &&
			    (ctrl & DBG_WCR_MASK) != 0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}
		monitor->dbg_wvr[i] = addr;
		monitor->dbg_wcr[i] = ctrl;
	}

	if (monitor->dbg_enable_count > 0)
		monitor->dbg_flags |= DBGMON_ENABLED;

	return (0);
}
462 
463 #ifdef COMPAT_FREEBSD32
464 int
465 fill_regs32(struct thread *td, struct reg32 *regs)
466 {
467 	int i;
468 	struct trapframe *tf;
469 
470 	tf = td->td_frame;
471 	for (i = 0; i < 13; i++)
472 		regs->r[i] = tf->tf_x[i];
473 	/* For arm32, SP is r13 and LR is r14 */
474 	regs->r_sp = tf->tf_x[13];
475 	regs->r_lr = tf->tf_x[14];
476 	regs->r_pc = tf->tf_elr;
477 	regs->r_cpsr = tf->tf_spsr;
478 
479 	return (0);
480 }
481 
482 int
483 set_regs32(struct thread *td, struct reg32 *regs)
484 {
485 	int i;
486 	struct trapframe *tf;
487 
488 	tf = td->td_frame;
489 	for (i = 0; i < 13; i++)
490 		tf->tf_x[i] = regs->r[i];
491 	/* For arm 32, SP is r13 an LR is r14 */
492 	tf->tf_x[13] = regs->r_sp;
493 	tf->tf_x[14] = regs->r_lr;
494 	tf->tf_elr = regs->r_pc;
495 	tf->tf_spsr &= ~PSR_FLAGS;
496 	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
497 
498 	return (0);
499 }
500 
501 /* XXX fill/set dbregs/fpregs are stubbed on 32-bit arm. */
502 int
503 fill_fpregs32(struct thread *td, struct fpreg32 *regs)
504 {
505 
506 	memset(regs, 0, sizeof(*regs));
507 	return (0);
508 }
509 
/* Unimplemented stub: silently accept and discard 32-bit FP registers. */
int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{
	return (0);
}
516 
517 int
518 fill_dbregs32(struct thread *td, struct dbreg32 *regs)
519 {
520 
521 	memset(regs, 0, sizeof(*regs));
522 	return (0);
523 }
524 
/* Unimplemented stub: silently accept and discard 32-bit debug registers. */
int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{
	return (0);
}
531 #endif
532 
533 int
534 ptrace_set_pc(struct thread *td, u_long addr)
535 {
536 
537 	td->td_frame->tf_elr = addr;
538 	return (0);
539 }
540 
541 int
542 ptrace_single_step(struct thread *td)
543 {
544 
545 	td->td_frame->tf_spsr |= PSR_SS;
546 	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
547 	return (0);
548 }
549 
550 int
551 ptrace_clear_single_step(struct thread *td)
552 {
553 
554 	td->td_frame->tf_spsr &= ~PSR_SS;
555 	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
556 	return (0);
557 }
558 
559 void
560 exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
561 {
562 	struct trapframe *tf = td->td_frame;
563 	struct pcb *pcb = td->td_pcb;
564 
565 	memset(tf, 0, sizeof(struct trapframe));
566 
567 	tf->tf_x[0] = stack;
568 	tf->tf_sp = STACKALIGN(stack);
569 	tf->tf_lr = imgp->entry_addr;
570 	tf->tf_elr = imgp->entry_addr;
571 
572 	td->td_pcb->pcb_tpidr_el0 = 0;
573 	td->td_pcb->pcb_tpidrro_el0 = 0;
574 	WRITE_SPECIALREG(tpidrro_el0, 0);
575 	WRITE_SPECIALREG(tpidr_el0, 0);
576 
577 #ifdef VFP
578 	vfp_reset_state(td, pcb);
579 #endif
580 
581 	/*
582 	 * Clear debug register state. It is not applicable to the new process.
583 	 */
584 	bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));
585 }
586 
/*
 * Sanity check these are the same size, they will be memcpy'd to and fro
 * by get/set_mcontext() and fill/set_regs().
 */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);
592 
/*
 * Snapshot the thread's register state into an mcontext. With
 * GET_MC_CLEAR_RET, x0 and the carry flag are cleared so that the context
 * resumes as if from a successful system call.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	/* x0 was handled above; copy the remaining registers. */
	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;
	get_fpcontext(td, mcp);

	return (0);
}
616 
/*
 * Install an mcontext into the thread's trapframe. The supplied SPSR must
 * describe an AArch64 EL0t context with the same DAIF mask as the current
 * frame; anything else is rejected with EINVAL so userspace cannot elevate
 * its processor state.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	uint32_t spsr;

	spsr = mcp->mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & PSR_AARCH32) != 0 ||
	    (spsr & PSR_DAIF) != (td->td_frame->tf_spsr & PSR_DAIF))
		return (EINVAL);

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
	set_fpcontext(td, mcp);

	return (0);
}
639 
/*
 * Copy the current FP/SIMD state into mcp and mark it valid, if the FP unit
 * has been used. Runs in a critical section so the state cannot be switched
 * out from under us while saving.
 */
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	critical_enter();

	curpcb = curthread->td_pcb;

	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		vfp_save_state(td, curpcb);

		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
		    ("Called get_fpcontext while the kernel is using the VFP"));
		KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
		    ("Non-userspace FPU flags set in get_fpcontext"));
		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
		    sizeof(mcp->mc_fpregs.fp_q));
		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
		mcp->mc_flags |= _MC_FP_VALID;
	}

	critical_exit();
#endif
}
672 
/*
 * Install the FP/SIMD state from mcp, if it is marked valid. The thread's
 * live VFP state is discarded first, and only user-settable FP flags are
 * accepted. Runs in a critical section to avoid racing a context switch.
 */
static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	critical_enter();

	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;

		/*
		 * Discard any vfp state for the current thread, we
		 * are about to override it.
		 */
		vfp_discard(td);

		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
		    ("Called set_fpcontext while the kernel is using the VFP"));
		memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
		    sizeof(mcp->mc_fpregs.fp_q));
		curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
		curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
	}

	critical_exit();
#endif
}
702 
/*
 * Idle the CPU until the next interrupt. The DSB orders all prior memory
 * accesses before the WFI so nothing is left pending when we sleep.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
718 
void
cpu_halt(void)
{
	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	for (;;)
		__asm __volatile("wfi");
}
729 
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* ARM64TODO TBD - intentionally a no-op for now. */
}
740 
741 /* Get current clock frequency for the given CPU ID. */
742 int
743 cpu_est_clockrate(int cpu_id, uint64_t *rate)
744 {
745 	struct pcpu *pc;
746 
747 	pc = pcpu_find(cpu_id);
748 	if (pc == NULL || rate == NULL)
749 		return (EINVAL);
750 
751 	if (pc->pc_clock == 0)
752 		return (EOPNOTSUPP);
753 
754 	*rate = pc->pc_clock;
755 	return (0);
756 }
757 
758 void
759 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
760 {
761 
762 	pcpu->pc_acpi_id = 0xffffffff;
763 	pcpu->pc_mpidr = 0xffffffff;
764 }
765 
/*
 * Enter a spinlock section: on first (outermost) entry disable interrupts,
 * saving the old DAIF state for the matching spinlock_exit(). Nested
 * entries only bump the count. Interrupts must be disabled before the
 * count is updated so the sequence cannot be interrupted mid-update.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}
781 
/*
 * Leave a spinlock section; on the outermost exit restore the DAIF state
 * saved by spinlock_enter(). The saved state is read before the count is
 * dropped, mirroring the enter-side ordering.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(daif);
	}
}
796 
797 #ifndef	_SYS_SYSPROTO_H_
798 struct sigreturn_args {
799 	ucontext_t *ucp;
800 };
801 #endif
802 
/*
 * sigreturn(2): restore the register state saved by sendsig() and the
 * signal mask from the user-supplied ucontext. Returns EJUSTRETURN so the
 * syscall return path leaves the restored registers untouched.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/* Rejects contexts with an invalid PSR; see set_mcontext(). */
	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
821 
822 /*
823  * Construct a PCB from a trapframe. This is called from kdb_trap() where
824  * we want to start a backtrace from the function that caused us to enter
825  * the debugger. We have the context in the trapframe, but base the trace
826  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
827  * enough for a backtrace.
828  */
829 void
830 makectx(struct trapframe *tf, struct pcb *pcb)
831 {
832 	int i;
833 
834 	for (i = 0; i < nitems(pcb->pcb_x); i++)
835 		pcb->pcb_x[i] = tf->tf_x[i];
836 
837 	/* NB: pcb_lr is the PC, see PC_REGS() in db_machdep.h */
838 	pcb->pcb_lr = tf->tf_elr;
839 	pcb->pcb_sp = tf->tf_sp;
840 }
841 
/*
 * Deliver a signal to the current thread: build a sigframe (saved context
 * plus siginfo) on the user stack and redirect the thread to the handler,
 * with the return path going through the signal trampoline.
 * Called with the proc lock and ps_mtx held; both are dropped around the
 * copyout and re-taken before returning.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	struct sysentvec *sysent;
	int onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Deliver on the alternate signal stack. */
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		/* Deliver on the thread's current stack. */
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	/* Drop the locks while touching user memory. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Handler arguments: signo, siginfo pointer, ucontext pointer. */
	tf->tf_x[0]= sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;

	/* Resume in the handler; LR points at the signal trampoline. */
	tf->tf_elr = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_lr = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
921 
/*
 * Initialise proc0/thread0: link them together, carve the PCB out of the
 * top of the supplied kernel stack, and install the static trapframe.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	/* The PCB lives at the very top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
	/* UINT_MAX marks the FP state as not resident on any CPU. */
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;

	/*
	 * Unmask SError exceptions. They are used to signal a RAS failure,
	 * or other hardware error.
	 */
	serror_enable();
}
944 
945 /*
946  * Get an address to be used to write to kernel data that may be mapped
947  * read-only, e.g. to patch kernel code.
948  */
949 bool
950 arm64_get_writable_addr(vm_offset_t addr, vm_offset_t *out)
951 {
952 	vm_paddr_t pa;
953 
954 	/* Check if the page is writable */
955 	if (PAR_SUCCESS(arm64_address_translate_s1e1w(addr))) {
956 		*out = addr;
957 		return (true);
958 	}
959 
960 	/*
961 	 * Find the physical address of the given page.
962 	 */
963 	if (!pmap_klookup(addr, &pa)) {
964 		return (false);
965 	}
966 
967 	/*
968 	 * If it is within the DMAP region and is writable use that.
969 	 */
970 	if (PHYS_IN_DMAP(pa)) {
971 		addr = PHYS_TO_DMAP(pa);
972 		if (PAR_SUCCESS(arm64_address_translate_s1e1w(addr))) {
973 			*out = addr;
974 			return (true);
975 		}
976 	}
977 
978 	return (false);
979 }
980 
/*
 * Layout of a UEFI memory descriptor as defined by the UEFI spec's
 * GetMemoryMap() Boot Service.
 */
typedef struct {
	uint32_t type;		/* EFI_MEMORY_TYPE value. */
	uint64_t phys_start;	/* Physical start, 4 KiB aligned. */
	uint64_t virt_start;	/* Virtual start, if mapped for runtime. */
	uint64_t num_pages;	/* Length in 4 KiB pages. */
	uint64_t attr;		/* EFI_MD_ATTR_* capability bits. */
} EFI_MEMORY_DESCRIPTOR;

/* Callback invoked for each descriptor in the UEFI memory map. */
typedef void (*efi_map_entry_cb)(struct efi_md *);
990 
991 static void
992 foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
993 {
994 	struct efi_md *map, *p;
995 	size_t efisz;
996 	int ndesc, i;
997 
998 	/*
999 	 * Memory map data provided by UEFI via the GetMemoryMap
1000 	 * Boot Services API.
1001 	 */
1002 	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
1003 	map = (struct efi_md *)((uint8_t *)efihdr + efisz);
1004 
1005 	if (efihdr->descriptor_size == 0)
1006 		return;
1007 	ndesc = efihdr->memory_size / efihdr->descriptor_size;
1008 
1009 	for (i = 0, p = map; i < ndesc; i++,
1010 	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
1011 		cb(p);
1012 	}
1013 }
1014 
1015 static void
1016 exclude_efi_map_entry(struct efi_md *p)
1017 {
1018 
1019 	switch (p->md_type) {
1020 	case EFI_MD_TYPE_CODE:
1021 	case EFI_MD_TYPE_DATA:
1022 	case EFI_MD_TYPE_BS_CODE:
1023 	case EFI_MD_TYPE_BS_DATA:
1024 	case EFI_MD_TYPE_FREE:
1025 		/*
1026 		 * We're allowed to use any entry with these types.
1027 		 */
1028 		break;
1029 	default:
1030 		physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE,
1031 		    EXFLAG_NOALLOC);
1032 	}
1033 }
1034 
/* Exclude every non-reusable UEFI map entry from the physical allocator. */
static void
exclude_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
}
1041 
/* Register one usable UEFI map entry as hardware-backed physical memory. */
static void
add_efi_map_entry(struct efi_md *p)
{

	switch (p->md_type) {
	case EFI_MD_TYPE_RT_DATA:
		/*
		 * Runtime data will be excluded after the DMAP
		 * region is created to stop it from being added
		 * to phys_avail.
		 */
		/* FALLTHROUGH */
	case EFI_MD_TYPE_CODE:
	case EFI_MD_TYPE_DATA:
	case EFI_MD_TYPE_BS_CODE:
	case EFI_MD_TYPE_BS_DATA:
	case EFI_MD_TYPE_FREE:
		/*
		 * We're allowed to use any entry with these types.
		 */
		physmem_hardware_region(p->md_phys,
		    p->md_pages * PAGE_SIZE);
		break;
	}
}
1066 
/* Register all usable UEFI map entries as physical memory regions. */
static void
add_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, add_efi_map_entry);
}
1073 
/* Pretty-print one UEFI memory descriptor: type, range and attribute flags. */
static void
print_efi_map_entry(struct efi_md *p)
{
	const char *type;
	/* Names indexed by EFI_MEMORY_TYPE; out-of-range types are flagged. */
	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode",
		"PersistentMemory"
	};

	if (p->md_type < nitems(types))
		type = types[p->md_type];
	else
		type = "<INVALID>";
	printf("%23s %012lx %012lx %08lx ", type, p->md_phys,
	    p->md_virt, p->md_pages);
	if (p->md_attr & EFI_MD_ATTR_UC)
		printf("UC ");
	if (p->md_attr & EFI_MD_ATTR_WC)
		printf("WC ");
	if (p->md_attr & EFI_MD_ATTR_WT)
		printf("WT ");
	if (p->md_attr & EFI_MD_ATTR_WB)
		printf("WB ");
	if (p->md_attr & EFI_MD_ATTR_UCE)
		printf("UCE ");
	if (p->md_attr & EFI_MD_ATTR_WP)
		printf("WP ");
	if (p->md_attr & EFI_MD_ATTR_RP)
		printf("RP ");
	if (p->md_attr & EFI_MD_ATTR_XP)
		printf("XP ");
	if (p->md_attr & EFI_MD_ATTR_NV)
		printf("NV ");
	if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
		printf("MORE_RELIABLE ");
	if (p->md_attr & EFI_MD_ATTR_RO)
		printf("RO ");
	if (p->md_attr & EFI_MD_ATTR_RT)
		printf("RUNTIME");
	printf("\n");
}
1128 
/* Print the whole UEFI memory map, one row per descriptor. */
static void
print_efi_map_entries(struct efi_map_header *efihdr)
{

	printf("%23s %12s %12s %8s %4s\n",
	    "Type", "Physical", "Virtual", "#Pages", "Attr");
	foreach_efi_map_entry(efihdr, print_efi_map_entry);
}
1137 
1138 #ifdef FDT
/*
 * Locate the flattened device tree blob (from loader metadata, or the
 * statically embedded copy if configured) and hand it to the OFW/FDT layer.
 */
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == 0)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
#ifndef TSLOG
		printf("ERROR loading DTB\n");
#endif
		return;
	}

	/* Install the FDT backend and parse the blob. */
	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	parse_fdt_bootargs();
}
1169 #endif
1170 
/*
 * Select the firmware enumeration method (FDT or ACPI). The user may force
 * an order via the "kern.cfg.order" environment variable (a comma-separated
 * list of "acpi"/"fdt"); otherwise FDT is preferred when present. Returns
 * false when the user requested an order that named no available method.
 */
static bool
bus_probe(void)
{
	bool has_acpi, has_fdt;
	char *order, *env;

	has_acpi = has_fdt = false;

#ifdef FDT
	has_fdt = (OF_peer(0) != 0);
#endif
#ifdef DEV_ACPI
	has_acpi = (AcpiOsGetRootPointer() != 0);
#endif

	env = kern_getenv("kern.cfg.order");
	if (env != NULL) {
		/* Walk the comma-separated list, taking the first match. */
		order = env;
		while (order != NULL) {
			if (has_acpi &&
			    strncmp(order, "acpi", 4) == 0 &&
			    (order[4] == ',' || order[4] == '\0')) {
				arm64_bus_method = ARM64_BUS_ACPI;
				break;
			}
			if (has_fdt &&
			    strncmp(order, "fdt", 3) == 0 &&
			    (order[3] == ',' || order[3] == '\0')) {
				arm64_bus_method = ARM64_BUS_FDT;
				break;
			}
			order = strchr(order, ',');
		}
		freeenv(env);

		/* If we set the bus method it is valid */
		if (arm64_bus_method != ARM64_BUS_NONE)
			return (true);
	}
	/* If no order or an invalid order was set use the default */
	if (arm64_bus_method == ARM64_BUS_NONE) {
		if (has_fdt)
			arm64_bus_method = ARM64_BUS_FDT;
		else if (has_acpi)
			arm64_bus_method = ARM64_BUS_ACPI;
	}

	/*
	 * If no option was set the default is valid, otherwise we are
	 * setting one to get cninit() working, then calling panic to tell
	 * the user about the invalid bus setup.
	 */
	return (env == NULL);
}
1225 
/*
 * Discover the cache geometry and configure the "dc zva" (zero by
 * cache block) based page-zeroing path when the hardware permits it.
 */
static void
cache_setup(void)
{
	int dczva_line_shift;
	uint32_t dczid_el0;

	identify_cache(READ_SPECIALREG(ctr_el0));

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/*
		 * DCZID_EL0.BS encodes log2 of the zeroing block size in
		 * 4-byte words, hence the sizeof(int) scaling here.
		 */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}
1248 
/*
 * Return the VM memory attribute to use when mapping physical address
 * 'pa', derived from the UEFI memory map when the loader provided one.
 * Falls back to write-back when there is no usable map, and to device
 * memory when 'pa' is not covered by any descriptor or the covering
 * descriptor carries no recognized cacheability attribute.
 */
int
memory_mapping_mode(vm_paddr_t pa)
{
	struct efi_md *map, *p;
	size_t efisz;
	int ndesc, i;

	if (efihdr == NULL)
		return (VM_MEMATTR_WRITE_BACK);

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.  The descriptor array follows the header,
	 * rounded up to a 16-byte boundary.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	/* Guard the division below against a corrupt header. */
	if (efihdr->descriptor_size == 0)
		return (VM_MEMATTR_WRITE_BACK);
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		/* Skip descriptors that do not cover 'pa'. */
		if (pa < p->md_phys ||
		    pa >= p->md_phys + p->md_pages * EFI_PAGE_SIZE)
			continue;
		/* I/O regions are always device memory. */
		if (p->md_type == EFI_MD_TYPE_IOMEM ||
		    p->md_type == EFI_MD_TYPE_IOPORT)
			return (VM_MEMATTR_DEVICE);
		/* Otherwise pick the strongest cacheability advertised. */
		else if ((p->md_attr & EFI_MD_ATTR_WB) != 0 ||
		    p->md_type == EFI_MD_TYPE_RECLAIM)
			return (VM_MEMATTR_WRITE_BACK);
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			return (VM_MEMATTR_WRITE_THROUGH);
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			return (VM_MEMATTR_WRITE_COMBINING);
		break;
	}

	return (VM_MEMATTR_DEVICE);
}
1290 
/*
 * Machine-dependent kernel bootstrap for arm64.  Parses the boot
 * parameters, discovers physical memory (EFI memory map or FDT),
 * bootstraps pcpu and pmap, selects the bus enumeration method, and
 * brings up the console, proc0 and the kernel debugger hooks.  The
 * statement order below is load-bearing; see the inline comments.
 */
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_fb *efifb;
	struct pcpu *pcpup;
	char *env;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
	phandle_t root;
	char dts_version[255];
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	bool valid;

	TSRAW(&thread0, TS_ENTER, __func__, NULL);

	boot_el = abp->boot_el;

	/* Parse loader or FDT boot parameters. Determine last used address. */
	lastaddr = parse_boot_param(abp);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	identify_cpu(0);
	update_special_regs(0);

	link_elf_ireloc(kmdp);
	try_load_dtb(kmdp);

	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

	/* Load the physical memory ranges */
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		physmem_hardware_regions(mem_regions, mem_regions_sz);
	}
	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
#endif

	/* Exclude the EFI framebuffer from our view of physical memory. */
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
	if (efifb != NULL)
		physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
		    EXFLAG_NOALLOC);

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);
	PCPU_SET(midr, get_midr());

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
	/* Exclude entries needed in the DMAP region, but not phys_avail */
	if (efihdr != NULL)
		exclude_efi_map_entries(efihdr);
	physmem_init_kernel_globals();

	devmap_bootstrap(0, NULL);

	valid = bus_probe();

	cninit();
	set_ttbr0(abp->kern_ttbr0);
	cpu_tlb_flushID();

	/* Console is up now, so report a bad kern.cfg.order readably. */
	if (!valid)
		panic("Invalid bus configuration: %s",
		    kern_getenv("kern.cfg.order"));

	/*
	 * Dump the boot metadata. We have to wait for cninit() since console
	 * output is required. If it's grossly incorrect the kernel will never
	 * make it this far.
	 */
	if (getenv_is_true("debug.dump_modinfo_at_boot"))
		preload_dump();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_init();
	kdb_init();
	pan_enable();

	kcsan_cpu_init(0);

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

#ifdef FDT
	/* Warn when the loader-supplied DTB does not match this kernel. */
	if (arm64_bus_method == ARM64_BUS_FDT) {
		root = OF_finddevice("/");
		if (OF_getprop(root, "freebsd,dts-version", dts_version, sizeof(dts_version)) > 0) {
			if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
				printf("WARNING: DTB version is %s while kernel expects %s, "
				    "please update the DTB in the ESP\n",
				    dts_version,
				    LINUX_DTS_VERSION);
		} else {
			printf("WARNING: Cannot find freebsd,dts-version property, "
			    "cannot check DTB compliance\n");
		}
	}
#endif

	if (boothowto & RB_VERBOSE) {
		if (efihdr != NULL)
			print_efi_map_entries(efihdr);
		physmem_print_tables();
	}

	early_boot = 0;

	TSEXIT();
}
1442 
/*
 * Early debug-hardware setup, called from initarm() before kdb_init():
 * unlocks the debug registers and prepares the monitor so DDB can use
 * hardware watchpoints.
 */
void
dbg_init(void)
{

	/* Clear OS lock */
	WRITE_SPECIALREG(oslar_el1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here. */
}
1455 
1456 #ifdef DDB
1457 #include <ddb/ddb.h>
1458 
/*
 * DDB "show specialregs" command: dump a snapshot of the EL0/EL1
 * system, ID and feature registers of the current CPU.
 */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
1533 
/*
 * DDB "show vtop <virt_addr>" command: run the stage-1 address
 * translation (AT) instructions on the given virtual address and print
 * the resulting physical-address register for EL1 and EL0, read and
 * write accesses.
 */
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	uint64_t phys;

	if (have_addr) {
		phys = arm64_address_translate_s1e1r(addr);
		db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
		phys = arm64_address_translate_s1e1w(addr);
		db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
		phys = arm64_address_translate_s1e0r(addr);
		db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
		phys = arm64_address_translate_s1e0w(addr);
		db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
	} else
		db_printf("show vtop <virt_addr>\n");
}
1550 #endif
1551