xref: /openbsd-src/sys/arch/arm64/arm64/machdep.c (revision b0d2811898b6af5f105845a12bf9d62865077da0)
1 /* $OpenBSD: machdep.c,v 1.95 2024/11/18 05:32:39 jsg Exp $ */
2 /*
3  * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
4  * Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/sched.h>
22 #include <sys/proc.h>
23 #include <sys/sysctl.h>
24 #include <sys/reboot.h>
25 #include <sys/mount.h>
26 #include <sys/exec.h>
27 #include <sys/user.h>
28 #include <sys/conf.h>
29 #include <sys/kcore.h>
30 #include <sys/core.h>
31 #include <sys/msgbuf.h>
32 #include <sys/buf.h>
33 #include <sys/termios.h>
34 #include <sys/sensors.h>
35 #include <sys/malloc.h>
36 
37 #include <net/if.h>
38 #include <uvm/uvm_extern.h>
39 #include <dev/cons.h>
40 #include <dev/ofw/fdt.h>
41 #include <dev/ofw/openfirm.h>
42 #include <machine/param.h>
43 #include <machine/kcore.h>
44 #include <machine/bootconfig.h>
45 #include <machine/bus.h>
46 #include <machine/fpu.h>
47 
48 #include <machine/db_machdep.h>
49 #include <ddb/db_extern.h>
50 
51 #include <dev/efi/efi.h>
52 
53 #include "softraid.h"
54 #if NSOFTRAID > 0
55 #include <dev/softraidvar.h>
56 #endif
57 
58 extern vaddr_t virtual_avail;
59 extern uint64_t esym;
60 
61 extern char _start[];
62 
63 char *boot_args = NULL;
64 uint8_t *bootmac = NULL;
65 
66 int stdout_node;
67 int stdout_speed;
68 
69 void (*cpuresetfn)(void);
70 void (*powerdownfn)(void);
71 
72 int cold = 1;
73 int lid_action = 1;
74 
75 struct vm_map *exec_map = NULL;
76 struct vm_map *phys_map = NULL;
77 
78 int physmem;
79 
80 struct consdev *cn_tab;
81 
82 caddr_t msgbufaddr;
83 paddr_t msgbufphys;
84 
85 struct user *proc0paddr;
86 
87 struct uvm_constraint_range  dma_constraint = { 0x0, (paddr_t)-1 };
88 struct uvm_constraint_range *uvm_md_constraints[] = {
89 	&dma_constraint,
90 	NULL,
91 };
92 
93 /* the following is used externally (sysctl_hw) */
94 char    machine[] = MACHINE;            /* from <machine/param.h> */
95 
96 int safepri = 0;
97 
98 struct cpu_info cpu_info_primary;
99 struct cpu_info *cpu_info[MAXCPUS] = { &cpu_info_primary };
100 
101 struct fdt_reg memreg[VM_PHYSSEG_MAX];
102 int nmemreg;
103 
104 void memreg_add(const struct fdt_reg *);
105 void memreg_remove(const struct fdt_reg *);
106 
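/*
 * Minimal decimal string parser, used below to pick the console speed
 * out of the "stdout-path" property; handles optional leading '-' signs.
 */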
107 static int
108 atoi(const char *s)
109 {
110 	int n, neg;
111 
112 	n = 0;
113 	neg = 0;
114 
115 	while (*s == '-') {
116 		s++;
117 		neg = !neg;
118 	}
119 
120 	while (*s != '\0') {
121 		if (*s < '0' || *s > '9')
122 			break;
123 
124 		n = (10 * n) + (*s - '0');
125 		s++;
126 	}
127 
128 	return (neg ? -n : n);
129 }
130 
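/*
 * Locate the console device node in the FDT.  Honor the /chosen
 * "stdout-path" property (optionally suffixed with ":<speed>..."),
 * fall back to the "serial0" alias, and return the node only if it
 * is compatible with the requested driver name.
 */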
131 void *
132 fdt_find_cons(const char *name)
133 {
134 	char *alias = "serial0";
135 	char buf[128];
136 	char *stdout = NULL;
137 	char *p;
138 	void *node;
139 
140 	/* First check if "stdout-path" is set. */
141 	node = fdt_find_node("/chosen");
142 	if (node) {
143 		if (fdt_node_property(node, "stdout-path", &stdout) > 0) {
144 			if (strchr(stdout, ':') != NULL) {
145 				strlcpy(buf, stdout, sizeof(buf));
146 				if ((p = strchr(buf, ':')) != NULL) {
147 					*p++ = '\0';
148 					stdout_speed = atoi(p);
149 				}
150 				stdout = buf;
151 			}
152 			if (stdout[0] != '/') {
153 				/* It's an alias. */
154 				alias = stdout;
155 				stdout = NULL;
156 			}
157 		}
158 	}
159 
160 	/* Perform alias lookup if necessary. */
161 	if (stdout == NULL) {
162 		node = fdt_find_node("/aliases");
163 		if (node)
164 			fdt_node_property(node, alias, &stdout);
165 	}
166 
167 	/* Look up the device node for the console interface. */
168 	if (stdout) {
169 		node = fdt_find_node(stdout);
170 		if (node && fdt_is_compatible(node, name)) {
171 			stdout_node = OF_finddevice(stdout);
172 			return (node);
173 		}
174 	}
175 
176 	return (NULL);
177 }
178 
179 void	amluart_init_cons(void);
180 void	cduart_init_cons(void);
181 void	com_fdt_init_cons(void);
182 void	exuart_init_cons(void);
183 void	imxuart_init_cons(void);
184 void	mvuart_init_cons(void);
185 void	pluart_init_cons(void);
186 void	simplefb_init_cons(bus_space_tag_t);
187 
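/*
 * Attach the console.  Each *_init_cons() routine is expected to check
 * the FDT for a console device it drives and claim the console if found.
 */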
188 void
189 consinit(void)
190 {
191 	static int consinit_called = 0;
192 
193 	if (consinit_called != 0)
194 		return;
195 
196 	consinit_called = 1;
197 
198 	amluart_init_cons();
199 	cduart_init_cons();
200 	com_fdt_init_cons();
201 	exuart_init_cons();
202 	imxuart_init_cons();
203 	mvuart_init_cons();
204 	pluart_init_cons();
205 	simplefb_init_cons(&arm64_bs_tag);
206 }
207 
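/*
 * Idle loop support: interrupts stay disabled across the idle check,
 * and cpu_idle_cycle() waits for an interrupt (WFI by default) before
 * briefly re-enabling interrupts so the wakeup can be taken.
 */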
208 void
209 cpu_idle_enter(void)
210 {
211 	disable_irq_daif();
212 }
213 
214 void (*cpu_idle_cycle_fcn)(void) = cpu_wfi;
215 
216 void
217 cpu_idle_cycle(void)
218 {
219 	cpu_idle_cycle_fcn();
220 	enable_irq_daif();
221 	disable_irq_daif();
222 }
223 
224 void
225 cpu_idle_leave(void)
226 {
227 	enable_irq_daif();
228 }
229 
230 /* Dummy trapframe for proc0. */
231 struct trapframe proc0tf;
232 
233 void
234 cpu_startup(void)
235 {
236 	u_int loop;
237 	paddr_t minaddr;
238 	paddr_t maxaddr;
239 
240 	proc0.p_addr = proc0paddr;
241 
242 	/*
243 	 * Give pmap a chance to set up a few more things now that the
244 	 * vm is initialised
245 	 */
246 	pmap_postinit();
247 
248 	/*
249 	 * Initialize error message buffer (at end of core).
250 	 */
251 
252 	/* msgbufphys was set up during the secondary bootstrap */
253 	for (loop = 0; loop < atop(MSGBUFSIZE); ++loop)
254 		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
255 		    msgbufphys + loop * PAGE_SIZE, PROT_READ | PROT_WRITE);
256 	pmap_update(pmap_kernel());
257 	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
258 
259 	/*
260 	 * Identify ourselves for the msgbuf (everything printed earlier will
261 	 * not be buffered).
262 	 */
263 	printf("%s", version);
264 
265 	printf("real mem  = %lu (%luMB)\n", ptoa(physmem),
266 	    ptoa(physmem) / 1024 / 1024);
267 
268 	/*
269 	 * Allocate a submap for exec arguments.  This map effectively
270 	 * limits the number of processes exec'ing at any time.
271 	 */
272 	minaddr = vm_map_min(kernel_map);
273 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
274 	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
275 
276 	/*
277 	 * Allocate a submap for physio
278 	 */
279 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
280 	    VM_PHYS_SIZE, 0, FALSE, NULL);
281 
282 	/*
283 	 * Set up buffers, so they can be used to read disk labels.
284 	 */
285 	bufinit();
286 
287 	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
288 	    ptoa(uvmexp.free) / 1024 / 1024);
289 
290 	curpcb = &proc0.p_addr->u_pcb;
291 	curpcb->pcb_flags = 0;
292 	curpcb->pcb_tf = &proc0tf;
293 
294 	if (boothowto & RB_CONFIG) {
295 #ifdef BOOT_CONFIG
296 		user_config();
297 #else
298 		printf("kernel does not support -c; continuing..\n");
299 #endif
300 	}
301 }
302 
303 void    cpu_switchto_asm(struct proc *, struct proc *);
304 
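/*
 * Context switch: save the outgoing process's FPU state if it used
 * the FPU, then let the assembly routine switch register state.
 */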
305 void
306 cpu_switchto(struct proc *old, struct proc *new)
307 {
308 	if (old) {
309 		struct pcb *pcb = &old->p_addr->u_pcb;
310 
311 		if (pcb->pcb_flags & PCB_FPU)
312 			fpu_save(old);
313 
314 		fpu_drop();
315 	}
316 
317 	cpu_switchto_asm(old, new);
318 }
319 
320 /*
321  * machine dependent system variables.
322  */
323 
324 const struct sysctl_bounded_args cpuctl_vars[] = {
325 	{ CPU_LIDACTION, &lid_action, 0, 2 },
326 };
327 
328 int
329 cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
330     size_t newlen, struct proc *p)
331 {
332 	char *compatible;
333 	int node, len, error;
334 
335 	/* all sysctl names at this level are terminal */
336 	if (namelen != 1)
337 		return (ENOTDIR);		/* overloaded */
338 
339 	switch (name[0]) {
340 	case CPU_COMPATIBLE:
341 		node = OF_finddevice("/");
342 		len = OF_getproplen(node, "compatible");
343 		if (len <= 0)
344 			return (EOPNOTSUPP);
345 		compatible = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
346 		OF_getprop(node, "compatible", compatible, len);
347 		compatible[len - 1] = 0;
348 		error = sysctl_rdstring(oldp, oldlenp, newp, compatible);
349 		free(compatible, M_TEMP, len);
350 		return error;
351 	case CPU_ID_AA64ISAR0:
352 		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64isar0);
353 	case CPU_ID_AA64ISAR1:
354 		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64isar1);
355 	case CPU_ID_AA64ISAR2:
356 		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64isar2);
357 	case CPU_ID_AA64PFR0:
358 		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64pfr0);
359 	case CPU_ID_AA64PFR1:
360 		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64pfr1);
361 	case CPU_ID_AA64MMFR0:
362 		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64mmfr0);
363 	case CPU_ID_AA64MMFR1:
364 		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64mmfr1);
365 	case CPU_ID_AA64MMFR2:
366 		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64mmfr2);
367 	case CPU_ID_AA64SMFR0:
368 	case CPU_ID_AA64ZFR0:
369 		return sysctl_rdquad(oldp, oldlenp, newp, 0);
370 	default:
371 		return (sysctl_bounded_arr(cpuctl_vars, nitems(cpuctl_vars),
372 		    name, namelen, oldp, oldlenp, newp, newlen));
373 	}
374 	/* NOTREACHED */
375 }
376 
377 void dumpsys(void);
378 
379 int	waittime = -1;
380 
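/*
 * Halt/reboot path: sync file systems, bring interfaces down, dump if
 * requested, power down devices, and finally halt, power off or reset.
 */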
381 __dead void
382 boot(int howto)
383 {
384 	if ((howto & RB_RESET) != 0)
385 		goto doreset;
386 
387 	if (cold) {
388 		if ((howto & RB_USERREQ) == 0)
389 			howto |= RB_HALT;
390 		goto haltsys;
391 	}
392 
393 	boothowto = howto;
394 	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
395 		waittime = 0;
396 		vfs_shutdown(curproc);
397 
398 		if ((howto & RB_TIMEBAD) == 0) {
399 			resettodr();
400 		} else {
401 			printf("WARNING: not updating battery clock\n");
402 		}
403 	}
404 	if_downall();
405 
406 	uvm_shutdown();
407 	splhigh();
408 	cold = 1;
409 
410 	if ((howto & RB_DUMP) != 0)
411 		dumpsys();
412 
413 haltsys:
414 	config_suspend_all(DVACT_POWERDOWN);
415 
416 	if ((howto & RB_HALT) != 0) {
417 		if ((howto & RB_POWERDOWN) != 0) {
418 			printf("\nAttempting to power down...\n");
419 			delay(500000);
420 			if (powerdownfn)
421 				(*powerdownfn)();
422 		}
423 
424 		printf("\n");
425 		printf("The operating system has halted.\n");
426 		printf("Please press any key to reboot.\n\n");
427 		cngetc();
428 	}
429 
430 doreset:
431 	printf("rebooting...\n");
432 	delay(500000);
433 	if (cpuresetfn)
434 		(*cpuresetfn)();
435 	printf("reboot failed; spinning\n");
436 	for (;;)
437 		continue;
438 	/* NOTREACHED */
439 }
440 
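/*
 * Set up the register state for a newly exec'd process: enable BTI
 * guarded pages unless the binary opts out (EXEC_NOBTCFI), generate
 * fresh pointer authentication keys, clear any FPU state and point
 * the trapframe at the new entry point and stack.
 */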
441 void
442 setregs(struct proc *p, struct exec_package *pack, u_long stack,
443     struct ps_strings *arginfo)
444 {
445 	struct pmap *pm = p->p_vmspace->vm_map.pmap;
446 	struct pcb *pcb = &p->p_addr->u_pcb;
447 	struct trapframe *tf = pcb->pcb_tf;
448 
449 	if (pack->ep_flags & EXEC_NOBTCFI)
450 		pm->pm_guarded = 0;
451 	else
452 		pm->pm_guarded = ATTR_GP;
453 
454 	arc4random_buf(&pm->pm_apiakey, sizeof(pm->pm_apiakey));
455 	arc4random_buf(&pm->pm_apdakey, sizeof(pm->pm_apdakey));
456 	arc4random_buf(&pm->pm_apibkey, sizeof(pm->pm_apibkey));
457 	arc4random_buf(&pm->pm_apdbkey, sizeof(pm->pm_apdbkey));
458 	arc4random_buf(&pm->pm_apgakey, sizeof(pm->pm_apgakey));
459 	pmap_setpauthkeys(pm);
460 
461 	/* If we were using the FPU, forget about it. */
462 	memset(&pcb->pcb_fpstate, 0, sizeof(pcb->pcb_fpstate));
463 	pcb->pcb_flags &= ~PCB_FPU;
464 	fpu_drop();
465 
466 	memset(tf, 0, sizeof *tf);
467 	tf->tf_sp = stack;
468 	tf->tf_lr = pack->ep_entry;
469 	tf->tf_elr = pack->ep_entry; /* ??? */
470 	tf->tf_spsr = PSR_M_EL0t | PSR_DIT;
471 }
472 
473 void
474 need_resched(struct cpu_info *ci)
475 {
476 	ci->ci_want_resched = 1;
477 
478 	/* There's a risk we'll be called before the idle threads start */
479 	if (ci->ci_curproc) {
480 		aston(ci->ci_curproc);
481 		cpu_kick(ci);
482 	}
483 }
484 
485 int	cpu_dumpsize(void);
486 u_long	cpu_dump_mempagecnt(void);
487 
488 paddr_t dumpmem_paddr;
489 vaddr_t dumpmem_vaddr;
490 psize_t dumpmem_sz;
491 
492 /*
493  * These variables are needed by /sbin/savecore
494  */
495 u_long	dumpmag = 0x8fca0101;	/* magic number */
496 int 	dumpsize = 0;		/* pages */
497 long	dumplo = 0; 		/* blocks */
498 
499 /*
500  * cpu_dump: dump the machine-dependent kernel core dump headers.
501  */
502 int
503 cpu_dump(void)
504 {
505 	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
506 	char buf[dbtob(1)];
507 	kcore_seg_t *segp;
508 	cpu_kcore_hdr_t *cpuhdrp;
509 	phys_ram_seg_t *memsegp;
510 #if 0
511 	caddr_t va;
512 	int i;
513 #endif
514 
515 	dump = bdevsw[major(dumpdev)].d_dump;
516 
517 	memset(buf, 0, sizeof buf);
518 	segp = (kcore_seg_t *)buf;
519 	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
520 	memsegp = (phys_ram_seg_t *)&buf[ALIGN(sizeof(*segp)) +
521 	    ALIGN(sizeof(*cpuhdrp))];
522 
523 	/*
524 	 * Generate a segment header.
525 	 */
526 	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
527 	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
528 
529 	/*
530 	 * Add the machine-dependent header info.
531 	 */
532 	cpuhdrp->kernelbase = KERNEL_BASE;
533 	cpuhdrp->kerneloffs = 0;
534 	cpuhdrp->staticsize = 0;
535 	cpuhdrp->pmap_kernel_l1 = 0;
536 	cpuhdrp->pmap_kernel_l2 = 0;
537 
538 #if 0
539 	/*
540 	 * Fill in the memory segment descriptors.
541 	 */
542 	for (i = 0; i < mem_cluster_cnt; i++) {
543 		memsegp[i].start = mem_clusters[i].start;
544 		memsegp[i].size = mem_clusters[i].size & PMAP_PA_MASK;
545 	}
546 
547 	/*
548 	 * If a dump bounce buffer has been set up, assume the kernel
549 	 * stack is in high memory and bounce the headers through it
550 	 */
551 	if (dumpmem_vaddr != 0) {
552 		memcpy((char *)dumpmem_vaddr, buf, sizeof(buf));
553 		va = (caddr_t)dumpmem_vaddr;
554 	} else {
555 		va = (caddr_t)buf;
556 	}
557 	return (dump(dumpdev, dumplo, va, dbtob(1)));
558 #else
559 	return ENOSYS;
560 #endif
561 }
562 
563 /*
564  * This is called by main to set dumplo and dumpsize.
565  * Dumps always skip the first PAGE_SIZE of disk space
566  * in case there might be a disk label stored there.
567  * If there is extra space, put dump at the end to
568  * reduce the chance that swapping trashes it.
569  */
570 void
571 dumpconf(void)
572 {
573 	int nblks, dumpblks;	/* size of dump area */
574 
575 	if (dumpdev == NODEV ||
576 	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
577 		return;
578 	if (nblks <= ctod(1))
579 		return;
580 
581 	dumpblks = cpu_dumpsize();
582 	if (dumpblks < 0)
583 		return;
584 	dumpblks += ctod(cpu_dump_mempagecnt());
585 
586 	/* If dump won't fit (incl. room for possible label), punt. */
587 	if (dumpblks > (nblks - ctod(1)))
588 		return;
589 
590 	/* Put dump at end of partition */
591 	dumplo = nblks - dumpblks;
592 
593 	/* dumpsize is in page units, and doesn't include headers. */
594 	dumpsize = cpu_dump_mempagecnt();
595 }
596 
597 /*
598  * Doadump comes here after turning off memory management and
599  * getting on the dump stack, either when called above, or by
600  * the auto-restart code.
601  */
602 #define BYTES_PER_DUMP  MAXPHYS /* must be a multiple of pagesize */
603 
604 void
605 dumpsys(void)
606 {
607 	u_long totalbytesleft, bytes, i, n, memseg;
608 	u_long maddr;
609 	daddr_t blkno;
610 	void *va;
611 	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
612 	int error;
613 
614 	if (dumpdev == NODEV)
615 		return;
616 
617 	/*
618 	 * For dumps during autoconfiguration, the dump device may not
619 	 * have been set up yet; configure it now if necessary.
620 	 */
621 	if (dumpsize == 0)
622 		dumpconf();
623 	if (dumplo <= 0 || dumpsize == 0) {
624 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
625 		    minor(dumpdev));
626 		return;
627 	}
628 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
629 	    minor(dumpdev), dumplo);
630 
631 	error = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
632 	printf("dump ");
633 	if (error == -1) {
634 		printf("area unavailable\n");
635 		return;
636 	}
637 
638 	if ((error = cpu_dump()) != 0)
639 		goto err;
640 
641 	totalbytesleft = ptoa(cpu_dump_mempagecnt());
642 	blkno = dumplo + cpu_dumpsize();
643 	dump = bdevsw[major(dumpdev)].d_dump;
644 	error = 0;
645 
646 	bytes = n = i = memseg = 0;
647 	maddr = 0;
648 	va = 0;
649 #if 0
650 	for (memseg = 0; memseg < mem_cluster_cnt; memseg++) {
651 		maddr = mem_clusters[memseg].start;
652 		bytes = mem_clusters[memseg].size;
653 
654 		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
655 			/* Print out how many MBs we have left to go. */
656 			if ((totalbytesleft % (1024*1024)) < BYTES_PER_DUMP)
657 				printf("%ld ", totalbytesleft / (1024 * 1024));
658 
659 			/* Limit size for next transfer. */
660 			n = bytes - i;
661 			if (n > BYTES_PER_DUMP)
662 				n = BYTES_PER_DUMP;
663 			if (maddr > 0xffffffff) {
664 				va = (void *)dumpmem_vaddr;
665 				if (n > dumpmem_sz)
666 					n = dumpmem_sz;
667 				memcpy(va, (void *)PMAP_DIRECT_MAP(maddr), n);
668 			} else {
669 				va = (void *)PMAP_DIRECT_MAP(maddr);
670 			}
671 
672 			error = (*dump)(dumpdev, blkno, va, n);
673 			if (error)
674 				goto err;
675 			maddr += n;
676 			blkno += btodb(n);		/* XXX? */
677 
678 #if 0	/* XXX this doesn't work.  grr. */
679 			/* operator aborting dump? */
680 			if (sget() != NULL) {
681 				error = EINTR;
682 				break;
683 			}
684 #endif
685 		}
686 	}
687 #endif
688 
689  err:
690 	switch (error) {
691 
692 	case ENXIO:
693 		printf("device bad\n");
694 		break;
695 
696 	case EFAULT:
697 		printf("device not ready\n");
698 		break;
699 
700 	case EINVAL:
701 		printf("area improper\n");
702 		break;
703 
704 	case EIO:
705 		printf("i/o error\n");
706 		break;
707 
708 	case EINTR:
709 		printf("aborted from console\n");
710 		break;
711 
712 	case 0:
713 		printf("succeeded\n");
714 		break;
715 
716 	default:
717 		printf("error %d\n", error);
718 		break;
719 	}
720 	printf("\n\n");
721 	delay(5000000);		/* 5 seconds */
722 }
723 
724 
725 /*
726  * Size of memory segments, before any memory is stolen.
727  */
728 phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
729 int     mem_cluster_cnt;
730 
731 /*
732  * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
733  */
734 int
735 cpu_dumpsize(void)
736 {
737 	int size;
738 
739 	size = ALIGN(sizeof(kcore_seg_t)) +
740 	    ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
741 	if (roundup(size, dbtob(1)) != dbtob(1))
742 		return (-1);
743 
744 	return (1);
745 }
746 
747 u_long
748 cpu_dump_mempagecnt(void)
749 {
750 	return 0;
751 }
752 
753 int64_t dcache_line_size;	/* The minimum D cache line size */
754 int64_t icache_line_size;	/* The minimum I cache line size */
755 int64_t idcache_line_size;	/* The minimum cache line size */
756 int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
757 
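/*
 * Derive the minimum D-cache, I-cache and "dc zva" line sizes from
 * CTR_EL0 and DCZID_EL0.
 */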
758 void
759 cache_setup(void)
760 {
761 	int dcache_line_shift, icache_line_shift, dczva_line_shift;
762 	uint32_t ctr_el0;
763 	uint32_t dczid_el0;
764 
765 	ctr_el0 = READ_SPECIALREG(ctr_el0);
766 
767 	/* Read the log2 of the number of words in each D cache line */
768 	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
769 	/* Get the D cache line size */
770 	dcache_line_size = sizeof(int) << dcache_line_shift;
771 
772 	/* And the same for the I cache */
773 	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
774 	icache_line_size = sizeof(int) << icache_line_shift;
775 
776 	idcache_line_size = MIN(dcache_line_size, icache_line_size);
777 
778 	dczid_el0 = READ_SPECIALREG(dczid_el0);
779 
780 	/* Check whether dc zva is permitted */
781 	if (dczid_el0 & DCZID_DZP)
782 		dczva_line_size = 0;
783 	else {
784 		/* Same as with above calculations */
785 		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
786 		dczva_line_size = sizeof(int) << dczva_line_shift;
787 	}
788 }
789 
790 uint64_t mmap_start;
791 uint32_t mmap_size;
792 uint32_t mmap_desc_size;
793 uint32_t mmap_desc_ver;
794 
795 EFI_MEMORY_DESCRIPTOR *mmap;
796 
797 void	collect_kernel_args(const char *);
798 void	process_kernel_args(void);
799 
800 int	pmap_bootstrap_bs_map(bus_space_tag_t, bus_addr_t,
801 	    bus_size_t, int, bus_space_handle_t *);
802 
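/*
 * Early machine-dependent bootstrap, called from the locore startup
 * code: set the per-CPU pointer, bootstrap pmap, map and later
 * relocate the FDT and the EFI memory map, parse the /chosen
 * properties handed over by the bootloader, and hand the remaining
 * physical memory to UVM.
 */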
803 void
804 initarm(struct arm64_bootparams *abp)
805 {
806 	long kernbase = (long)_start & ~PAGE_MASK;
807 	long kvo = abp->kern_delta;
808 	paddr_t memstart, memend;
809 	paddr_t startpa, endpa, pa;
810 	vaddr_t vstart, va;
811 	struct fdt_head *fh;
812 	void *config = abp->arg2;
813 	void *fdt = NULL;
814 	struct fdt_reg reg;
815 	void *node;
816 	EFI_PHYSICAL_ADDRESS system_table = 0;
817 	int (*map_func_save)(bus_space_tag_t, bus_addr_t, bus_size_t, int,
818 	    bus_space_handle_t *);
819 	int i;
820 
821 	/*
822 	 * Set the per-CPU pointer with a backup in tpidr_el1 to be
823 	 * loaded when entering the kernel from userland.
824 	 */
825 	__asm volatile("mov x18, %0\n"
826 	    "msr tpidr_el1, %0" :: "r"(&cpu_info_primary));
827 
828 	cache_setup();
829 
830 	/* The bootloader has loaded us into a 64MB block. */
831 	memstart = KERNBASE + kvo;
832 	memend = memstart + 64 * 1024 * 1024;
833 
834 	/* Bootstrap enough of pmap to enter the kernel proper. */
835 	vstart = pmap_bootstrap(kvo, abp->kern_l1pt,
836 	    kernbase, esym, memstart, memend);
837 
838 	/* Map the FDT header to determine its size. */
839 	va = vstart;
840 	startpa = trunc_page((paddr_t)config);
841 	endpa = round_page((paddr_t)config + sizeof(struct fdt_head));
842 	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
843 		pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, PMAP_CACHE_WB);
844 	fh = (void *)(vstart + ((paddr_t)config - startpa));
845 	if (betoh32(fh->fh_magic) != FDT_MAGIC || betoh32(fh->fh_size) == 0)
846 		panic("%s: no FDT", __func__);
847 
848 	/* Map the remainder of the FDT. */
849 	endpa = round_page((paddr_t)config + betoh32(fh->fh_size));
850 	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
851 		pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, PMAP_CACHE_WB);
852 	config = (void *)(vstart + ((paddr_t)config - startpa));
853 	vstart = va;
854 
855 	if (!fdt_init(config))
856 		panic("%s: corrupt FDT", __func__);
857 
858 	node = fdt_find_node("/chosen");
859 	if (node != NULL) {
860 		char *prop;
861 		int len;
862 		static uint8_t lladdr[6];
863 
864 		len = fdt_node_property(node, "bootargs", &prop);
865 		if (len > 0)
866 			collect_kernel_args(prop);
867 
868 		len = fdt_node_property(node, "openbsd,boothowto", &prop);
869 		if (len == sizeof(boothowto))
870 			boothowto = bemtoh32((uint32_t *)prop);
871 
872 		len = fdt_node_property(node, "openbsd,bootduid", &prop);
873 		if (len == sizeof(bootduid))
874 			memcpy(bootduid, prop, sizeof(bootduid));
875 
876 		len = fdt_node_property(node, "openbsd,bootmac", &prop);
877 		if (len == sizeof(lladdr)) {
878 			memcpy(lladdr, prop, sizeof(lladdr));
879 			bootmac = lladdr;
880 		}
881 
882 		len = fdt_node_property(node, "openbsd,sr-bootuuid", &prop);
883 #if NSOFTRAID > 0
884 		if (len == sizeof(sr_bootuuid))
885 			memcpy(&sr_bootuuid, prop, sizeof(sr_bootuuid));
886 #endif
887 		if (len > 0)
888 			explicit_bzero(prop, len);
889 
890 		len = fdt_node_property(node, "openbsd,sr-bootkey", &prop);
891 #if NSOFTRAID > 0
892 		if (len == sizeof(sr_bootkey))
893 			memcpy(&sr_bootkey, prop, sizeof(sr_bootkey));
894 #endif
895 		if (len > 0)
896 			explicit_bzero(prop, len);
897 
898 		len = fdt_node_property(node, "openbsd,uefi-mmap-start", &prop);
899 		if (len == sizeof(mmap_start))
900 			mmap_start = bemtoh64((uint64_t *)prop);
901 		len = fdt_node_property(node, "openbsd,uefi-mmap-size", &prop);
902 		if (len == sizeof(mmap_size))
903 			mmap_size = bemtoh32((uint32_t *)prop);
904 		len = fdt_node_property(node, "openbsd,uefi-mmap-desc-size", &prop);
905 		if (len == sizeof(mmap_desc_size))
906 			mmap_desc_size = bemtoh32((uint32_t *)prop);
907 		len = fdt_node_property(node, "openbsd,uefi-mmap-desc-ver", &prop);
908 		if (len == sizeof(mmap_desc_ver))
909 			mmap_desc_ver = bemtoh32((uint32_t *)prop);
910 
911 		len = fdt_node_property(node, "openbsd,uefi-system-table", &prop);
912 		if (len == sizeof(system_table))
913 			system_table = bemtoh64((uint64_t *)prop);
914 
915 		len = fdt_node_property(node, "openbsd,dma-constraint", &prop);
916 		if (len == sizeof(dma_constraint)) {
917 			dma_constraint.ucr_low = bemtoh64((uint64_t *)prop);
918 			dma_constraint.ucr_high = bemtoh64((uint64_t *)prop + 1);
919 		}
920 	}
921 
922 	process_kernel_args();
923 
924 	proc0paddr = (struct user *)abp->kern_stack;
925 
926 	msgbufaddr = (caddr_t)vstart;
927 	msgbufphys = pmap_steal_avail(round_page(MSGBUFSIZE), PAGE_SIZE, NULL);
928 	vstart += round_page(MSGBUFSIZE);
929 
930 	zero_page = vstart;
931 	vstart += MAXCPUS * PAGE_SIZE;
932 	copy_src_page = vstart;
933 	vstart += MAXCPUS * PAGE_SIZE;
934 	copy_dst_page = vstart;
935 	vstart += MAXCPUS * PAGE_SIZE;
936 
937 	/* Relocate the FDT to safe memory. */
938 	if (fdt_get_size(config) != 0) {
939 		uint32_t csize, size = round_page(fdt_get_size(config));
940 		paddr_t pa;
941 		vaddr_t va;
942 
943 		pa = pmap_steal_avail(size, PAGE_SIZE, NULL);
944 		memcpy((void *)pa, config, size); /* copy to physical */
945 		for (va = vstart, csize = size; csize > 0;
946 		    csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE)
947 			pmap_kenter_cache(va, pa, PROT_READ, PMAP_CACHE_WB);
948 
949 		fdt = (void *)vstart;
950 		vstart += size;
951 	}
952 
953 	/* Relocate the EFI memory map too. */
954 	if (mmap_start != 0) {
955 		uint32_t csize, size = round_page(mmap_size);
956 		paddr_t pa, startpa, endpa;
957 		vaddr_t va;
958 
959 		startpa = trunc_page(mmap_start);
960 		endpa = round_page(mmap_start + mmap_size);
961 		for (pa = startpa, va = vstart; pa < endpa;
962 		    pa += PAGE_SIZE, va += PAGE_SIZE)
963 			pmap_kenter_cache(va, pa, PROT_READ, PMAP_CACHE_WB);
964 		pa = pmap_steal_avail(size, PAGE_SIZE, NULL);
965 		memcpy((void *)pa, (caddr_t)vstart + (mmap_start - startpa),
966 		    mmap_size); /* copy to physical */
967 		pmap_kremove(vstart, endpa - startpa);
968 
969 		for (va = vstart, csize = size; csize > 0;
970 		    csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE)
971 			pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, PMAP_CACHE_WB);
972 
973 		mmap = (void *)vstart;
974 		vstart += size;
975 	}
976 
977 	/* No more KVA stealing after this point. */
978 	virtual_avail = vstart;
979 
980 	/* Now we can reinit the FDT, using the virtual address. */
981 	if (fdt)
982 		fdt_init(fdt);
983 
984 	map_func_save = arm64_bs_tag._space_map;
985 	arm64_bs_tag._space_map = pmap_bootstrap_bs_map;
986 
987 	consinit();
988 
989 	arm64_bs_tag._space_map = map_func_save;
990 
991 	pmap_avail_fixup();
992 
993 	uvmexp.pagesize = PAGE_SIZE;
994 	uvm_setpagesize();
995 
996 	/* Make what's left of the initial 64MB block available to UVM. */
997 	pmap_physload_avail();
998 
999 	/* Make all other physical memory available to UVM. */
1000 	if (mmap && mmap_desc_ver == EFI_MEMORY_DESCRIPTOR_VERSION) {
1001 		EFI_MEMORY_DESCRIPTOR *desc = mmap;
1002 
1003 		/*
1004 		 * Load all memory marked as EfiConventionalMemory,
1005 		 * EfiBootServicesCode or EfiBootServicesData.
1006 		 * The initial 64MB memory block should be marked as
1007 		 * EfiLoaderData so it won't be added here.
1008 		 */
1009 		for (i = 0; i < mmap_size / mmap_desc_size; i++) {
1010 #ifdef MMAP_DEBUG
1011 			printf("type 0x%x pa 0x%llx va 0x%llx pages 0x%llx attr 0x%llx\n",
1012 			    desc->Type, desc->PhysicalStart,
1013 			    desc->VirtualStart, desc->NumberOfPages,
1014 			    desc->Attribute);
1015 #endif
1016 			if (desc->Type == EfiConventionalMemory ||
1017 			    desc->Type == EfiBootServicesCode ||
1018 			    desc->Type == EfiBootServicesData) {
1019 				reg.addr = desc->PhysicalStart;
1020 				reg.size = ptoa(desc->NumberOfPages);
1021 				memreg_add(&reg);
1022 			}
1023 			desc = NextMemoryDescriptor(desc, mmap_desc_size);
1024 		}
1025 	} else {
1026 		node = fdt_find_node("/memory");
1027 		if (node == NULL)
1028 			panic("%s: no memory specified", __func__);
1029 
1030 		for (i = 0; nmemreg < nitems(memreg); i++) {
1031 			if (fdt_get_reg(node, i, &reg))
1032 				break;
1033 			if (reg.size == 0)
1034 				continue;
1035 			memreg_add(&reg);
1036 		}
1037 	}
1038 
1039 	/* Remove reserved memory. */
1040 	node = fdt_find_node("/reserved-memory");
1041 	if (node) {
1042 		for (node = fdt_child_node(node); node;
1043 		    node = fdt_next_node(node)) {
1044 			char *no_map;
1045 			if (fdt_node_property(node, "no-map", &no_map) < 0)
1046 				continue;
1047 			if (fdt_get_reg(node, 0, &reg))
1048 				continue;
1049 			if (reg.size == 0)
1050 				continue;
1051 			memreg_remove(&reg);
1052 		}
1053 	}
1054 
1055 	/* Remove the initial 64MB block. */
1056 	reg.addr = memstart;
1057 	reg.size = memend - memstart;
1058 	memreg_remove(&reg);
1059 
1060 	for (i = 0; i < nmemreg; i++) {
1061 		paddr_t start = memreg[i].addr;
1062 		paddr_t end = start + memreg[i].size;
1063 
1064 		uvm_page_physload(atop(start), atop(end),
1065 		    atop(start), atop(end), 0);
1066 		physmem += atop(end - start);
1067 	}
1068 
1069 	kmeminit_nkmempages();
1070 
1071 	/*
1072 	 * Make sure that we have enough KVA to initialize UVM.  In
1073 	 * particular, we need enough KVA to be able to allocate the
1074 	 * vm_page structures and nkmempages for malloc(9).
1075 	 */
1076 	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 1024 * 1024 * 1024 +
1077 	    physmem * sizeof(struct vm_page) + ptoa(nkmempages));
1078 
1079 #ifdef DDB
1080 	db_machine_init();
1081 
1082 	/* Firmware doesn't load symbols. */
1083 	ddb_init();
1084 
1085 	if (boothowto & RB_KDB)
1086 		db_enter();
1087 #endif
1088 
1089 	softintr_init();
1090 	splraise(IPL_IPI);
1091 }
1092 
1093 char bootargs[256];
1094 
1095 void
1096 collect_kernel_args(const char *args)
1097 {
1098 	/* Make a local copy of the bootargs */
1099 	strlcpy(bootargs, args, sizeof(bootargs));
1100 }
1101 
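/*
 * Split the kernel file name off the boot arguments and translate the
 * -a/-c/-d/-s boot flags into boothowto bits.
 */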
1102 void
1103 process_kernel_args(void)
1104 {
1105 	char *cp = bootargs;
1106 
1107 	if (*cp == 0)
1108 		return;
1109 
1110 	/* Skip the kernel image filename */
1111 	while (*cp != ' ' && *cp != 0)
1112 		cp++;
1113 
1114 	if (*cp != 0)
1115 		*cp++ = 0;
1116 
1117 	while (*cp == ' ')
1118 		cp++;
1119 
1120 	boot_args = cp;
1121 
1122 	printf("bootargs: %s\n", boot_args);
1123 
1124 	/* Set up pointer to boot flags */
1125 	while (*cp != '-')
1126 		if (*cp++ == '\0')
1127 			return;
1128 
1129 	while (*cp != 0) {
1130 		switch (*cp) {
1131 		case 'a':
1132 			boothowto |= RB_ASKNAME;
1133 			break;
1134 		case 'c':
1135 			boothowto |= RB_CONFIG;
1136 			break;
1137 		case 'd':
1138 			boothowto |= RB_KDB;
1139 			break;
1140 		case 's':
1141 			boothowto |= RB_SINGLE;
1142 			break;
1143 		default:
1144 			printf("unknown option `%c'\n", *cp);
1145 			break;
1146 		}
1147 		cp++;
1148 	}
1149 }
1150 
1151 /*
1152  * Allow bootstrap to steal KVA after machdep has given it back to pmap.
1153  */
1154 int
1155 pmap_bootstrap_bs_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
1156     int flags, bus_space_handle_t *bshp)
1157 {
1158 	u_long startpa, pa, endpa;
1159 	vaddr_t va;
1160 	int cache = PMAP_CACHE_DEV_NGNRNE;
1161 
1162 	if (flags & BUS_SPACE_MAP_PREFETCHABLE)
1163 		cache = PMAP_CACHE_CI;
1164 
1165 	va = virtual_avail;	/* steal memory from virtual avail. */
1166 
1167 	startpa = trunc_page(bpa);
1168 	endpa = round_page((bpa + size));
1169 
1170 	*bshp = (bus_space_handle_t)(va + (bpa - startpa));
1171 
1172 	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
1173 		pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, cache);
1174 
1175 	virtual_avail = va;
1176 
1177 	return 0;
1178 }
1179 
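/*
 * Record a physical memory region, merging it with an existing region
 * when the two are adjacent.
 */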
1180 void
1181 memreg_add(const struct fdt_reg *reg)
1182 {
1183 	int i;
1184 
1185 	for (i = 0; i < nmemreg; i++) {
1186 		if (reg->addr == memreg[i].addr + memreg[i].size) {
1187 			memreg[i].size += reg->size;
1188 			return;
1189 		}
1190 		if (reg->addr + reg->size == memreg[i].addr) {
1191 			memreg[i].addr = reg->addr;
1192 			memreg[i].size += reg->size;
1193 			return;
1194 		}
1195 	}
1196 
1197 	if (nmemreg >= nitems(memreg))
1198 		return;
1199 
1200 	memreg[nmemreg++] = *reg;
1201 }
1202 
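/*
 * Carve a region out of the recorded physical memory, trimming or
 * splitting any region it overlaps and dropping regions that become
 * empty.
 */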
1203 void
1204 memreg_remove(const struct fdt_reg *reg)
1205 {
1206 	uint64_t start = reg->addr;
1207 	uint64_t end = reg->addr + reg->size;
1208 	int i, j;
1209 
1210 	for (i = 0; i < nmemreg; i++) {
1211 		uint64_t memstart = memreg[i].addr;
1212 		uint64_t memend = memreg[i].addr + memreg[i].size;
1213 
1214 		if (end <= memstart)
1215 			continue;
1216 		if (start >= memend)
1217 			continue;
1218 
1219 		if (start <= memstart)
1220 			memstart = MIN(end, memend);
1221 		if (end >= memend)
1222 			memend = MAX(start, memstart);
1223 
1224 		if (start > memstart && end < memend) {
1225 			if (nmemreg < nitems(memreg)) {
1226 				memreg[nmemreg].addr = end;
1227 				memreg[nmemreg].size = memend - end;
1228 				nmemreg++;
1229 			}
1230 			memend = start;
1231 		}
1232 		memreg[i].addr = memstart;
1233 		memreg[i].size = memend - memstart;
1234 	}
1235 
1236 	/* Remove empty slots. */
1237 	for (i = nmemreg - 1; i >= 0; i--) {
1238 		if (memreg[i].size == 0) {
1239 			for (j = i; (j + 1) < nmemreg; j++)
1240 				memreg[j] = memreg[j + 1];
1241 			nmemreg--;
1242 		}
1243 	}
1244 }
1245