1 /* $NetBSD: arm32_machdep.c,v 1.146 2023/04/07 08:55:30 skrll Exp $ */ 2 3 /* 4 * Copyright (c) 1994-1998 Mark Brinicombe. 5 * Copyright (c) 1994 Brini. 6 * All rights reserved. 7 * 8 * This code is derived from software written for Brini by Mark Brinicombe 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by Mark Brinicombe 21 * for the NetBSD Project. 22 * 4. The name of the company nor the name of the author may be used to 23 * endorse or promote products derived from this software without specific 24 * prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 27 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 28 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 29 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 
37 * 38 * Machine dependent functions for kernel setup 39 * 40 * Created : 17/09/94 41 * Updated : 18/04/01 updated for new wscons 42 */ 43 44 #include <sys/cdefs.h> 45 __KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.146 2023/04/07 08:55:30 skrll Exp $"); 46 47 #include "opt_arm_debug.h" 48 #include "opt_arm_start.h" 49 #include "opt_fdt.h" 50 #include "opt_modular.h" 51 #include "opt_md.h" 52 #include "opt_multiprocessor.h" 53 54 #include <sys/param.h> 55 56 #include <sys/atomic.h> 57 #include <sys/buf.h> 58 #include <sys/cpu.h> 59 #include <sys/device.h> 60 #include <sys/intr.h> 61 #include <sys/ipi.h> 62 #include <sys/kauth.h> 63 #include <sys/kernel.h> 64 #include <sys/mbuf.h> 65 #include <sys/module.h> 66 #include <sys/mount.h> 67 #include <sys/msgbuf.h> 68 #include <sys/proc.h> 69 #include <sys/reboot.h> 70 #include <sys/sysctl.h> 71 #include <sys/systm.h> 72 #include <sys/xcall.h> 73 74 #include <uvm/uvm_extern.h> 75 76 #include <dev/cons.h> 77 #include <dev/mm.h> 78 79 #include <arm/locore.h> 80 81 #include <arm/cpu_topology.h> 82 #include <arm/arm32/machdep.h> 83 84 #include <machine/bootconfig.h> 85 #include <machine/pcb.h> 86 87 #if defined(FDT) 88 #include <dev/fdt/fdtvar.h> 89 90 #include <arm/fdt/arm_fdtvar.h> 91 #include <arch/evbarm/fdt/platform.h> 92 #endif 93 94 #ifdef VERBOSE_INIT_ARM 95 #define VPRINTF(...) printf(__VA_ARGS__) 96 #ifdef __HAVE_GENERIC_START 97 void generic_prints(const char *); 98 void generic_printx(int); 99 #define VPRINTS(s) generic_prints(s) 100 #define VPRINTX(x) generic_printx(x) 101 #else 102 #define VPRINTS(s) __nothing 103 #define VPRINTX(x) __nothing 104 #endif 105 #else 106 #define VPRINTF(...) 
__nothing
#define VPRINTS(s)	__nothing
#define VPRINTX(x)	__nothing
#endif

void (*cpu_reset_address)(void);	/* Used by locore */
paddr_t cpu_reset_address_paddr;	/* Used by locore */

struct vm_map *phys_map = NULL;

#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
extern size_t md_root_size;		/* Memory disc size */
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

/* Per-mode bootstrap stacks, filled in by the MD startup code. */
pv_addr_t kernelstack;
pv_addr_t abtstack;
pv_addr_t fiqstack;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t idlestack;

void *	msgbufaddr;
extern paddr_t msgbufphys;

int kernel_debug = 0;
int cpu_printfataltraps = 0;
/*
 * CPU feature flags; exported read-only through the machdep sysctl
 * tree created below.
 */
int cpu_fpu_present;
int cpu_hwdiv_present;
int cpu_neon_present;
int cpu_simd_present;
int cpu_simdex_present;
int cpu_umull_present;
int cpu_synchprim_present;
int cpu_unaligned_sigbus;
const char *cpu_arch = "";

int cpu_instruction_set_attributes[6];
int cpu_memory_model_features[4];
int cpu_processor_features[2];
int cpu_media_and_vfp_features[2];

/* exported variable to be filled in by the bootloaders */
char *booted_kernel;

/* Prototypes */

void data_abort_handler(trapframe_t *frame);
void prefetch_abort_handler(trapframe_t *frame);
extern void configure(void);

/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
	/*
	 * If this processor has the security extension, don't bother
	 * to move/map the vector page.  Simply point VBAR to the copy
	 * that exists in the .text segment.
	 */
#ifndef ARM_HAS_VBAR
	if (va == ARM_VECTORS_LOW
	    && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) {
#endif
		extern const uint32_t page0rel[];
		vector_page = (vaddr_t)page0rel;
		/* VBAR needs a 32-byte aligned base address. */
		KASSERT((vector_page & 0x1f) == 0);
		armreg_vbar_write(vector_page);
		VPRINTF(" vbar=%p", page0rel);
		/* Make sure the "high vectors" control bit is clear. */
		cpu_control(CPU_CONTROL_VECRELOC, 0);
		return;
#ifndef ARM_HAS_VBAR
	}
#endif
#endif
#ifndef ARM_HAS_VBAR
	if (CPU_IS_PRIMARY(curcpu())) {
		extern unsigned int page0[], page0_data[];
		unsigned int *vectors = (int *) va;
		unsigned int *vectors_data = vectors + (page0_data - page0);
		int vec;

		/*
		 * Loop through the vectors we're taking over, and copy the
		 * vector's insn and data word.
		 */
		for (vec = 0; vec < ARM_NVEC; vec++) {
			if ((which & (1 << vec)) == 0) {
				/* Don't want to take over this vector. */
				continue;
			}
			vectors[vec] = page0[vec];
			vectors_data[vec] = page0_data[vec];
		}

		/* Now sync the vectors. */
		cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

		vector_page = va;
	}

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
#endif
}

/*
 * Debug function just to park the CPU
 */

void
halt(void)
{
	while (1)
		cpu_sleep(0);
}


/* Sync the discs, unmount the filesystems, and adjust the todr */

void
bootsync(void)
{
	static bool bootsyncdone = false;

	/* Only sync once even if boot() is re-entered. */
	if (bootsyncdone) return;

	bootsyncdone = true;

	/* Make sure we can still manage to do things */
	if (GetCPSR() & I32_bit) {
		/*
		 * If we get here then boot has been called without RB_NOSYNC
		 * and interrupts were disabled. This means the boot() call
		 * did not come from a user process e.g. shutdown, but must
		 * have come from somewhere in the kernel.
		 */
		IRQenable;
		printf("Warning IRQ's disabled during boot()\n");
	}

	vfs_shutdown();

	resettodr();
}

/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code.
 *
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;

#ifndef __HAVE_GENERIC_START
	/* Set the CPU control register */
	cpu_setup(boot_args);
#endif

#ifndef ARM_HAS_VBAR
	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ);
#endif

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

#ifdef FDT
	const struct fdt_platform * const plat = fdt_platform_find();
	if (plat->fp_startup != NULL)
		plat->fp_startup();
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was setup during the secondary boot strap */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)msgbufaddr, NULL)) {
		/* Not mapped yet: enter wired mappings page by page. */
		for (u_int loop = 0; loop < btoc(MSGBUFSIZE); ++loop) {
			pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
			    msgbufphys + loop * PAGE_SIZE,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
	}
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Allocate a submap for physio
	 */
	minaddr = 0;
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	banner();

	/*
	 * This is actually done by initarm_common, but not all ports use it
	 * yet so do it here to catch them as well
	 */
	struct lwp * const l = &lwp0;
	struct pcb * const pcb = lwp_getpcb(l);

	/* Zero out the PCB. */
	memset(pcb, 0, sizeof(*pcb));

	/* lwp0's trapframe sits at the top of its SVC stack. */
	pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
	pcb->pcb_ksp -= sizeof(struct trapframe);

	struct trapframe * tf = (struct trapframe *)pcb->pcb_ksp;

	/* Zero out the trapframe. */
	memset(tf, 0, sizeof(*tf));
	lwp_settrapframe(l, tf);

	tf->tf_spsr = PSR_USR32_MODE;
#ifdef _ARM_ARCH_BE8
	/* BE8 userland runs with the E (byte-swap) bit set. */
	tf->tf_spsr |= PSR_E_BIT;
#endif

	cpu_startup_hook();
}

/* Default no-op hook; ports override it via the weak alias above. */
__weak_alias(cpu_startup_hook,cpu_startup_default)
void
cpu_startup_default(void)
{
}

/*
 * machine dependent system variables.
370 */ 371 static int 372 sysctl_machdep_booted_device(SYSCTLFN_ARGS) 373 { 374 struct sysctlnode node; 375 376 if (booted_device == NULL) 377 return EOPNOTSUPP; 378 379 node = *rnode; 380 node.sysctl_data = __UNCONST(device_xname(booted_device)); 381 node.sysctl_size = strlen(device_xname(booted_device)) + 1; 382 return sysctl_lookup(SYSCTLFN_CALL(&node)); 383 } 384 385 static int 386 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS) 387 { 388 struct sysctlnode node; 389 390 if (booted_kernel == NULL || booted_kernel[0] == '\0') 391 return EOPNOTSUPP; 392 393 node = *rnode; 394 node.sysctl_data = booted_kernel; 395 node.sysctl_size = strlen(booted_kernel) + 1; 396 return sysctl_lookup(SYSCTLFN_CALL(&node)); 397 } 398 399 static int 400 sysctl_machdep_cpu_arch(SYSCTLFN_ARGS) 401 { 402 struct sysctlnode node = *rnode; 403 node.sysctl_data = __UNCONST(cpu_arch); 404 node.sysctl_size = strlen(cpu_arch) + 1; 405 return sysctl_lookup(SYSCTLFN_CALL(&node)); 406 } 407 408 static int 409 sysctl_machdep_powersave(SYSCTLFN_ARGS) 410 { 411 struct sysctlnode node = *rnode; 412 int error, newval; 413 414 newval = cpu_do_powersave; 415 node.sysctl_data = &newval; 416 if (cpufuncs.cf_sleep == (void *) cpufunc_nullop) 417 node.sysctl_flags &= ~CTLFLAG_READWRITE; 418 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 419 if (error || newp == NULL || newval == cpu_do_powersave) 420 return error; 421 422 if (newval < 0 || newval > 1) 423 return EINVAL; 424 cpu_do_powersave = newval; 425 426 return 0; 427 } 428 429 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") 430 { 431 432 sysctl_createv(clog, 0, NULL, NULL, 433 CTLFLAG_PERMANENT, 434 CTLTYPE_NODE, "machdep", NULL, 435 NULL, 0, NULL, 0, 436 CTL_MACHDEP, CTL_EOL); 437 438 sysctl_createv(clog, 0, NULL, NULL, 439 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 440 CTLTYPE_INT, "debug", NULL, 441 NULL, 0, &kernel_debug, 0, 442 CTL_MACHDEP, CPU_DEBUG, CTL_EOL); 443 sysctl_createv(clog, 0, NULL, NULL, 444 CTLFLAG_PERMANENT, 445 
CTLTYPE_STRING, "booted_device", NULL, 446 sysctl_machdep_booted_device, 0, NULL, 0, 447 CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL); 448 sysctl_createv(clog, 0, NULL, NULL, 449 CTLFLAG_PERMANENT, 450 CTLTYPE_STRING, "booted_kernel", NULL, 451 sysctl_machdep_booted_kernel, 0, NULL, 0, 452 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL); 453 sysctl_createv(clog, 0, NULL, NULL, 454 CTLFLAG_PERMANENT, 455 CTLTYPE_STRUCT, "console_device", NULL, 456 sysctl_consdev, 0, NULL, sizeof(dev_t), 457 CTL_MACHDEP, CPU_CONSDEV, CTL_EOL); 458 sysctl_createv(clog, 0, NULL, NULL, 459 CTLFLAG_PERMANENT, 460 CTLTYPE_STRING, "cpu_arch", NULL, 461 sysctl_machdep_cpu_arch, 0, NULL, 0, 462 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 463 sysctl_createv(clog, 0, NULL, NULL, 464 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 465 CTLTYPE_INT, "powersave", NULL, 466 sysctl_machdep_powersave, 0, &cpu_do_powersave, 0, 467 CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL); 468 sysctl_createv(clog, 0, NULL, NULL, 469 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, 470 CTLTYPE_INT, "cpu_id", NULL, 471 NULL, curcpu()->ci_arm_cpuid, NULL, 0, 472 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 473 #ifdef FPU_VFP 474 sysctl_createv(clog, 0, NULL, NULL, 475 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 476 CTLTYPE_INT, "fpu_id", NULL, 477 NULL, 0, &cpu_info_store[0].ci_vfp_id, 0, 478 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 479 #endif 480 sysctl_createv(clog, 0, NULL, NULL, 481 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 482 CTLTYPE_INT, "fpu_present", NULL, 483 NULL, 0, &cpu_fpu_present, 0, 484 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 485 sysctl_createv(clog, 0, NULL, NULL, 486 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 487 CTLTYPE_INT, "hwdiv_present", NULL, 488 NULL, 0, &cpu_hwdiv_present, 0, 489 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 490 sysctl_createv(clog, 0, NULL, NULL, 491 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 492 CTLTYPE_INT, "neon_present", NULL, 493 NULL, 0, &cpu_neon_present, 0, 494 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 495 sysctl_createv(clog, 0, NULL, NULL, 496 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 
497 CTLTYPE_STRUCT, "id_isar", NULL, 498 NULL, 0, 499 cpu_instruction_set_attributes, 500 sizeof(cpu_instruction_set_attributes), 501 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 502 sysctl_createv(clog, 0, NULL, NULL, 503 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 504 CTLTYPE_STRUCT, "id_mmfr", NULL, 505 NULL, 0, 506 cpu_memory_model_features, 507 sizeof(cpu_memory_model_features), 508 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 509 sysctl_createv(clog, 0, NULL, NULL, 510 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 511 CTLTYPE_STRUCT, "id_pfr", NULL, 512 NULL, 0, 513 cpu_processor_features, 514 sizeof(cpu_processor_features), 515 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 516 sysctl_createv(clog, 0, NULL, NULL, 517 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 518 CTLTYPE_STRUCT, "id_mvfr", NULL, 519 NULL, 0, 520 cpu_media_and_vfp_features, 521 sizeof(cpu_media_and_vfp_features), 522 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 523 sysctl_createv(clog, 0, NULL, NULL, 524 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 525 CTLTYPE_INT, "simd_present", NULL, 526 NULL, 0, &cpu_simd_present, 0, 527 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 528 sysctl_createv(clog, 0, NULL, NULL, 529 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 530 CTLTYPE_INT, "simdex_present", NULL, 531 NULL, 0, &cpu_simdex_present, 0, 532 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 533 sysctl_createv(clog, 0, NULL, NULL, 534 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 535 CTLTYPE_INT, "synchprim_present", NULL, 536 NULL, 0, &cpu_synchprim_present, 0, 537 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 538 sysctl_createv(clog, 0, NULL, NULL, 539 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 540 CTLTYPE_INT, "printfataltraps", NULL, 541 NULL, 0, &cpu_printfataltraps, 0, 542 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 543 cpu_unaligned_sigbus = 544 #if defined(__ARMEL__) 545 !CPU_IS_ARMV6_P() && !CPU_IS_ARMV7_P(); 546 #elif defined(_ARM_ARCH_BE8) 547 0; 548 #else 549 1; 550 #endif 551 sysctl_createv(clog, 0, NULL, NULL, 552 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 553 CTLTYPE_INT, "unaligned_sigbus", 554 SYSCTL_DESCR("Do SIGBUS for fixed 
unaligned accesses"), 555 NULL, 0, &cpu_unaligned_sigbus, 0, 556 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 557 } 558 559 void 560 parse_mi_bootargs(char *args) 561 { 562 int integer; 563 564 if (get_bootconf_option(args, "-1", BOOTOPT_TYPE_BOOLEAN, &integer)) 565 if (integer) 566 boothowto |= RB_MD1; 567 if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer) 568 || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer)) 569 if (integer) 570 boothowto |= RB_SINGLE; 571 if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer) 572 || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer) 573 || get_bootconf_option(args, "-d", BOOTOPT_TYPE_BOOLEAN, &integer)) 574 if (integer) 575 boothowto |= RB_KDB; 576 if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer) 577 || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer)) 578 if (integer) 579 boothowto |= RB_ASKNAME; 580 if (get_bootconf_option(args, "userconf", BOOTOPT_TYPE_BOOLEAN, &integer) 581 || get_bootconf_option(args, "-c", BOOTOPT_TYPE_BOOLEAN, &integer)) 582 if (integer) 583 boothowto |= RB_USERCONF; 584 if (get_bootconf_option(args, "halt", BOOTOPT_TYPE_BOOLEAN, &integer) 585 || get_bootconf_option(args, "-b", BOOTOPT_TYPE_BOOLEAN, &integer)) 586 if (integer) 587 boothowto |= RB_HALT; 588 if (get_bootconf_option(args, "-1", BOOTOPT_TYPE_BOOLEAN, &integer)) 589 if (integer) 590 boothowto |= RB_MD1; 591 if (get_bootconf_option(args, "-2", BOOTOPT_TYPE_BOOLEAN, &integer)) 592 if (integer) 593 boothowto |= RB_MD2; 594 if (get_bootconf_option(args, "-3", BOOTOPT_TYPE_BOOLEAN, &integer)) 595 if (integer) 596 boothowto |= RB_MD3; 597 if (get_bootconf_option(args, "-4", BOOTOPT_TYPE_BOOLEAN, &integer)) 598 if (integer) 599 boothowto |= RB_MD4; 600 601 /* if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer)) 602 bufpages = integer;*/ 603 604 #if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE) 605 if (get_bootconf_option(args, 
"memorydisc", BOOTOPT_TYPE_INT, &integer) 606 || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) { 607 md_root_size = integer; 608 md_root_size *= 1024; 609 if (md_root_size < 32*1024) 610 md_root_size = 32*1024; 611 if (md_root_size > 2048*1024) 612 md_root_size = 2048*1024; 613 } 614 #endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */ 615 616 if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer) 617 || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer)) 618 if (integer) 619 boothowto |= AB_QUIET; 620 if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer) 621 || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer)) 622 if (integer) 623 boothowto |= AB_VERBOSE; 624 if (get_bootconf_option(args, "debug", BOOTOPT_TYPE_BOOLEAN, &integer) 625 || get_bootconf_option(args, "-x", BOOTOPT_TYPE_BOOLEAN, &integer)) 626 if (integer) 627 boothowto |= AB_DEBUG; 628 if (get_bootconf_option(args, "silent", BOOTOPT_TYPE_BOOLEAN, &integer) 629 || get_bootconf_option(args, "-z", BOOTOPT_TYPE_BOOLEAN, &integer)) 630 if (integer) 631 boothowto |= AB_SILENT; 632 } 633 634 #ifdef __HAVE_FAST_SOFTINTS 635 #if IPL_SOFTSERIAL != IPL_SOFTNET + 1 636 #error IPLs are screwed up 637 #elif IPL_SOFTNET != IPL_SOFTBIO + 1 638 #error IPLs are screwed up 639 #elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1 640 #error IPLs are screwed up 641 #elif !(IPL_SOFTCLOCK > IPL_NONE) 642 #error IPLs are screwed up 643 #elif (IPL_NONE != 0) 644 #error IPLs are screwed up 645 #endif 646 647 #ifndef __HAVE_PIC_FAST_SOFTINTS 648 #define SOFTINT2IPLMAP \ 649 (((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \ 650 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \ 651 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \ 652 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4))) 653 #define SOFTINT2IPL(l) ((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f) 654 655 /* 656 * This returns a mask of softint IPLs that be dispatch at 
<ipl>
 * SOFTIPLMASK(IPL_NONE)	= 0x0000000f
 * SOFTIPLMASK(IPL_SOFTCLOCK)	= 0x0000000e
 * SOFTIPLMASK(IPL_SOFTBIO)	= 0x0000000c
 * SOFTIPLMASK(IPL_SOFTNET)	= 0x00000008
 * SOFTIPLMASK(IPL_SOFTSERIAL)	= 0x00000000
 */
#define	SOFTIPLMASK(ipl)	((0x0f << (ipl)) & 0x0f)

void softint_switch(lwp_t *, int);

/*
 * Mark the given softint level(s) pending on the current CPU;
 * dosoftints() will pick them up when the IPL allows.
 */
void
softint_trigger(uintptr_t mask)
{
	curcpu()->ci_softints |= mask;
}

/*
 * Bind the softint lwp <l> to its level's slot on its CPU and hand
 * back (via *machdep) the pending bit used by softint_trigger().
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	/* Cross-check the SOFTINT2IPL mapping for each level. */
	KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}

/*
 * Dispatch every pending softint that is unmasked at the current
 * IPL, highest priority (SERIAL) first, switching to the matching
 * softint lwp for each one.
 */
void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;
	const uint32_t softiplmask = SOFTIPLMASK(opl);
	int s;

	s = splhigh();
	KASSERT(s == opl);
	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
		if (softints == 0) {
			break;
		}
		/* Clear the level's pending bit, then run its lwp. */
#define	DOSOFTINT(n) \
		if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
	splx(s);
}
#endif /* !__HAVE_PIC_FAST_SOFTINTS */
#endif /*
__HAVE_FAST_SOFTINTS */

#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
#ifdef FDT
	arm_fdt_module_init();
#endif
}
#endif /* MODULAR */

/*
 * Check whether direct access to physical address <pa> is allowed:
 * managed RAM is always permitted, anything outside it requires the
 * UNMANAGEDMEM kauth(9) privilege.
 */
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
	if (pa >= physical_start && pa < physical_end)
		return 0;

	return kauth_authorize_machdep(kauth_cred_get(),
	    KAUTH_MACHDEP_UNMANAGEDMEM, NULL, NULL, NULL, NULL);
}

#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
/*
 * Carve this CPU's idle-lwp uarea out of the preallocated idlestack
 * region, indexed by cpu number.
 */
vaddr_t
cpu_uarea_alloc_idlelwp(struct cpu_info *ci)
{
	const vaddr_t va = idlestack.pv_va + cpu_index(ci) * USPACE;
//	printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va);
	return va;
}
#endif

#ifdef MULTIPROCESSOR
/*
 * Initialise a secondary processor.
 *
 * printf isn't available to us for a number of reasons.
 *
 * - kprint_init has been called and printf will try to take locks which we
 *   can't do just yet because bootstrap translation tables do not allowing
 *   caching.
 *
 * - kmutex(9) relies on curcpu which isn't setup yet.
 *
 */
void __noasan
cpu_init_secondary_processor(int cpuindex)
{
	// pmap_kernel has been successfully built and we can switch to it
	cpu_domains(DOMAIN_DEFAULT);
	cpu_idcache_wbinv_all();

	VPRINTS("index: ");
	VPRINTX(cpuindex);
	VPRINTS(" ttb");

	cpu_setup(boot_args);

#ifdef ARM_MMU_EXTENDED
	/*
	 * TTBCR should have been initialized by the MD start code.
	 */
	KASSERT((armreg_contextidr_read() & 0xff) == 0);
	KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
	/*
	 * Disable lookups via TTBR0 until there is an activated pmap.
	 */

	armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	cpu_setttb(pmap_kernel()->pm_l1_pa , KERNEL_PID);
	isb();
#else
	cpu_setttb(pmap_kernel()->pm_l1->l1_physaddr, true);
#endif

	cpu_tlb_flushID();

	VPRINTS(" (TTBR0=");
	VPRINTX(armreg_ttbr_read());
	VPRINTS(")");

#ifdef ARM_MMU_EXTENDED
	VPRINTS(" (TTBR1=");
	VPRINTX(armreg_ttbr1_read());
	VPRINTS(")");
	VPRINTS(" (TTBCR=");
	VPRINTX(armreg_ttbcr_read());
	VPRINTS(")");
#endif

	struct cpu_info * ci = &cpu_info_store[cpuindex];

	VPRINTS(" ci = ");
	VPRINTX((int)ci);

	/* Snapshot this CPU's identification registers into cpu_info. */
	ci->ci_ctrl = armreg_sctlr_read();
	ci->ci_arm_cpuid = cpu_idnum();
	ci->ci_arm_cputype = ci->ci_arm_cpuid & CPU_ID_CPU_MASK;
	ci->ci_arm_cpurev = ci->ci_arm_cpuid & CPU_ID_REVISION_MASK;

	ci->ci_midr = armreg_midr_read();
	ci->ci_actlr = armreg_auxctl_read();
	ci->ci_revidr = armreg_revidr_read();
	ci->ci_mpidr = armreg_mpidr_read();

	arm_cpu_topology_set(ci, ci->ci_mpidr);

	VPRINTS(" vfp");
	vfp_detect(ci);

	VPRINTS(" hatched |=");
	VPRINTX(__BIT(cpuindex));
	VPRINTS("\n\r");

	cpu_set_hatched(cpuindex);

	/*
	 * return to assembly to wait for cpu_boot_secondary_processors
	 */
}

/* Send a cross-call IPI; ci == NULL broadcasts to all other CPUs. */
void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_XCALL);
}

/* Send a generic IPI; ci == NULL broadcasts to all other CPUs. */
void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_GENERIC);
}

#endif /* MULTIPROCESSOR */

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
/*
 * Translate <pa> through the direct map, if it covers that address;
 * returns false (leaving *vap untouched) when it does not.
 */
bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
	bool rv;
	vaddr_t va = pmap_direct_mapped_phys(pa, &rv, 0);
	if (rv) {
		*vap = va;
	}
	return rv;
}
#endif

/*
 * Report the VM page color for <pa>.  Returns true when coloring
 * does not matter (no cache-prefer mask on this MMU).
 */
bool
mm_md_page_color(paddr_t pa, int *colorp)
{
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
	*colorp = atop(pa & arm_cache_prefer_mask);

	return arm_cache_prefer_mask ? false : true;
#else
	*colorp = 0;

	return true;
#endif
}

#if defined(FDT)
extern char KERNEL_BASE_phys[];
#define KERNEL_BASE_PHYS ((paddr_t)KERNEL_BASE_phys)

/*
 * FDT entry point for kernel VM setup: clamp the usable RAM when it
 * cannot all be mapped, then hand off to the common arm32 bootmem
 * and kernel VM initialisation.
 */
void
cpu_kernel_vm_init(paddr_t memory_start, psize_t memory_size)
{
	const struct fdt_platform *plat = fdt_platform_find();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	const bool mapallmem_p = true;
#ifndef PMAP_NEED_ALLOC_POOLPAGE
	if (memory_size > KERNEL_VM_BASE - KERNEL_BASE) {
		VPRINTF("%s: dropping RAM size from %luMB to %uMB\n",
		    __func__, (unsigned long) (memory_size >> 20),
		    (KERNEL_VM_BASE - KERNEL_BASE) >> 20);
		memory_size = KERNEL_VM_BASE - KERNEL_BASE;
	}
#endif
#else
	const bool mapallmem_p = false;
#endif

	VPRINTF("%s: kernel phys start %" PRIxPADDR " end %" PRIxPADDR "\n",
	    __func__, memory_start, memory_start + memory_size);

	arm32_bootmem_init(memory_start, memory_size, KERNEL_BASE_PHYS);
	arm32_kernel_vm_init(KERNEL_VM_BASE, ARM_VECTORS_HIGH, 0,
	    plat->fp_devmap(), mapallmem_p);
}
#endif
