1 /* $NetBSD: arm32_machdep.c,v 1.132 2020/02/15 08:16:11 skrll Exp $ */ 2 3 /* 4 * Copyright (c) 1994-1998 Mark Brinicombe. 5 * Copyright (c) 1994 Brini. 6 * All rights reserved. 7 * 8 * This code is derived from software written for Brini by Mark Brinicombe 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by Mark Brinicombe 21 * for the NetBSD Project. 22 * 4. The name of the company nor the name of the author may be used to 23 * endorse or promote products derived from this software without specific 24 * prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 27 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 28 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 29 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 
37 * 38 * Machine dependent functions for kernel setup 39 * 40 * Created : 17/09/94 41 * Updated : 18/04/01 updated for new wscons 42 */ 43 44 #include <sys/cdefs.h> 45 __KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.132 2020/02/15 08:16:11 skrll Exp $"); 46 47 #include "opt_arm_debug.h" 48 #include "opt_arm_start.h" 49 #include "opt_fdt.h" 50 #include "opt_modular.h" 51 #include "opt_md.h" 52 #include "opt_multiprocessor.h" 53 #include "opt_pmap_debug.h" 54 55 #include <sys/param.h> 56 57 #include <sys/atomic.h> 58 #include <sys/buf.h> 59 #include <sys/cpu.h> 60 #include <sys/device.h> 61 #include <sys/intr.h> 62 #include <sys/ipi.h> 63 #include <sys/kauth.h> 64 #include <sys/kernel.h> 65 #include <sys/mbuf.h> 66 #include <sys/module.h> 67 #include <sys/mount.h> 68 #include <sys/msgbuf.h> 69 #include <sys/proc.h> 70 #include <sys/reboot.h> 71 #include <sys/sysctl.h> 72 #include <sys/systm.h> 73 #include <sys/xcall.h> 74 75 #include <uvm/uvm_extern.h> 76 77 #include <dev/cons.h> 78 #include <dev/mm.h> 79 80 #include <arm/locore.h> 81 82 #include <arm/arm32/machdep.h> 83 84 #include <machine/bootconfig.h> 85 #include <machine/pcb.h> 86 87 #if defined(FDT) 88 #include <arm/fdt/arm_fdtvar.h> 89 #include <arch/evbarm/fdt/platform.h> 90 #endif 91 92 #ifdef VERBOSE_INIT_ARM 93 #define VPRINTF(...) printf(__VA_ARGS__) 94 #ifdef __HAVE_GENERIC_START 95 void generic_prints(const char *); 96 void generic_printx(int); 97 #define VPRINTS(s) generic_prints(s) 98 #define VPRINTX(x) generic_printx(x) 99 #else 100 #define VPRINTS(s) __nothing 101 #define VPRINTX(x) __nothing 102 #endif 103 #else 104 #define VPRINTF(...) 
__nothing
#define VPRINTS(s)		__nothing
#define VPRINTX(x)		__nothing
#endif

/* Reset handling: locore jumps through these pointers on reboot. */
void (*cpu_reset_address)(void);	/* Used by locore */
paddr_t cpu_reset_address_paddr;	/* Used by locore */

/* Submap of kernel_map used by physio(9); created in cpu_startup(). */
struct vm_map *phys_map = NULL;

#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
extern size_t md_root_size;		/* Memory disc size */
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

/* Per-CPU-mode kernel stacks, set up by the MD bootstrap code. */
pv_addr_t kernelstack;
pv_addr_t abtstack;
pv_addr_t fiqstack;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t idlestack;

/* Kernel message buffer; the physical address is set up during bootstrap. */
void * msgbufaddr;
extern paddr_t msgbufphys;

/* CPU feature flags, exported via the machdep sysctl nodes created below. */
int kernel_debug = 0;
int cpu_printfataltraps = 0;
int cpu_fpu_present;
int cpu_hwdiv_present;
int cpu_neon_present;
int cpu_simd_present;
int cpu_simdex_present;
int cpu_umull_present;
int cpu_synchprim_present;
int cpu_unaligned_sigbus;
const char *cpu_arch = "";

/* Raw CPU ID register words (ISAR/MMFR/PFR/MVFR), also exported via sysctl. */
int cpu_instruction_set_attributes[6];
int cpu_memory_model_features[4];
int cpu_processor_features[2];
int cpu_media_and_vfp_features[2];

/* exported variable to be filled in by the bootloaders */
char *booted_kernel;

/* Prototypes */

void data_abort_handler(trapframe_t *frame);
void prefetch_abort_handler(trapframe_t *frame);
extern void configure(void);

/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
	/*
	 * If this processor has the security extension, don't bother
	 * to move/map the vector page.  Simply point VBAR to the copy
	 * that exists in the .text segment.
	 */
#ifndef ARM_HAS_VBAR
	if (va == ARM_VECTORS_LOW
	    && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) {
#endif
		extern const uint32_t page0rel[];
		vector_page = (vaddr_t)page0rel;
		/* VBAR requires a 32-byte aligned address. */
		KASSERT((vector_page & 0x1f) == 0);
		armreg_vbar_write(vector_page);
		VPRINTF(" vbar=%p", page0rel);
		/* Make sure vectors are NOT relocated to the high page. */
		cpu_control(CPU_CONTROL_VECRELOC, 0);
		return;
#ifndef ARM_HAS_VBAR
	}
#endif
#endif
#ifndef ARM_HAS_VBAR
	/* Only the boot CPU copies the vectors into place. */
	if (CPU_IS_PRIMARY(curcpu())) {
		extern unsigned int page0[], page0_data[];
		unsigned int *vectors = (int *) va;
		unsigned int *vectors_data = vectors + (page0_data - page0);
		int vec;

		/*
		 * Loop through the vectors we're taking over, and copy the
		 * vector's insn and data word.
		 */
		for (vec = 0; vec < ARM_NVEC; vec++) {
			if ((which & (1 << vec)) == 0) {
				/* Don't want to take over this vector. */
				continue;
			}
			vectors[vec] = page0[vec];
			vectors_data[vec] = page0_data[vec];
		}

		/* Now sync the vectors. */
		cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

		vector_page = va;
	}

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
#endif
}

/*
 * Debug function just to park the CPU
 */

void
halt(void)
{
	/* Spin forever, sleeping the CPU between (spurious) wakeups. */
	while (1)
		cpu_sleep(0);
}


/* Sync the discs, unmount the filesystems, and adjust the todr */

void
bootsync(void)
{
	static bool bootsyncdone = false;

	/* Only do the sync/unmount dance once per shutdown. */
	if (bootsyncdone) return;

	bootsyncdone = true;

	/* Make sure we can still manage to do things */
	if (GetCPSR() & I32_bit) {
		/*
		 * If we get here then boot has been called without RB_NOSYNC
		 * and interrupts were disabled. This means the boot() call
		 * did not come from a user process e.g. shutdown, but must
		 * have come from somewhere in the kernel.
		 */
		IRQenable;
		printf("Warning IRQ's disabled during boot()\n");
	}

	vfs_shutdown();

	resettodr();
}

/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code.
 *
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;

#ifndef __HAVE_GENERIC_START
	/* Set the CPU control register */
	cpu_setup(boot_args);
#endif

#ifndef ARM_HAS_VBAR
	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ);
#endif

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

#ifdef FDT
	/* Let the FDT platform run its own startup hook, if any. */
	if (arm_fdt_platform()->ap_startup != NULL)
		arm_fdt_platform()->ap_startup();
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was setup during the secondary boot strap */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)msgbufaddr, NULL)) {
		/* Not yet mapped: enter each page of the message buffer. */
		for (u_int loop = 0; loop < btoc(MSGBUFSIZE); ++loop) {
			pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
			    msgbufphys + loop * PAGE_SIZE,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
	}
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Allocate a submap for physio
	 */
	minaddr = 0;
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	banner();

	/*
	 * This is actually done by initarm_common, but not all ports use it
	 * yet so do it here to catch them as well
	 */
	struct lwp * const l = &lwp0;
	struct pcb * const pcb = lwp_getpcb(l);

	/* Zero out the PCB. */
	memset(pcb, 0, sizeof(*pcb));

	/* Kernel SP starts at the top of lwp0's SVC stack, below a trapframe. */
	pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
	pcb->pcb_ksp -= sizeof(struct trapframe);

	struct trapframe * tf = (struct trapframe *)pcb->pcb_ksp;

	/* Zero out the trapframe. */
	memset(tf, 0, sizeof(*tf));
	lwp_settrapframe(l, tf);

#if defined(__ARMEB__)
	/* Big-endian: ARMv7 user mode additionally gets the E (endian) bit. */
	tf->tf_spsr = PSR_USR32_MODE | (CPU_IS_ARMV7_P() ? PSR_E_BIT : 0);
#else
	tf->tf_spsr = PSR_USR32_MODE;
#endif

	/* Platform hook; weak-aliased to the empty default below. */
	cpu_startup_hook();
}

__weak_alias(cpu_startup_hook,cpu_startup_default)
/* Default (no-op) implementation of cpu_startup_hook. */
void
cpu_startup_default(void)
{
}

/*
 * machine dependent system variables.
 */

/* sysctl helper: machdep.booted_device — name of the autoconf boot device. */
static int
sysctl_machdep_booted_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_device == NULL)
		return (EOPNOTSUPP);

	node = *rnode;
	node.sysctl_data = __UNCONST(device_xname(booted_device));
	node.sysctl_size = strlen(device_xname(booted_device)) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/* sysctl helper: machdep.booted_kernel — path the bootloader recorded. */
static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_kernel == NULL || booted_kernel[0] == '\0')
		return (EOPNOTSUPP);

	node = *rnode;
	node.sysctl_data = booted_kernel;
	node.sysctl_size = strlen(booted_kernel) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/* sysctl helper: machdep.cpu_arch — architecture version string. */
static int
sysctl_machdep_cpu_arch(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	node.sysctl_data = __UNCONST(cpu_arch);
	node.sysctl_size = strlen(cpu_arch) + 1;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

/* sysctl helper: machdep.powersave — read/write cpu_do_powersave (0/1). */
static int
sysctl_machdep_powersave(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int error, newval;

	newval = cpu_do_powersave;
	node.sysctl_data = &newval;
	/* If the CPU has no sleep function, the node is read-only. */
	if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
		node.sysctl_flags &= ~CTLFLAG_READWRITE;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL || newval == cpu_do_powersave)
		return (error);

	if (newval < 0 || newval > 1)
		return (EINVAL);
	cpu_do_powersave = newval;

	return (0);
}

/* Create the machdep sysctl subtree (CPU features, boot info, powersave). */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "machdep", NULL,
		NULL, 0, NULL, 0,
		CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "debug", NULL,
		NULL, 0, &kernel_debug, 0,
		CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "booted_device", NULL,
		sysctl_machdep_booted_device, 0, NULL, 0,
		CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "booted_kernel", NULL,
		sysctl_machdep_booted_kernel, 0, NULL, 0,
		CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRUCT, "console_device", NULL,
		sysctl_consdev, 0, NULL, sizeof(dev_t),
		CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "cpu_arch", NULL,
		sysctl_machdep_cpu_arch, 0, NULL, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "powersave", NULL,
		sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
		CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		CTLTYPE_INT, "cpu_id", NULL,
		NULL, curcpu()->ci_arm_cpuid, NULL, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#ifdef FPU_VFP
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "fpu_id", NULL,
		NULL, 0, &cpu_info_store[0].ci_vfp_id, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "fpu_present", NULL,
		NULL, 0, &cpu_fpu_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "hwdiv_present", NULL,
		NULL, 0, &cpu_hwdiv_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "neon_present", NULL,
		NULL, 0, &cpu_neon_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_STRUCT, "id_isar", NULL,
		NULL, 0,
		cpu_instruction_set_attributes,
		sizeof(cpu_instruction_set_attributes),
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_STRUCT, "id_mmfr", NULL,
		NULL, 0,
		cpu_memory_model_features,
		sizeof(cpu_memory_model_features),
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_STRUCT, "id_pfr", NULL,
		NULL, 0,
		cpu_processor_features,
		sizeof(cpu_processor_features),
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_STRUCT, "id_mvfr", NULL,
		NULL, 0,
		cpu_media_and_vfp_features,
		sizeof(cpu_media_and_vfp_features),
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "simd_present", NULL,
		NULL, 0, &cpu_simd_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "simdex_present", NULL,
		NULL, 0, &cpu_simdex_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "synchprim_present", NULL,
		NULL, 0, &cpu_synchprim_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "printfataltraps", NULL,
		NULL, 0, &cpu_printfataltraps, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	/* Pre-v6 CPUs cannot fix up unaligned accesses, so default to SIGBUS. */
	cpu_unaligned_sigbus = !CPU_IS_ARMV6_P() && !CPU_IS_ARMV7_P();
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "unaligned_sigbus",
		SYSCTL_DESCR("Do SIGBUS for fixed unaligned accesses"),
		NULL, 0, &cpu_unaligned_sigbus, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}

/*
 * Parse machine-independent boot arguments from the bootloader command
 * line into boothowto flags (and a few MD tunables).
 */
void
parse_mi_bootargs(char *args)
{
	int integer;

	if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_SINGLE;
	if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-d", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_KDB;
	if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_ASKNAME;

#ifdef PMAP_DEBUG
	if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
		pmap_debug_level = integer;
		pmap_debug(pmap_debug_level);
	}
#endif	/* PMAP_DEBUG */

/*	if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
		bufpages = integer;*/

#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
	/* Memory-disk size is given in KB; clamp to [32KB, 2MB]. */
	if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
	    || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
		md_root_size = integer;
		md_root_size *= 1024;
		if (md_root_size < 32*1024)
			md_root_size = 32*1024;
		if (md_root_size > 2048*1024)
			md_root_size = 2048*1024;
	}
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

	if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_QUIET;
	if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_VERBOSE;
	if (get_bootconf_option(args, "debug", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-x", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_DEBUG;
}

#ifdef __HAVE_FAST_SOFTINTS
/* The fast-softint code below relies on the softint IPLs being contiguous. */
#if IPL_SOFTSERIAL != IPL_SOFTNET + 1
#error IPLs are screwed up
#elif IPL_SOFTNET != IPL_SOFTBIO + 1
#error IPLs are screwed up
#elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
#error IPLs are screwed up
#elif !(IPL_SOFTCLOCK > IPL_NONE)
#error IPLs are screwed up
#elif (IPL_NONE != 0)
#error IPLs are screwed up
#endif

#ifndef __HAVE_PIC_FAST_SOFTINTS
/* 4-bit-per-level table mapping SOFTINT_* levels to IPL offsets. */
#define SOFTINT2IPLMAP \
	(((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
	 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \
	 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \
	 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4)))
#define SOFTINT2IPL(l)	((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)

/*
 * This returns a mask of softint IPLs that can be dispatched at <ipl>
 * SOFTIPLMASK(IPL_NONE)	= 0x0000000f
 * SOFTIPLMASK(IPL_SOFTCLOCK)	= 0x0000000e
 * SOFTIPLMASK(IPL_SOFTBIO)	= 0x0000000c
 * SOFTIPLMASK(IPL_SOFTNET)	= 0x00000008
 * SOFTIPLMASK(IPL_SOFTSERIAL)	= 0x00000000
 */
#define SOFTIPLMASK(ipl) ((0x0f << (ipl)) & 0x0f)

void softint_switch(lwp_t *, int);

/* Mark the given softint level(s) pending on the current CPU. */
void
softint_trigger(uintptr_t mask)
{
	curcpu()->ci_softints |= mask;
}

/*
 * Record the softint lwp for <level> on its CPU and compute the
 * machine-dependent pending bit for that level.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}

/*
 * Dispatch all pending softints that are allowed to run at the current
 * IPL, highest level first, then restore the original IPL.
 */
void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;
	const uint32_t softiplmask = SOFTIPLMASK(opl);

	splhigh();
	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
		if (softints == 0) {
			/* Nothing (more) to dispatch; drop back to opl. */
			splx(opl);
			return;
		}
		/* Clear the pending bit, then switch to the softint lwp. */
#define	DOSOFTINT(n) \
		if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
}
#endif /* !__HAVE_PIC_FAST_SOFTINTS */
#endif /* __HAVE_FAST_SOFTINTS */

#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
}
#endif /* MODULAR */

/*
 * mm(4) access check: RAM is always accessible; anything outside the
 * managed physical range requires machdep kauth approval.
 */
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
	if (pa >= physical_start && pa < physical_end)
		return 0;

	return kauth_authorize_machdep(kauth_cred_get(),
	    KAUTH_MACHDEP_UNMANAGEDMEM, NULL, NULL, NULL, NULL);
}

#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
/* Hand out the pre-reserved idle-lwp uarea slot for the given CPU. */
vaddr_t
cpu_uarea_alloc_idlelwp(struct cpu_info *ci)
{
	const vaddr_t va = idlestack.pv_va + cpu_index(ci) * USPACE;
	// printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va);
	return va;
}
#endif

#ifdef MULTIPROCESSOR
/*
 * Initialise a secondary processor.
 *
 * printf isn't available to us for a number of reasons.
 *
 * - kprint_init has been called and printf will try to take locks which we
 *   can't do just yet because bootstrap translation tables do not allow
 *   caching.
 *
 * - kmutex(9) relies on curcpu which isn't setup yet.
 *
 */
void
cpu_init_secondary_processor(int cpuindex)
{
	// pmap_kernel has been successfully built and we can switch to it
	cpu_domains(DOMAIN_DEFAULT);
	cpu_idcache_wbinv_all();

	VPRINTS("index: ");
	VPRINTX(cpuindex);
	VPRINTS(" ttb");

	/* Set this CPU's control register the same way the boot CPU did. */
	cpu_setup(boot_args);

#ifdef ARM_MMU_EXTENDED
	/*
	 * TTBCR should have been initialized by the MD start code.
	 */
	KASSERT((armreg_contextidr_read() & 0xff) == 0);
	KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
	/*
	 * Disable lookups via TTBR0 until there is an activated pmap.
	 */

	armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	cpu_setttb(pmap_kernel()->pm_l1_pa , KERNEL_PID);
	arm_isb();
#else
	cpu_setttb(pmap_kernel()->pm_l1->l1_physaddr, true);
#endif

	/* Old translations may reference the bootstrap tables; flush them. */
	cpu_tlb_flushID();

	VPRINTS(" (TTBR0=");
	VPRINTX(armreg_ttbr_read());
	VPRINTS(")");

#ifdef ARM_MMU_EXTENDED
	VPRINTS(" (TTBR1=");
	VPRINTX(armreg_ttbr1_read());
	VPRINTS(")");
	VPRINTS(" (TTBCR=");
	VPRINTX(armreg_ttbcr_read());
	VPRINTS(")");
#endif

	VPRINTS(" hatched|=");
	VPRINTX(__BIT(cpuindex));
	VPRINTS("\n\r");

	/* Tell the boot CPU this secondary is up. */
	cpu_set_hatched(cpuindex);

	/* return to assembly to wait for cpu_boot_secondary_processors */
}

/* Send a cross-call (xcall(9)) IPI to the given CPU. */
void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	/* ci == NULL means broadcast to all but the sending CPU. */
	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_XCALL);
}

/* Send a generic ipi(9) to the given CPU. */
void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	/* ci == NULL means broadcast to all but the sending CPU. */
	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_GENERIC);
}

#endif /* MULTIPROCESSOR */

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
/*
 * mm(4) helper: translate a physical address to its direct-map virtual
 * address, if it has one.  Returns false when pa is not direct-mapped.
 */
bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
	bool rv;
	vaddr_t va = pmap_direct_mapped_phys(pa, &rv, 0);
	if (rv) {
		*vap = va;
	}
	return rv;
}
#endif

/*
 * Report the VM page color of a physical page.  Returns true when the
 * color is definitive (i.e. the cache does not care about coloring).
 */
bool
mm_md_page_color(paddr_t pa, int *colorp)
{
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
	*colorp = atop(pa & arm_cache_prefer_mask);

	/* A zero mask means the cache imposes no coloring constraint. */
	return arm_cache_prefer_mask ? false : true;
#else
	*colorp = 0;

	return true;
#endif
}

#if defined(FDT)
extern char KERNEL_BASE_phys[];
#define KERNEL_BASE_PHYS ((paddr_t)KERNEL_BASE_phys)

/*
 * FDT boot path: register the RAM range with the bootmem code and build
 * the kernel virtual memory layout.
 */
void
cpu_kernel_vm_init(paddr_t memory_start, psize_t memory_size)
{
	const struct arm_platform *plat = arm_fdt_platform();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	const bool mapallmem_p = true;
#ifndef PMAP_NEED_ALLOC_POOLPAGE
	/* Without pool-page allocation, only direct-mappable RAM is usable. */
	if (memory_size > KERNEL_VM_BASE - KERNEL_BASE) {
		VPRINTF("%s: dropping RAM size from %luMB to %uMB\n",
		    __func__, (unsigned long) (memory_size >> 20),
		    (KERNEL_VM_BASE - KERNEL_BASE) >> 20);
		memory_size = KERNEL_VM_BASE - KERNEL_BASE;
	}
#endif
#else
	const bool mapallmem_p = false;
#endif

	VPRINTF("%s: kernel phys start %" PRIxPADDR " end %" PRIxPADDR "\n",
	    __func__, memory_start, memory_start + memory_size);

	arm32_bootmem_init(memory_start, memory_size, KERNEL_BASE_PHYS);
	arm32_kernel_vm_init(KERNEL_VM_BASE, ARM_VECTORS_HIGH, 0,
	    plat->ap_devmap(), mapallmem_p);
}
#endif