/*	$NetBSD: arm32_machdep.c,v 1.65 2009/03/15 22:20:09 cegger Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
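 *
 * This file provides cpu_startup(), the machdep.* sysctl handlers,
 * parse_mi_bootargs() and, under __HAVE_FAST_SOFTINTS, the MD soft
 * interrupt dispatch (softint_trigger(), softint_init_md(), dosoftints()).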
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated      : 18/04/01 updated for new wscons
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.65 2009/03/15 22:20:09 cegger Exp $");

#include "opt_md.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/msgbuf.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>
#include <sys/cpu.h>

#include <dev/cons.h>

#include <arm/arm32/katelib.h>
#include <arm/arm32/machdep.h>
#include <machine/bootconfig.h>

#include "md.h"

struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

extern int physmem;

#if NMD > 0 && defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
extern size_t md_root_size;		/* Memory disc size */
#endif	/* NMD && MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

pv_addr_t kernelstack;

void *msgbufaddr;
extern paddr_t msgbufphys;

int kernel_debug = 0;

struct user *proc0paddr;

/* exported variable to be filled in by the bootloaders */
char *booted_kernel;


/* Prototypes */

void data_abort_handler(trapframe_t *frame);
void prefetch_abort_handler(trapframe_t *frame);
extern void configure(void);

/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
	extern unsigned int page0[], page0_data[];
	unsigned int *vectors = (unsigned int *) va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
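		 *
		 * (For reference: CPU_CONTROL_VECRELOC is the "V" bit in
		 * the CP15 c1 control register; setting it below makes the
		 * CPU fetch its exception vectors from ARM_VECTORS_HIGH
		 * (0xffff0000) instead of address 0.)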
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}

/*
 * Debug function just to park the CPU
 */

void
halt(void)
{
	while (1)
		cpu_sleep(0);
}


/* Sync the discs and unmount the filesystems */

void
bootsync(void)
{
	static bool bootsyncdone = false;

	if (bootsyncdone) return;

	bootsyncdone = true;

	/* Make sure we can still manage to do things */
	if (GetCPSR() & I32_bit) {
		/*
		 * If we get here then boot has been called without RB_NOSYNC
		 * and interrupts were disabled. This means the boot() call
		 * did not come from a user process e.g. shutdown, but must
		 * have come from somewhere in the kernel.
		 */
		IRQenable;
		printf("Warning: IRQs disabled during boot()\n");
	}

	vfs_shutdown();
}

/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code.
 *
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;
	u_int loop;
	char pbuf[9];

	/* Set the CPU control register */
	cpu_setup(boot_args);

	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ);

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was set up during the secondary bootstrap */
	for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
		    msgbufphys + loop * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf("%s%s", copyright, version);

	format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE,
	    false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	curpcb = &lwp0.l_addr->u_pcb;
	curpcb->pcb_flags = 0;
	curpcb->pcb_un.un_32.pcb32_sp = (u_int)lwp0.l_addr +
	    USPACE_SVC_STACK_TOP;

	curpcb->pcb_tf = (struct trapframe *)curpcb->pcb_un.un_32.pcb32_sp - 1;
}

/*
 * machine dependent system variables.
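 *
 * The handlers below back the machdep.* sysctl subtree created in
 * sysctl_machdep_setup(): machdep.debug, machdep.booted_device,
 * machdep.booted_kernel, machdep.console_device and machdep.powersave.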
 */
static int
sysctl_machdep_booted_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_device == NULL)
		return (EOPNOTSUPP);

	node = *rnode;
	node.sysctl_data = booted_device->dv_xname;
	node.sysctl_size = strlen(booted_device->dv_xname) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_kernel == NULL || booted_kernel[0] == '\0')
		return (EOPNOTSUPP);

	node = *rnode;
	node.sysctl_data = booted_kernel;
	node.sysctl_size = strlen(booted_kernel) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

static int
sysctl_machdep_powersave(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int error, newval;

	newval = cpu_do_powersave;
	node.sysctl_data = &newval;
	if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
		node.sysctl_flags &= ~CTLFLAG_READWRITE;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL || newval == cpu_do_powersave)
		return (error);

	if (newval < 0 || newval > 1)
		return (EINVAL);
	cpu_do_powersave = newval;

	return (0);
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "debug", NULL,
		       NULL, 0, &kernel_debug, 0,
		       CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_booted_device, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_booted_kernel, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "console_device", NULL,
		       sysctl_consdev, 0, NULL, sizeof(dev_t),
		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "powersave", NULL,
		       sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
		       CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
}

void
parse_mi_bootargs(char *args)
{
	int integer;

	if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_SINGLE;
	if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_KDB;
	if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_ASKNAME;

#ifdef PMAP_DEBUG
	if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
		pmap_debug_level = integer;
		pmap_debug(pmap_debug_level);
	}
#endif	/* PMAP_DEBUG */

/*	if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
		bufpages = integer;*/

#if NMD > 0 && defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
	if (get_bootconf_option(args,
	    "memorydisc", BOOTOPT_TYPE_INT, &integer)
	    || get_bootconf_option(args,
	    "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
		md_root_size = integer;
		md_root_size *= 1024;
		if (md_root_size < 32*1024)
			md_root_size = 32*1024;
		if (md_root_size > 2048*1024)
			md_root_size = 2048*1024;
	}
#endif	/* NMD && MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

	if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_QUIET;
	if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_VERBOSE;
}

#ifdef __HAVE_FAST_SOFTINTS
#if IPL_SOFTSERIAL != IPL_SOFTNET + 1
#error IPLs are screwed up
#elif IPL_SOFTNET != IPL_SOFTBIO + 1
#error IPLs are screwed up
#elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
#error IPLs are screwed up
#elif !(IPL_SOFTCLOCK > IPL_NONE)
#error IPLs are screwed up
#elif (IPL_NONE != 0)
#error IPLs are screwed up
#endif

#define	SOFTINT2IPLMAP \
	(((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
	 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \
	 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \
	 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4)))
#define	SOFTINT2IPL(l)	((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)

/*
 * This returns a mask of softint IPLs that may be dispatched at <ipl>
 *	SOFTIPLMASK(IPL_NONE)		= 0x0000000f
 *	SOFTIPLMASK(IPL_SOFTCLOCK)	= 0x0000000e
 *	SOFTIPLMASK(IPL_SOFTBIO)	= 0x0000000c
 *	SOFTIPLMASK(IPL_SOFTNET)	= 0x00000008
 *	SOFTIPLMASK(IPL_SOFTSERIAL)	= 0x00000000
 */
#define	SOFTIPLMASK(ipl)	(0x0f << (ipl))

void	softint_switch(lwp_t *, int);

void
softint_trigger(uintptr_t mask)
{
	curcpu()->ci_softints |= mask;
}

void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t **lp = &curcpu()->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}

void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;
	const uint32_t softiplmask = SOFTIPLMASK(opl);

	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		if (softints == 0)
			return;
		ci->ci_cpl = IPL_HIGH;
#define	DOSOFTINT(n) \
		if (softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			ci->ci_cpl = opl; \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
}
#endif /* __HAVE_FAST_SOFTINTS */