/*	$NetBSD: machdep.c,v 1.49 2007/03/16 12:12:14 tsutsui Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah Hdr: machdep.c 1.74 92/12/20
 *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
 */

/*
 * Copyright (c) 2001 Matthew Fredette.
 * Copyright (c) 1994, 1995 Gordon W. Ross
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1988 University of Utah.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah Hdr: machdep.c 1.74 92/12/20
 *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.49 2007/03/16 12:12:14 tsutsui Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_fpu_emulate.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/extent.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/mount.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/vnode.h>
#include <sys/syscallargs.h>
#include <sys/ksyms.h>
#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <uvm/uvm.h>	/* XXX: not _extern ... need vm_map_create */

#include <sys/sysctl.h>

#include <dev/cons.h>

#include <machine/promlib.h>
#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/idprom.h>
#include <machine/kcore.h>
#include <machine/reg.h>
#include <machine/psl.h>
#include <machine/pte.h>
#define _SUN68K_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/pmap.h>

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#endif

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sun2/sun2/control.h>
#include <sun2/sun2/enable.h>
#include <sun2/sun2/machdep.h>

#include <sun68k/sun68k/vme_sun68k.h>

#include "ksyms.h"

/* Defined in locore.s */
extern char kernel_text[];
/* Defined by the linker */
extern char etext[];
/* Defined in vfs_bio.c */
extern u_int bufpages;

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

int physmem;
int fputype;
void *msgbufaddr;

/* Virtual page frame for /dev/mem (see mem.c) */
vaddr_t vmmap;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int safepri = PSL_LOWIPL;

/* Soft copy of the enable register. */
volatile u_short enable_reg_soft = ENABLE_REG_SOFT_UNDEF;

/*
 * Our no-fault fault handler.
 */
label_t *nofault;

/*
 * dvmamap is used to manage DVMA memory.
 */
static struct extent *dvmamap;

/* Our private scratch page for dumping the MMU. */
static vaddr_t dumppage;

static void identifycpu(void);
static void initcpu(void);

/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1, 2, and mountroot, etc.
 */
void
cpu_startup(void)
{
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	/*
	 * Initialize message buffer (for kernel printf).
	 * This is put in physical pages four through seven
	 * so it will always be in the same place after a
	 * reboot. (physical pages 0-3 are reserved by the PROM
	 * for its vector table and other stuff.)
	 * Its mapping was prepared in pmap_bootstrap().
	 * Also, offset some to avoid PROM scribbles.
	 */
	v = (void *)(PAGE_SIZE * 4);
	msgbufaddr = (void *)((char *)v + MSGBUFOFF);
	initmsgbuf(msgbufaddr, MSGBUFSIZE);

#if NKSYMS || defined(DDB) || defined(LKM)
	{
		extern int nsym;
		extern char *ssym, *esym;

		ksyms_init(nsym, ssym, esym);
	}
#endif /* DDB */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	identifycpu();
	fputype = FPU_NONE;
#ifdef FPU_EMULATE
	printf("fpu: emulator\n");
#else
	printf("fpu: no math support\n");
#endif

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * XXX fredette - we force a small number of buffers
	 * to help me debug this on my low-memory machine.
	 * this should go away at some point, allowing the
	 * normal automatic buffer-sizing to happen.
	 */
	bufpages = 37;

	/*
	 * Get scratch page for dumpsys().
	 */
	if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED))
	    == 0)
		panic("startup: alloc dumppage");

	minaddr = 0;
	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    NCARGS, VM_MAP_PAGEABLE, false, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE,
	    false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Allocate a virtual page (for use by /dev/mem)
	 * This page is handed to pmap_enter() therefore
	 * it has to be in the normal kernel VA range.
	 */
	vmmap = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);

	/*
	 * Allocate DMA map for devices on the bus.
	 */
	dvmamap = extent_create("dvmamap",
	    DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (dvmamap == NULL)
		panic("unable to allocate DVMA map");

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();
}

/*
 * Set registers on exec.
 */
void
setregs(struct lwp *l, struct exec_package *pack, u_long stack)
{
	struct trapframe *tf = (struct trapframe *)l->l_md.md_regs;

	tf->tf_sr = PSL_USERSET;
	tf->tf_pc = pack->ep_entry & ~1;
	tf->tf_regs[D0] = 0;
	tf->tf_regs[D1] = 0;
	tf->tf_regs[D2] = 0;
	tf->tf_regs[D3] = 0;
	tf->tf_regs[D4] = 0;
	tf->tf_regs[D5] = 0;
	tf->tf_regs[D6] = 0;
	tf->tf_regs[D7] = 0;
	tf->tf_regs[A0] = 0;
	tf->tf_regs[A1] = 0;
	tf->tf_regs[A2] = (int)l->l_proc->p_psstr;
	tf->tf_regs[A3] = 0;
	tf->tf_regs[A4] = 0;
	tf->tf_regs[A5] = 0;
	tf->tf_regs[A6] = 0;
	tf->tf_regs[SP] = stack;

	/* restore a null state frame */
	l->l_addr->u_pcb.pcb_fpregs.fpf_null = 0;

	l->l_md.md_flags = 0;
}

/*
 * Info for CTL_HW
 */
char	machine[16] = MACHINE;		/* from <machine/param.h> */
char	kernel_arch[16] = "sun2";	/* XXX needs a sysctl node */
char	cpu_model[120];

/*
 * Determine which Sun2 model we are running on.
 */
void
identifycpu(void)
{
	extern char *cpu_string;	/* XXX */

	/* Other stuff? (VAC, mc6888x version, etc.) */
	/* Note: miniroot cares about the kernel_arch part. */
	sprintf(cpu_model, "%s %s", kernel_arch, cpu_string);

	printf("Model: %s\n", cpu_model);
}

/*
 * machine dependent system variables.
 */
#if 0	/* XXX - Not yet... */
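/*
 * Sketch only: a root_device handler would hand sysctl_lookup() a
 * string naming the current root device, much like booted_kernel below.
 */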
static int
sysctl_machdep_root_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;

	node.sysctl_data = some permutation on root_device;
	node.sysctl_size = strlen(root_device) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
#endif

static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	char *cp;

	cp = prom_getbootfile();
	if (cp == NULL || cp[0] == '\0')
		return (ENOENT);

	node.sysctl_data = cp;
	node.sysctl_size = strlen(cp) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "console_device", NULL,
	    sysctl_consdev, 0, NULL, sizeof(dev_t),
	    CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
#if 0	/* XXX - Not yet... */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "root_device", NULL,
	    sysctl_machdep_root_device, 0, NULL, 0,
	    CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "booted_kernel", NULL,
	    sysctl_machdep_booted_kernel, 0, NULL, 0,
	    CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
}

/* See: sig_machdep.c */

/*
 * Do a sync in preparation for a reboot.
 * XXX - This could probably be common code.
 * XXX - And now, most of it is in vfs_shutdown()
 * XXX - Put waittime checks in there too?
 */
int waittime = -1;	/* XXX - Who else looks at this? -gwr */
static void
reboot_sync(void)
{

	/* Check waittime here to localize its use to this function. */
	if (waittime >= 0)
		return;
	waittime = 0;
	vfs_shutdown();
}

/*
 * Common part of the BSD and SunOS reboot system calls.
 */
__dead void
cpu_reboot(int howto, char *user_boot_string)
{
	char *bs, *p;
	char default_boot_string[8];

	/* If system is cold, just halt. (early panic?) */
	if (cold)
		goto haltsys;

	/* Un-blank the screen if appropriate. */
	cnpollc(1);

	if ((howto & RB_NOSYNC) == 0) {
		reboot_sync();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 *
		 * XXX - However, if the kernel has been sitting in ddb,
		 * the time will be way off, so don't set the HW clock!
		 * XXX - Should do sanity check against HW clock. -gwr
		 */
		/* resettodr(); */
	}

	/* Disable interrupts. */
	splhigh();

	/* Write out a crash dump if asked. */
	if (howto & RB_DUMP)
		dumpsys();

	/* run any shutdown hooks */
	doshutdownhooks();

	if (howto & RB_HALT) {
	haltsys:
		printf("halted.\n");
		prom_halt();
	}

	/*
	 * Automatic reboot.
	 */
	bs = user_boot_string;
	if (bs == NULL) {
		/*
		 * Build our own boot string with an empty
		 * boot device/file and (maybe) some flags.
		 * The PROM will supply the device/file name.
		 */
		bs = default_boot_string;
		*bs = '\0';
		if (howto & (RB_KDB|RB_ASKNAME|RB_SINGLE)) {
			/* Append the boot flags. */
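			/*
			 * For example, howto = RB_KDB|RB_SINGLE yields the
			 * string " -ds"; prom_boot() gets that string and
			 * the PROM fills in its default device/file name.
			 */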
			p = bs;
			*p++ = ' ';
			*p++ = '-';
			if (howto & RB_KDB)
				*p++ = 'd';
			if (howto & RB_ASKNAME)
				*p++ = 'a';
			if (howto & RB_SINGLE)
				*p++ = 's';
			*p = '\0';
		}
	}
	printf("rebooting...\n");
	prom_boot(bs);
	for (;;) ;
	/*NOTREACHED*/
}

/*
 * These variables are needed by /sbin/savecore
 */
uint32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

#define	DUMP_EXTRA	3	/* CPU-dependent extra pages */

/*
 * This is called by main to set dumplo, dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf(void)
{
	const struct bdevsw *bdev;
	int devblks;	/* size of dump device in blocks */
	int dumpblks;	/* size of dump image in blocks */
	int (*getsize)(dev_t);

	if (dumpdev == NODEV)
		return;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL) {
		dumpdev = NODEV;
		return;
	}
	getsize = bdev->d_psize;
	if (getsize == NULL)
		return;
	devblks = (*getsize)(dumpdev);
	if (devblks <= ctod(1))
		return;
	devblks &= ~(ctod(1) - 1);

	/*
	 * Note: savecore expects dumpsize to be the
	 * number of pages AFTER the dump header.
	 */
	dumpsize = physmem;

	/* Position dump image near end of space, page aligned. */
	dumpblks = ctod(physmem + DUMP_EXTRA);
	dumplo = devblks - dumpblks;

	/* If it does not fit, truncate it by moving dumplo. */
	/* Note: Must force signed comparison. */
	if (dumplo < ((long)ctod(1))) {
		dumplo = ctod(1);
		dumpsize = dtoc(devblks - dumplo) - DUMP_EXTRA;
	}
}

/* Note: gdb looks for "dumppcb" in a kernel crash dump. */
struct pcb dumppcb;
extern paddr_t avail_start;

/*
 * Write a crash dump.  The format while in swap is:
 *   kcore_seg_t cpu_hdr;
 *   cpu_kcore_hdr_t cpu_data;
 *   padding (PAGE_SIZE-sizeof(kcore_seg_t))
 *   pagemap (2*PAGE_SIZE)
 *   physical memory...
 */
void
dumpsys(void)
{
	const struct bdevsw *dsw;
	kcore_seg_t *kseg_p;
	cpu_kcore_hdr_t *chdr_p;
	struct sun2_kcore_hdr *sh;
	char *vaddr;
	paddr_t paddr;
	int psize, todo, chunk;
	daddr_t blkno;
	int error = 0;

	if (dumpdev == NODEV)
		return;
	dsw = bdevsw_lookup(dumpdev);
	if (dsw == NULL || dsw->d_psize == NULL)
		return;
	if (dumppage == 0)
		return;

	/*
	 * For dumps during autoconfiguration: if the dump device
	 * has already been configured, set up dumplo/dumpsize now.
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	savectx(&dumppcb);

	psize = (*(dsw->d_psize))(dumpdev);
	if (psize == -1) {
		printf("dump area unavailable\n");
		return;
	}

	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	/*
	 * Prepare the dump header, including MMU state.
	 */
	blkno = dumplo;
	todo = dumpsize;	/* pages */
	vaddr = (char *)dumppage;
	memset(vaddr, 0, PAGE_SIZE);

	/* Set pointers to all three parts. */
	kseg_p = (kcore_seg_t *)vaddr;
	chdr_p = (cpu_kcore_hdr_t *)(kseg_p + 1);
	sh = &chdr_p->un._sun2;

	/* Fill in kcore_seg_t part. */
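	/*
	 * c_size counts everything in the dump header (DUMP_EXTRA pages
	 * in all) that follows this kcore_seg_t.
	 */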
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = (ctob(DUMP_EXTRA) - sizeof(*kseg_p));

	/* Fill in cpu_kcore_hdr_t part. */
	strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
	chdr_p->page_size = PAGE_SIZE;
	chdr_p->kernbase = KERNBASE;

	/* Fill in the sun2_kcore_hdr part (MMU state). */
	pmap_kcore_hdr(sh);

	/* Write out the dump header. */
	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
	if (error)
		goto fail;
	blkno += btodb(PAGE_SIZE);

	/* translation RAM (pages zero through seven) */
	for (chunk = 0; chunk < (PAGE_SIZE * 8); chunk += PAGE_SIZE) {
		pmap_get_pagemap((int *)vaddr, chunk);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		blkno += btodb(PAGE_SIZE);
	}

	/*
	 * Now dump physical memory.  Have to do it in two chunks.
	 * The first chunk is "unmanaged" (by the VM code) and its
	 * range of physical addresses is not allowed in pmap_enter.
	 * However, that segment is mapped linearly, so we can just
	 * use the virtual mappings already in place.  The second
	 * chunk is done the normal way, using pmap_enter.
	 *
	 * Note that vaddr==(paddr+KERNBASE) for paddr=0 through etext.
	 */

	/* Do the first chunk (0 <= PA < avail_start) */
	paddr = 0;
	chunk = btoc(avail_start);
	if (chunk > todo)
		chunk = todo;
	do {
		if ((todo & 0xf) == 0)
			printf("\r%4d", todo);
		vaddr = (char *)(paddr + KERNBASE);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
		--todo;
	} while (--chunk > 0);

	/* Do the second chunk (avail_start <= PA < dumpsize) */
	vaddr = (char *)vmmap;	/* Borrow /dev/mem VA */
	do {
		if ((todo & 0xf) == 0)
			printf("\r%4d", todo);
		pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ);
		pmap_update(pmap_kernel());
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		pmap_kremove(vmmap, PAGE_SIZE);
		pmap_update(pmap_kernel());
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
	} while (--todo > 0);

	printf("\rdump succeeded\n");
	return;
fail:
	printf(" dump error=%d\n", error);
}

static void
initcpu(void)
{
	/* XXX: Enable RAM parity/ECC checking? */
	/* XXX: parityenable(); */

}

/* straptrap() in trap.c */

/* from hp300: badaddr() */

/* XXX: parityenable() ? */
/* regdump() moved to regdump.c */

/*
 * cpu_exec_aout_makecmds():
 *	CPU-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{
	return ENOEXEC;
}

/*
 * Soft interrupt support.
 */
void
isr_soft_request(int level)
{
	u_char bit;

	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
		return;

	bit = 1 << level;
	enable_reg_or(bit);
}

void
isr_soft_clear(int level)
{
	u_char bit;

	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
		return;

	bit = 1 << level;
	enable_reg_and(~bit);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
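/*
 * The caller's pages arrive on the segs[0]._ds_mlist page list; this
 * routine allocates DVMA space (or, for BUS_DMA_24BIT devices, ordinary
 * kernel virtual addresses) and enters a non-cached, wired mapping for
 * each page on that list.
 */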
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct vm_page *m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	/* Allocate DVMA addresses */
	sgsize = (size + pagesz - 1) & -pagesz;

	/*
	 * If the device can see our entire 24-bit address space,
	 * we can use any properly aligned virtual addresses.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
		    pagesz, 0);
		if (dva == 0)
			return (ENOMEM);
	}

	/*
	 * Otherwise, we need virtual addresses in DVMA space.
	 */
	else {
		error = extent_alloc(dvmamap, sgsize, pagesz,
		    map->_dm_boundary,
		    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
		    (u_long *)&dva);
		if (error)
			return (error);
	}

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into MMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		if (sgsize == 0)
			panic("_bus_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), dva,
		    (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * load DMA map with a linear buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	bus_size_t sgsize;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	bus_addr_t dva;
	pmap_t pmap;
	int rv;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * A 24-bit device can see all of our kernel address space, so
	 * if we have KVAs, we can just load them as-is, no mapping
	 * necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0 && p == NULL) {
		/*
		 * XXX Need to implement "don't DMA across this boundary".
		 */
		if (map->_dm_boundary != 0)
			panic("bus_dmamap_load: boundaries not implemented");
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->dm_segs[0].ds_addr = (bus_addr_t)va;
		map->dm_segs[0].ds_len = buflen;
		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
		return (0);
	}

	/*
	 * Allocate a region in DVMA space.
	 */
	sgsize = m68k_round_page(buflen + (va & (pagesz - 1)));

	if (extent_alloc(dvmamap, sgsize, pagesz, map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
	    (u_long *)&dva) != 0) {
		return (ENOMEM);
	}

	/* Fill in the segment. */
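	/*
	 * ds_addr keeps the buffer's offset within its first page: the
	 * DVMA region is page aligned, but the caller's buffer need not be.
	 */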
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/*
	 * Now map the DVMA addresses we allocated to point to the
	 * pages of the caller's buffer.
	 */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		rv = pmap_extract(pmap, va, &pa);
#ifdef DIAGNOSTIC
		if (!rv)
			panic("_bus_dmamap_load: no page");
#endif /* DIAGNOSTIC */

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = pagesz - (va & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

		pmap_enter(pmap_kernel(), dva,
		    (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += sgsize;
		buflen -= sgsize;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = map->dm_segs[0].ds_len;

	return (0);
}

/*
 * unload a DMA map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	int flags = map->_dm_flags;
	bus_addr_t dva;
	bus_size_t len;
	int s, error;

	if (nsegs != 1)
		panic("_bus_dmamem_unload: nsegs = %d", nsegs);

	/*
	 * _BUS_DMA_DIRECTMAP is set iff this map was loaded using
	 * _bus_dmamap_load for a 24-bit device.
	 */
	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
		/* Nothing to release */
		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
	}

	/*
	 * Otherwise, this map was loaded using _bus_dmamap_load for a
	 * non-24-bit device, or using _bus_dmamap_load_raw.
	 */
	else {
		dva = segs[0]._ds_va & -PAGE_SIZE;
		len = segs[0]._ds_sgsize;

		/*
		 * Unmap the DVMA addresses.
		 */
		pmap_remove(pmap_kernel(), dva, dva + len);
		pmap_update(pmap_kernel());

		/*
		 * Free the DVMA addresses.
		 */
		if ((flags & BUS_DMA_24BIT) != 0) {
			/*
			 * This map was loaded using _bus_dmamap_load_raw
			 * for a 24-bit device.
			 */
			uvm_unmap(kernel_map, dva, dva + len);
		} else {
			/*
			 * This map was loaded using _bus_dmamap_load or
			 * _bus_dmamap_load_raw for a non-24-bit device.
			 */
			s = splhigh();
			error = extent_free(dvmamap, dva, len, EX_NOWAIT);
			splx(s);
			if (error != 0)
				printf("warning: %ld of DVMA space lost\n",
				    len);
		}
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Translate a VME address and address modifier
 * into a CPU physical address and page type.
 */
int
vmebus_translate(vme_am_t mod, vme_addr_t addr, bus_type_t *btp,
    bus_addr_t *bap)
{
	bus_addr_t base;

	switch (mod) {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)

	case (VME_AM_A16|_DS):
		base = 0x00ff0000;
		break;

	case (VME_AM_A24|_DS):
		base = 0;
		break;

	default:
		return (ENOENT);
#undef _DS
	}

	*bap = base | addr;
	*btp = (*bap & 0x800000 ? PMAP_VME8 : PMAP_VME0);
	return (0);
}

/*
 * If we can find a mapping that was established by the PROM, use it.
 */
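/*
 * The search runs in the kernel context and walks the PROM's virtual
 * range (SUN_MONSTART..SUN_MONEND) a segment at a time, looking for a
 * valid PTE whose page frame and type match the requested physical
 * address and I/O space.
 */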
int
find_prom_map(paddr_t pa, bus_type_t iospace, int len, vaddr_t *vap)
{
	u_long pf;
	int pgtype;
	vaddr_t va, eva;
	int sme;
	u_long pte;
	int saved_ctx;

	/*
	 * The mapping must fit entirely within one page.
	 */
	if ((((u_long)pa & PGOFSET) + len) > PAGE_SIZE)
		return EINVAL;

	pf = PA_PGNUM(pa);
	pgtype = iospace << PG_MOD_SHIFT;
	saved_ctx = kernel_context();

	/*
	 * Walk the PROM address space, looking for a page with the
	 * mapping we want.
	 */
	for (va = SUN_MONSTART; va < SUN_MONEND; ) {

		/*
		 * Make sure this segment is mapped.
		 */
		sme = get_segmap(va);
		if (sme == SEGINV) {
			va += NBSG;
			continue;	/* next segment */
		}

		/*
		 * Walk the pages of this segment.
		 */
		for (eva = va + NBSG; va < eva; va += PAGE_SIZE) {
			pte = get_pte(va);

			if ((pte & (PG_VALID | PG_TYPE)) ==
			    (PG_VALID | pgtype) &&
			    PG_PFNUM(pte) == pf)
			{
				/*
				 * Found the PROM mapping.
				 * note: preserve page offset
				 */
				*vap = (va | ((vaddr_t)pa & PGOFSET));
				restore_context(saved_ctx);
				return 0;
			}
		}
	}
	restore_context(saved_ctx);
	return ENOENT;
}