/*	$NetBSD: machdep.c,v 1.60 2009/02/13 22:41:03 apb Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah Hdr: machdep.c 1.74 92/12/20
 *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
 */

/*
 * Copyright (c) 2001 Matthew Fredette.
 * Copyright (c) 1994, 1995 Gordon W. Ross
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1988 University of Utah.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah Hdr: machdep.c 1.74 92/12/20
 *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.60 2009/02/13 22:41:03 apb Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_fpu_emulate.h"
#include "opt_modular.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/extent.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/mount.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/vnode.h>
#include <sys/syscallargs.h>
#include <sys/ksyms.h>
#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <uvm/uvm.h>	/* XXX: not _extern ... need vm_map_create */

#include <sys/sysctl.h>

#include <dev/cons.h>

#include <machine/promlib.h>
#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/idprom.h>
#include <machine/kcore.h>
#include <machine/reg.h>
#include <machine/psl.h>
#include <machine/pte.h>
#define _SUN68K_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/pmap.h>

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#endif

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sun2/sun2/control.h>
#include <sun2/sun2/enable.h>
#include <sun2/sun2/machdep.h>

#include <sun68k/sun68k/vme_sun68k.h>

#include "ksyms.h"

/* Defined in locore.s */
extern char kernel_text[];
/* Defined by the linker */
extern char etext[];
/* Defined in vfs_bio.c */
extern u_int bufpages;

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

int	physmem;
int	fputype;
void	*msgbufaddr;

/* Virtual page frame for /dev/mem (see mem.c) */
vaddr_t vmmap;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

/* Soft copy of the enable register. */
volatile u_short enable_reg_soft = ENABLE_REG_SOFT_UNDEF;

/*
 * Our no-fault fault handler.
 */
label_t *nofault;

/*
 * dvmamap is used to manage DVMA memory.
 */
static struct extent *dvmamap;

/* Our private scratch page for dumping the MMU. */
static vaddr_t dumppage;

static void identifycpu(void);
static void initcpu(void);

/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1,2, and mountroot, etc.
 */
void
cpu_startup(void)
{
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	/*
	 * Initialize message buffer (for kernel printf).
	 * This is put in physical pages four through seven
	 * so it will always be in the same place after a
	 * reboot. (physical pages 0-3 are reserved by the PROM
	 * for its vector table and other stuff.)
	 * Its mapping was prepared in pmap_bootstrap().
	 * Also, offset some to avoid PROM scribbles.
	 */
	v = (void *)(PAGE_SIZE * 4);
	msgbufaddr = (void *)((char *)v + MSGBUFOFF);
	initmsgbuf(msgbufaddr, MSGBUFSIZE);

#if NKSYMS || defined(DDB) || defined(MODULAR)
	{
		extern int nsym;
		extern char *ssym, *esym;

		ksyms_addsyms_elf(nsym, ssym, esym);
	}
#endif /* DDB */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	identifycpu();
	fputype = FPU_NONE;
#ifdef FPU_EMULATE
	printf("fpu: emulator\n");
#else
	printf("fpu: no math support\n");
#endif

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * XXX fredette - we force a small number of buffers
	 * to help me debug this on my low-memory machine.
	 * this should go away at some point, allowing the
	 * normal automatic buffer-sizing to happen.
	 */
	bufpages = 37;

	/*
	 * Get scratch page for dumpsys().
	 */
	if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED))
	    == 0)
		panic("startup: alloc dumppage");

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE,
	    false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Allocate a virtual page (for use by /dev/mem)
	 * This page is handed to pmap_enter() therefore
	 * it has to be in the normal kernel VA range.
	 */
	vmmap = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);

	/*
	 * Allocate DMA map for devices on the bus.
	 */
	dvmamap = extent_create("dvmamap",
	    DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (dvmamap == NULL)
		panic("unable to allocate DVMA map");
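
	/*
	 * Usage sketch (illustrative, not executed here): DVMA ranges are
	 * later carved out of this extent by the bus_dma back-ends further
	 * down in this file, roughly
	 *
	 *	extent_alloc(dvmamap, sgsize, PAGE_SIZE, boundary,
	 *	    EX_WAITOK, &dva);
	 *
	 * in _bus_dmamap_load()/_bus_dmamap_load_raw(), and are returned
	 * with extent_free() in _bus_dmamap_unload().
	 */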

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();
}

/*
 * Set registers on exec.
 */
void
setregs(struct lwp *l, struct exec_package *pack, u_long stack)
{
	struct trapframe *tf = (struct trapframe *)l->l_md.md_regs;

	tf->tf_sr = PSL_USERSET;
	tf->tf_pc = pack->ep_entry & ~1;
	tf->tf_regs[D0] = 0;
	tf->tf_regs[D1] = 0;
	tf->tf_regs[D2] = 0;
	tf->tf_regs[D3] = 0;
	tf->tf_regs[D4] = 0;
	tf->tf_regs[D5] = 0;
	tf->tf_regs[D6] = 0;
	tf->tf_regs[D7] = 0;
	tf->tf_regs[A0] = 0;
	tf->tf_regs[A1] = 0;
	tf->tf_regs[A2] = (int)l->l_proc->p_psstr;
	tf->tf_regs[A3] = 0;
	tf->tf_regs[A4] = 0;
	tf->tf_regs[A5] = 0;
	tf->tf_regs[A6] = 0;
	tf->tf_regs[SP] = stack;

	/* restore a null state frame */
	l->l_addr->u_pcb.pcb_fpregs.fpf_null = 0;

	l->l_md.md_flags = 0;
}

/*
 * Info for CTL_HW
 */
char	machine[16] = MACHINE;		/* from <machine/param.h> */
char	kernel_arch[16] = "sun2";	/* XXX needs a sysctl node */
char	cpu_model[120];

/*
 * Determine which Sun2 model we are running on.
 */
void
identifycpu(void)
{
	extern char *cpu_string;	/* XXX */

	/* Other stuff? (VAC, mc6888x version, etc.) */
	/* Note: miniroot cares about the kernel_arch part. */
	sprintf(cpu_model, "%s %s", kernel_arch, cpu_string);

	printf("Model: %s\n", cpu_model);
}

/*
 * machine dependent system variables.
 */
#if 0	/* XXX - Not yet... */
static int
sysctl_machdep_root_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;

	node.sysctl_data = some permutation on root_device;
	node.sysctl_size = strlen(root_device) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
#endif

static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	char *cp;

	cp = prom_getbootfile();
	if (cp == NULL || cp[0] == '\0')
		return (ENOENT);

	node.sysctl_data = cp;
	node.sysctl_size = strlen(cp) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "console_device", NULL,
	    sysctl_consdev, 0, NULL, sizeof(dev_t),
	    CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
#if 0	/* XXX - Not yet... */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "root_device", NULL,
	    sysctl_machdep_root_device, 0, NULL, 0,
	    CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "booted_kernel", NULL,
	    sysctl_machdep_booted_kernel, 0, NULL, 0,
	    CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
}
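
/*
 * Usage sketch (illustrative only, standard sysctl(3) calls): the
 * "booted_kernel" node registered above can be read from userland with
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[MAXPATHLEN];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 *
 * or simply with "sysctl machdep.booted_kernel".
 */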

/* See: sig_machdep.c */

/*
 * Do a sync in preparation for a reboot.
 * XXX - This could probably be common code.
 * XXX - And now, most of it is in vfs_shutdown()
 * XXX - Put waittime checks in there too?
 */
int waittime = -1;	/* XXX - Who else looks at this? -gwr */

static void
reboot_sync(void)
{

	/* Check waittime here to localize its use to this function. */
	if (waittime >= 0)
		return;
	waittime = 0;
	vfs_shutdown();
}

/*
 * Common part of the BSD and SunOS reboot system calls.
 */
__dead void
cpu_reboot(int howto, char *user_boot_string)
{
	char *bs, *p;
	char default_boot_string[8];

	/* If system is cold, just halt. (early panic?) */
	if (cold)
		goto haltsys;

	/* Un-blank the screen if appropriate. */
	cnpollc(1);

	if ((howto & RB_NOSYNC) == 0) {
		reboot_sync();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 *
		 * XXX - However, if the kernel has been sitting in ddb,
		 * the time will be way off, so don't set the HW clock!
		 * XXX - Should do sanity check against HW clock. -gwr
		 */
		/* resettodr(); */
	}

	/* Disable interrupts. */
	splhigh();

	/* Write out a crash dump if asked. */
	if (howto & RB_DUMP)
		dumpsys();

	/* run any shutdown hooks */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	if (howto & RB_HALT) {
	haltsys:
		printf("halted.\n");
		prom_halt();
	}

	/*
	 * Automatic reboot.
	 */
	bs = user_boot_string;
	if (bs == NULL) {
		/*
		 * Build our own boot string with an empty
		 * boot device/file and (maybe) some flags.
		 * The PROM will supply the device/file name.
		 */
		bs = default_boot_string;
		*bs = '\0';
		if (howto & (RB_KDB|RB_ASKNAME|RB_SINGLE)) {
			/* Append the boot flags. */
			p = bs;
			*p++ = ' ';
			*p++ = '-';
			if (howto & RB_KDB)
				*p++ = 'd';
			if (howto & RB_ASKNAME)
				*p++ = 'a';
			if (howto & RB_SINGLE)
				*p++ = 's';
			*p = '\0';
		}
	}
	printf("rebooting...\n");
	prom_boot(bs);
	for (;;) ;
	/*NOTREACHED*/
}

/*
 * These variables are needed by /sbin/savecore
 */
uint32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

#define	DUMP_EXTRA	3	/* CPU-dependent extra pages */

/*
 * This is called by main to set dumplo, dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf(void)
{
	const struct bdevsw *bdev;
	int devblks;	/* size of dump device in blocks */
	int dumpblks;	/* size of dump image in blocks */
	int (*getsize)(dev_t);

	if (dumpdev == NODEV)
		return;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL) {
		dumpdev = NODEV;
		return;
	}
	getsize = bdev->d_psize;
	if (getsize == NULL)
		return;
	devblks = (*getsize)(dumpdev);
	if (devblks <= ctod(1))
		return;
	devblks &= ~(ctod(1)-1);

	/*
	 * Note: savecore expects dumpsize to be the
	 * number of pages AFTER the dump header.
	 */
	dumpsize = physmem;

	/* Position dump image near end of space, page aligned. */
	dumpblks = ctod(physmem + DUMP_EXTRA);
	dumplo = devblks - dumpblks;

	/* If it does not fit, truncate it by moving dumplo. */
	/* Note: Must force signed comparison. */
	if (dumplo < ((long)ctod(1))) {
		dumplo = ctod(1);
		dumpsize = dtoc(devblks - dumplo) - DUMP_EXTRA;
	}
}
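
/*
 * Worked example for the placement above (a sketch only, assuming the
 * 2 KB sun2 page size, so ctod(1) == 4 disk blocks): with physmem ==
 * 2048 pages and a 16384-block dump partition, dumpblks ==
 * ctod(2048 + DUMP_EXTRA) == 8204, so dumplo == 16384 - 8204 == 8180
 * and the image lands at the very end of the partition, clear of the
 * label area.
 */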

/* Note: gdb looks for "dumppcb" in a kernel crash dump. */
struct pcb dumppcb;
extern paddr_t avail_start;

/*
 * Write a crash dump.  The format while in swap is:
 *   kcore_seg_t cpu_hdr;
 *   cpu_kcore_hdr_t cpu_data;
 *   padding (PAGE_SIZE-sizeof(kcore_seg_t))
 *   pagemap (2*PAGE_SIZE)
 *   physical memory...
 */
void
dumpsys(void)
{
	const struct bdevsw *dsw;
	kcore_seg_t *kseg_p;
	cpu_kcore_hdr_t *chdr_p;
	struct sun2_kcore_hdr *sh;
	char *vaddr;
	paddr_t paddr;
	int psize, todo, chunk;
	daddr_t blkno;
	int error = 0;

	if (dumpdev == NODEV)
		return;
	dsw = bdevsw_lookup(dumpdev);
	if (dsw == NULL || dsw->d_psize == NULL)
		return;
	if (dumppage == 0)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		return;
	}
	savectx(&dumppcb);

	psize = (*(dsw->d_psize))(dumpdev);
	if (psize == -1) {
		printf("dump area unavailable\n");
		return;
	}

	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	/*
	 * Prepare the dump header, including MMU state.
	 */
	blkno = dumplo;
	todo = dumpsize;	/* pages */
	vaddr = (char*)dumppage;
	memset(vaddr, 0, PAGE_SIZE);

	/* Set pointers to all three parts. */
	kseg_p = (kcore_seg_t *)vaddr;
	chdr_p = (cpu_kcore_hdr_t *)(kseg_p + 1);
	sh = &chdr_p->un._sun2;

	/* Fill in kcore_seg_t part. */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = (ctob(DUMP_EXTRA) - sizeof(*kseg_p));

	/* Fill in cpu_kcore_hdr_t part. */
	strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
	chdr_p->page_size = PAGE_SIZE;
	chdr_p->kernbase = KERNBASE;

	/* Fill in the sun2_kcore_hdr part (MMU state). */
	pmap_kcore_hdr(sh);

	/* Write out the dump header. */
	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
	if (error)
		goto fail;
	blkno += btodb(PAGE_SIZE);

	/* translation RAM (pages zero through seven) */
	for (chunk = 0; chunk < (PAGE_SIZE * 8); chunk += PAGE_SIZE) {
		pmap_get_pagemap((int*)vaddr, chunk);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		blkno += btodb(PAGE_SIZE);
	}

	/*
	 * Now dump physical memory.  Have to do it in two chunks.
	 * The first chunk is "unmanaged" (by the VM code) and its
	 * range of physical addresses is not allowed in pmap_enter.
	 * However, that segment is mapped linearly, so we can just
	 * use the virtual mappings already in place.  The second
	 * chunk is done the normal way, using pmap_enter.
	 *
	 * Note that vaddr==(paddr+KERNBASE) for paddr=0 through etext.
	 */

	/* Do the first chunk (0 <= PA < avail_start) */
	paddr = 0;
	chunk = btoc(avail_start);
	if (chunk > todo)
		chunk = todo;
	do {
		if ((todo & 0xf) == 0)
			printf_nolog("\r%4d", todo);
		vaddr = (char*)(paddr + KERNBASE);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
		--todo;
	} while (--chunk > 0);

	/* Do the second chunk (avail_start <= PA < dumpsize) */
	vaddr = (char*)vmmap;	/* Borrow /dev/mem VA */
	do {
		if ((todo & 0xf) == 0)
			printf_nolog("\r%4d", todo);
		pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ);
		pmap_update(pmap_kernel());
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		pmap_kremove(vmmap, PAGE_SIZE);
		pmap_update(pmap_kernel());
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
	} while (--todo > 0);

	printf("\rdump succeeded\n");
	return;
fail:
	printf(" dump error=%d\n", error);
}

static void
initcpu(void)
{
	/* XXX: Enable RAM parity/ECC checking? */
	/* XXX: parityenable(); */

}

/* straptrap() in trap.c */

/* from hp300: badaddr() */

/* XXX: parityenable() ? */
/* regdump() moved to regdump.c */

/*
 * cpu_exec_aout_makecmds():
 *	CPU-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{
	return ENOEXEC;
}

#if 0
/*
 * Soft interrupt support.
 */
void
isr_soft_request(int level)
{
	u_char bit;

	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
		return;

	bit = 1 << level;
	enable_reg_or(bit);
}

void
isr_soft_clear(int level)
{
	u_char bit;

	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
		return;

	bit = 1 << level;
	enable_reg_and(~bit);
}
#endif

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct vm_page *m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	/* Allocate DVMA addresses */
	sgsize = (size + pagesz - 1) & -pagesz;

	/*
	 * If the device can see our entire 24-bit address space,
	 * we can use any properly aligned virtual addresses.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
		    pagesz, 0);
		if (dva == 0)
			return (ENOMEM);
	}

	/*
	 * Otherwise, we need virtual addresses in DVMA space.
	 */
	else {
		error = extent_alloc(dvmamap, sgsize, pagesz,
		    map->_dm_boundary,
		    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
		    (u_long *)&dva);
		if (error)
			return (error);
	}

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into MMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL;
	    m = TAILQ_NEXT(m, pageq.queue)) {
		if (sgsize == 0)
			panic("_bus_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), dva,
		    (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * load DMA map with a linear buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	bus_size_t sgsize;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	bus_addr_t dva;
	pmap_t pmap;
	int rv;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * A 24-bit device can see all of our kernel address space, so
	 * if we have KVAs, we can just load them as-is, no mapping
	 * necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0 && p == NULL) {
		/*
		 * XXX Need to implement "don't DMA across this boundary".
		 */
		if (map->_dm_boundary != 0)
			panic("bus_dmamap_load: boundaries not implemented");
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->dm_segs[0].ds_addr = (bus_addr_t)va;
		map->dm_segs[0].ds_len = buflen;
		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
		return (0);
	}

	/*
	 * Allocate a region in DVMA space.
	 */
	sgsize = m68k_round_page(buflen + (va & (pagesz - 1)));

	if (extent_alloc(dvmamap, sgsize, pagesz, map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
	    (u_long *)&dva) != 0) {
		return (ENOMEM);
	}

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/*
	 * Now map the DVMA addresses we allocated to point to the
	 * pages of the caller's buffer.
	 */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		paddr_t pa;

		/*
		 * Get the physical address for this page.
		 */
		rv = pmap_extract(pmap, va, &pa);
#ifdef DIAGNOSTIC
		if (!rv)
			panic("_bus_dmamap_load: no page");
#endif /* DIAGNOSTIC */

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = pagesz - (va & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

		pmap_enter(pmap_kernel(), dva,
		    (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += sgsize;
		buflen -= sgsize;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = map->dm_segs[0].ds_len;

	return (0);
}
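
/*
 * Usage sketch (illustrative only, standard bus_dma(9) calls): the usual
 * lifecycle of a map loaded with a linear buffer, as seen from a driver:
 *
 *	bus_dmamap_create(t, MAXPHYS, 1, MAXPHYS, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	... start the transfer using map->dm_segs[0].ds_addr ...
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */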

/*
 * unload a DMA map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	int flags = map->_dm_flags;
	bus_addr_t dva;
	bus_size_t len;
	int s, error;

	if (nsegs != 1)
		panic("_bus_dmamem_unload: nsegs = %d", nsegs);

	/*
	 * _BUS_DMA_DIRECTMAP is set iff this map was loaded using
	 * _bus_dmamap_load for a 24-bit device.
	 */
	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
		/* Nothing to release */
		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
	}

	/*
	 * Otherwise, this map was loaded using _bus_dmamap_load for a
	 * non-24-bit device, or using _bus_dmamap_load_raw.
	 */
	else {
		dva = segs[0]._ds_va & -PAGE_SIZE;
		len = segs[0]._ds_sgsize;

		/*
		 * Unmap the DVMA addresses.
		 */
		pmap_remove(pmap_kernel(), dva, dva + len);
		pmap_update(pmap_kernel());

		/*
		 * Free the DVMA addresses.
		 */
		if ((flags & BUS_DMA_24BIT) != 0) {
			/*
			 * This map was loaded using _bus_dmamap_load_raw
			 * for a 24-bit device.
			 */
			uvm_unmap(kernel_map, dva, dva + len);
		} else {
			/*
			 * This map was loaded using _bus_dmamap_load or
			 * _bus_dmamap_load_raw for a non-24-bit device.
			 */
			s = splhigh();
			error = extent_free(dvmamap, dva, len, EX_NOWAIT);
			splx(s);
			if (error != 0)
				printf("warning: %ld of DVMA space lost\n",
				    len);
		}
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Translate a VME address and address modifier
 * into a CPU physical address and page type.
 */
int
vmebus_translate(vme_am_t mod, vme_addr_t addr, bus_type_t *btp,
    bus_addr_t *bap)
{
	bus_addr_t base;

	switch (mod) {
#define	_DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)

	case (VME_AM_A16|_DS):
		base = 0x00ff0000;
		break;

	case (VME_AM_A24|_DS):
		base = 0;
		break;

	default:
		return (ENOENT);
#undef _DS
	}

	*bap = base | addr;
	*btp = (*bap & 0x800000 ? PMAP_VME8 : PMAP_VME0);
	return (0);
}

/*
 * If we can find a mapping that was established by the PROM, use it.
 */
int
find_prom_map(paddr_t pa, bus_type_t iospace, int len, vaddr_t *vap)
{
	u_long	pf;
	int	pgtype;
	vaddr_t	va, eva;
	int	sme;
	u_long	pte;
	int	saved_ctx;

	/*
	 * The mapping must fit entirely within one page.
	 */
	if ((((u_long)pa & PGOFSET) + len) > PAGE_SIZE)
		return EINVAL;

	pf = PA_PGNUM(pa);
	pgtype = iospace << PG_MOD_SHIFT;
	saved_ctx = kernel_context();

	/*
	 * Walk the PROM address space, looking for a page with the
	 * mapping we want.
	 */
	for (va = SUN_MONSTART; va < SUN_MONEND; ) {

		/*
		 * Make sure this segment is mapped.
		 */
		sme = get_segmap(va);
		if (sme == SEGINV) {
			va += NBSG;
			continue;	/* next segment */
		}

		/*
		 * Walk the pages of this segment.
		 */
		for (eva = va + NBSG; va < eva; va += PAGE_SIZE) {
			pte = get_pte(va);

			if ((pte & (PG_VALID | PG_TYPE)) ==
			    (PG_VALID | pgtype) &&
			    PG_PFNUM(pte) == pf) {
				/*
				 * Found the PROM mapping.
				 * note: preserve page offset
				 */
				*vap = (va | ((vaddr_t)pa & PGOFSET));
				restore_context(saved_ctx);
				return 0;
			}
		}
	}
	restore_context(saved_ctx);
	return ENOENT;
}