/*	$NetBSD: machdep.c,v 1.67 2011/02/08 20:20:24 rmind Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah Hdr: machdep.c 1.74 92/12/20
 *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
 */

/*
 * Copyright (c) 2001 Matthew Fredette.
 * Copyright (c) 1994, 1995 Gordon W. Ross
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1988 University of Utah.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah Hdr: machdep.c 1.74 92/12/20
 *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.67 2011/02/08 20:20:24 rmind Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_fpu_emulate.h"
#include "opt_modular.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/extent.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/mount.h>
#include <sys/exec.h>
#include <sys/exec_aout.h>		/* for MID_* */
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/vnode.h>
#include <sys/syscallargs.h>
#include <sys/ksyms.h>
#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <uvm/uvm.h>	/* XXX: not _extern ... need vm_map_create */

#include <sys/sysctl.h>

#include <dev/cons.h>

#include <machine/promlib.h>
#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/idprom.h>
#include <machine/kcore.h>
#include <machine/reg.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/pte.h>
#define _SUN68K_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/pmap.h>

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#endif

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sun2/sun2/control.h>
#include <sun2/sun2/enable.h>
#include <sun2/sun2/machdep.h>

#include <sun68k/sun68k/vme_sun68k.h>

#include "ksyms.h"

/* Defined in locore.s */
extern char kernel_text[];
/* Defined by the linker */
extern char etext[];
/* Defined in vfs_bio.c */
extern u_int bufpages;

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

struct vm_map *phys_map = NULL;

int physmem;
int fputype;
void *msgbufaddr;

/* Virtual page frame for /dev/mem (see mem.c) */
vaddr_t vmmap;
/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

/* Soft copy of the enable register. */
volatile u_short enable_reg_soft = ENABLE_REG_SOFT_UNDEF;

/*
 * Our no-fault fault handler.
 */
label_t *nofault;

/*
 * dvmamap is used to manage DVMA memory.
 */
static struct extent *dvmamap;

/* Our private scratch page for dumping the MMU. */
static vaddr_t dumppage;

static void identifycpu(void);
static void initcpu(void);

/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1, 2, mountroot, etc.
 */
void
cpu_startup(void)
{
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	/*
	 * Initialize message buffer (for kernel printf).
	 * This is put in physical pages four through seven
	 * so it will always be in the same place after a
	 * reboot.  (Physical pages 0-3 are reserved by the PROM
	 * for its vector table and other stuff.)
	 * Its mapping was prepared in pmap_bootstrap().
	 * Also, offset some to avoid PROM scribbles.
	 */
	v = (void *)(PAGE_SIZE * 4);
	msgbufaddr = (void *)((char *)v + MSGBUFOFF);
	initmsgbuf(msgbufaddr, MSGBUFSIZE);

#if NKSYMS || defined(DDB) || defined(MODULAR)
	{
		extern int nsym;
		extern char *ssym, *esym;

		ksyms_addsyms_elf(nsym, ssym, esym);
	}
#endif /* NKSYMS || defined(DDB) || defined(MODULAR) */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	identifycpu();
	fputype = FPU_NONE;
#ifdef FPU_EMULATE
	printf("fpu: emulator\n");
#else
	printf("fpu: no math support\n");
#endif

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * XXX fredette - we force a small number of buffers
	 * to help me debug this on my low-memory machine.
	 * This should go away at some point, allowing the
	 * normal automatic buffer-sizing to happen.
	 */
	bufpages = 37;

	/*
	 * Get scratch page for dumpsys().
	 */
	if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED)) == 0)
		panic("startup: alloc dumppage");

	minaddr = 0;

	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Allocate a virtual page (for use by /dev/mem).
	 * This page is handed to pmap_enter() therefore
	 * it has to be in the normal kernel VA range.
	 */
	vmmap = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);

	/*
	 * Allocate DMA map for devices on the bus.
	 */
	dvmamap = extent_create("dvmamap",
	    DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (dvmamap == NULL)
		panic("unable to allocate DVMA map");

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();
}
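
/*
 * Note added for exposition (not in the original source): the dvmamap
 * extent created in cpu_startup() above is the allocator used by the
 * _bus_dmamap_load*() and _bus_dmamap_unload() routines later in this
 * file.  The pairing is roughly:
 *
 *	error = extent_alloc(dvmamap, sgsize, PAGE_SIZE,
 *	    boundary, EX_NOWAIT, &dva);		(reserve DVMA space)
 *	... pmap_enter() the buffer's pages at dva ...
 *	extent_free(dvmamap, dva, sgsize, EX_NOWAIT);	(on unload)
 */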
/*
 * Set registers on exec.
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe *tf = (struct trapframe *)l->l_md.md_regs;
	struct pcb *pcb = lwp_getpcb(l);

	tf->tf_sr = PSL_USERSET;
	tf->tf_pc = pack->ep_entry & ~1;
	tf->tf_regs[D0] = 0;
	tf->tf_regs[D1] = 0;
	tf->tf_regs[D2] = 0;
	tf->tf_regs[D3] = 0;
	tf->tf_regs[D4] = 0;
	tf->tf_regs[D5] = 0;
	tf->tf_regs[D6] = 0;
	tf->tf_regs[D7] = 0;
	tf->tf_regs[A0] = 0;
	tf->tf_regs[A1] = 0;
	tf->tf_regs[A2] = (int)l->l_proc->p_psstr;
	tf->tf_regs[A3] = 0;
	tf->tf_regs[A4] = 0;
	tf->tf_regs[A5] = 0;
	tf->tf_regs[A6] = 0;
	tf->tf_regs[SP] = stack;

	/* restore a null state frame */
	pcb->pcb_fpregs.fpf_null = 0;

	l->l_md.md_flags = 0;
}

/*
 * Info for CTL_HW
 */
char	machine[16] = MACHINE;		/* from <machine/param.h> */
char	kernel_arch[16] = "sun2";	/* XXX needs a sysctl node */
char	cpu_model[120];

/*
 * Determine which Sun2 model we are running on.
 */
void
identifycpu(void)
{
	extern char *cpu_string;	/* XXX */

	/* Other stuff? (VAC, mc6888x version, etc.) */
	/* Note: miniroot cares about the kernel_arch part. */
	sprintf(cpu_model, "%s %s", kernel_arch, cpu_string);

	printf("Model: %s\n", cpu_model);
}

/*
 * machine dependent system variables.
 */
#if 0	/* XXX - Not yet... */
static int
sysctl_machdep_root_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;

	node.sysctl_data = some permutation on root_device;
	node.sysctl_size = strlen(root_device) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
#endif

static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	char *cp;

	cp = prom_getbootfile();
	if (cp == NULL || cp[0] == '\0')
		return (ENOENT);

	node.sysctl_data = cp;
	node.sysctl_size = strlen(cp) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "console_device", NULL,
	    sysctl_consdev, 0, NULL, sizeof(dev_t),
	    CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
#if 0	/* XXX - Not yet... */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "root_device", NULL,
	    sysctl_machdep_root_device, 0, NULL, 0,
	    CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "booted_kernel", NULL,
	    sysctl_machdep_booted_kernel, 0, NULL, 0,
	    CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
}

/* See: sig_machdep.c */

/*
 * Do a sync in preparation for a reboot.
 * XXX - This could probably be common code.
 * XXX - And now, most of it is in vfs_shutdown()
 * XXX - Put waittime checks in there too?
 */
int waittime = -1;	/* XXX - Who else looks at this? -gwr */
static void
reboot_sync(void)
{

	/* Check waittime here to localize its use to this function. */
	if (waittime >= 0)
		return;
	waittime = 0;
	vfs_shutdown();
}
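
/*
 * Illustrative note (not in the original source): when cpu_reboot() below
 * is not handed a boot string, it builds one containing only PROM boot
 * flags; e.g. howto = RB_AUTOBOOT | RB_SINGLE yields " -s", and
 * RB_KDB | RB_ASKNAME yields " -da".  The PROM supplies the boot device
 * and file name itself.
 */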
/*
 * Common part of the BSD and SunOS reboot system calls.
 */
__dead void
cpu_reboot(int howto, char *user_boot_string)
{
	char *bs, *p;
	char default_boot_string[8];

	/* If system is cold, just halt. (early panic?) */
	if (cold)
		goto haltsys;

	/* Un-blank the screen if appropriate. */
	cnpollc(1);

	if ((howto & RB_NOSYNC) == 0) {
		reboot_sync();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 *
		 * XXX - However, if the kernel has been sitting in ddb,
		 * the time will be way off, so don't set the HW clock!
		 * XXX - Should do sanity check against HW clock. -gwr
		 */
		/* resettodr(); */
	}

	/* Disable interrupts. */
	splhigh();

	/* Write out a crash dump if asked. */
	if (howto & RB_DUMP)
		dumpsys();

	/* run any shutdown hooks */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	if (howto & RB_HALT) {
haltsys:
		printf("halted.\n");
		prom_halt();
	}

	/*
	 * Automatic reboot.
	 */
	bs = user_boot_string;
	if (bs == NULL) {
		/*
		 * Build our own boot string with an empty
		 * boot device/file and (maybe) some flags.
		 * The PROM will supply the device/file name.
		 */
		bs = default_boot_string;
		*bs = '\0';
		if (howto & (RB_KDB|RB_ASKNAME|RB_SINGLE)) {
			/* Append the boot flags. */
			p = bs;
			*p++ = ' ';
			*p++ = '-';
			if (howto & RB_KDB)
				*p++ = 'd';
			if (howto & RB_ASKNAME)
				*p++ = 'a';
			if (howto & RB_SINGLE)
				*p++ = 's';
			*p = '\0';
		}
	}
	printf("rebooting...\n");
	prom_boot(bs);
	for (;;)
		;
	/*NOTREACHED*/
}

/*
 * These variables are needed by /sbin/savecore
 */
uint32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

#define	DUMP_EXTRA	3	/* CPU-dependent extra pages */

/*
 * This is called by main to set dumplo, dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf(void)
{
	const struct bdevsw *bdev;
	int devblks;	/* size of dump device in blocks */
	int dumpblks;	/* size of dump image in blocks */
	int (*getsize)(dev_t);

	if (dumpdev == NODEV)
		return;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL) {
		dumpdev = NODEV;
		return;
	}
	getsize = bdev->d_psize;
	if (getsize == NULL)
		return;
	devblks = (*getsize)(dumpdev);
	if (devblks <= ctod(1))
		return;
	devblks &= ~(ctod(1) - 1);

	/*
	 * Note: savecore expects dumpsize to be the
	 * number of pages AFTER the dump header.
	 */
	dumpsize = physmem;

	/* Position dump image near end of space, page aligned. */
	dumpblks = ctod(physmem + DUMP_EXTRA);
	dumplo = devblks - dumpblks;

	/* If it does not fit, truncate it by moving dumplo. */
	/* Note: Must force signed comparison. */
	if (dumplo < ((long)ctod(1))) {
		dumplo = ctod(1);
		dumpsize = dtoc(devblks - dumplo) - DUMP_EXTRA;
	}
}

/* Note: gdb looks for "dumppcb" in a kernel crash dump. */
struct pcb dumppcb;
extern paddr_t avail_start;
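
/*
 * Illustrative arithmetic for cpu_dumpconf() above (hypothetical numbers,
 * not from the original source): assuming 2 KB pages and 512-byte disk
 * blocks, ctod(1) == 4, so a 2048-page (4 MB) machine needs
 * dumpblks = ctod(2048 + DUMP_EXTRA) == 8204 blocks; on a 16384-block
 * dump device the image lands at dumplo = 16384 - 8204 = 8180, near the
 * end of the device.  Only when dumplo would fall below ctod(1) is the
 * dump truncated by moving dumplo up and shrinking dumpsize.
 */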
/*
 * Write a crash dump.  The format while in swap is:
 *   kcore_seg_t cpu_hdr;
 *   cpu_kcore_hdr_t cpu_data;
 *   padding (PAGE_SIZE-sizeof(kcore_seg_t))
 *   pagemap (2*PAGE_SIZE)
 *   physical memory...
 */
void
dumpsys(void)
{
	const struct bdevsw *dsw;
	kcore_seg_t *kseg_p;
	cpu_kcore_hdr_t *chdr_p;
	struct sun2_kcore_hdr *sh;
	char *vaddr;
	paddr_t paddr;
	int psize, todo, chunk;
	daddr_t blkno;
	int error = 0;

	if (dumpdev == NODEV)
		return;
	dsw = bdevsw_lookup(dumpdev);
	if (dsw == NULL || dsw->d_psize == NULL)
		return;
	if (dumppage == 0)
		return;

	/*
	 * For dumps during autoconfiguration:
	 * if the dump device has not been configured yet, do it now.
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		return;
	}
	savectx(&dumppcb);

	psize = (*(dsw->d_psize))(dumpdev);
	if (psize == -1) {
		printf("dump area unavailable\n");
		return;
	}

	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	/*
	 * Prepare the dump header, including MMU state.
	 */
	blkno = dumplo;
	todo = dumpsize;	/* pages */
	vaddr = (char *)dumppage;
	memset(vaddr, 0, PAGE_SIZE);

	/* Set pointers to all three parts. */
	kseg_p = (kcore_seg_t *)vaddr;
	chdr_p = (cpu_kcore_hdr_t *)(kseg_p + 1);
	sh = &chdr_p->un._sun2;

	/* Fill in kcore_seg_t part. */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = (ctob(DUMP_EXTRA) - sizeof(*kseg_p));

	/* Fill in cpu_kcore_hdr_t part. */
	strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
	chdr_p->page_size = PAGE_SIZE;
	chdr_p->kernbase = KERNBASE;

	/* Fill in the sun2_kcore_hdr part (MMU state). */
	pmap_kcore_hdr(sh);

	/* Write out the dump header. */
	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
	if (error)
		goto fail;
	blkno += btodb(PAGE_SIZE);

	/* translation RAM (pages zero through seven) */
	for (chunk = 0; chunk < (PAGE_SIZE * 8); chunk += PAGE_SIZE) {
		pmap_get_pagemap((int *)vaddr, chunk);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		blkno += btodb(PAGE_SIZE);
	}

	/*
	 * Now dump physical memory.  Have to do it in two chunks.
	 * The first chunk is "unmanaged" (by the VM code) and its
	 * range of physical addresses is not allowed in pmap_enter().
	 * However, that segment is mapped linearly, so we can just
	 * use the virtual mappings already in place.  The second
	 * chunk is done the normal way, using pmap_enter().
	 *
	 * Note that vaddr==(paddr+KERNBASE) for paddr=0 through etext.
	 */

	/* Do the first chunk (0 <= PA < avail_start) */
	paddr = 0;
	chunk = btoc(avail_start);
	if (chunk > todo)
		chunk = todo;
	do {
		if ((todo & 0xf) == 0)
			printf_nolog("\r%4d", todo);
		vaddr = (char *)(paddr + KERNBASE);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
		--todo;
	} while (--chunk > 0);

	/* Do the second chunk (avail_start <= PA < dumpsize) */
	vaddr = (char *)vmmap;	/* Borrow /dev/mem VA */
	do {
		if ((todo & 0xf) == 0)
			printf_nolog("\r%4d", todo);
		pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ, 0);
		pmap_update(pmap_kernel());
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		pmap_kremove(vmmap, PAGE_SIZE);
		pmap_update(pmap_kernel());
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
	} while (--todo > 0);

	printf("\rdump succeeded\n");
	return;
fail:
	printf(" dump error=%d\n", error);
}

static void
initcpu(void)
{
	/* XXX: Enable RAM parity/ECC checking? */
	/* XXX: parityenable(); */

}

/* straptrap() in trap.c */

/* from hp300: badaddr() */

/* XXX: parityenable() ? */
/* regdump() moved to regdump.c */

/*
 * cpu_exec_aout_makecmds():
 *	CPU-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{
	return ENOEXEC;
}

#if 0
/*
 * Soft interrupt support.
 */
void
isr_soft_request(int level)
{
	u_char bit;

	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
		return;

	bit = 1 << level;
	enable_reg_or(bit);
}

void
isr_soft_clear(int level)
{
	u_char bit;

	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
		return;

	bit = 1 << level;
	enable_reg_and(~bit);
}
#endif

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct vm_page *m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	/* Allocate DVMA addresses */
	sgsize = (size + pagesz - 1) & -pagesz;

	/*
	 * If the device can see our entire 24-bit address space,
	 * we can use any properly aligned virtual addresses.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
		    pagesz, 0);
		if (dva == 0)
			return (ENOMEM);
	}

	/*
	 * Otherwise, we need virtual addresses in DVMA space.
	 */
	else {
		error = extent_alloc(dvmamap, sgsize, pagesz,
		    map->_dm_boundary,
		    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
		    (u_long *)&dva);
		if (error)
			return (error);
	}
	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into MMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL;
	     m = TAILQ_NEXT(m, pageq.queue)) {
		if (sgsize == 0)
			panic("_bus_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), dva,
		    (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * load DMA map with a linear buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	bus_size_t sgsize;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	bus_addr_t dva;
	pmap_t pmap;
	int rv;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * A 24-bit device can see all of our kernel address space, so
	 * if we have KVAs, we can just load them as-is, no mapping
	 * necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0 && p == NULL) {
		/*
		 * XXX Need to implement "don't DMA across this boundary".
		 */
		if (map->_dm_boundary != 0)
			panic("bus_dmamap_load: boundaries not implemented");
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->dm_segs[0].ds_addr = (bus_addr_t)va;
		map->dm_segs[0].ds_len = buflen;
		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
		return (0);
	}

	/*
	 * Allocate a region in DVMA space.
	 */
	sgsize = m68k_round_page(buflen + (va & (pagesz - 1)));

	if (extent_alloc(dvmamap, sgsize, pagesz, map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
	    (u_long *)&dva) != 0) {
		return (ENOMEM);
	}

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/*
	 * Now map the DVMA addresses we allocated to point to the
	 * pages of the caller's buffer.
	 */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	while (buflen > 0) {
		paddr_t pa;

		/*
		 * Get the physical address for this page.
		 */
		rv = pmap_extract(pmap, va, &pa);
#ifdef DIAGNOSTIC
		if (!rv)
			panic("_bus_dmamap_load: no page");
#endif /* DIAGNOSTIC */

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = pagesz - (va & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

		pmap_enter(pmap_kernel(), dva,
		    (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += sgsize;
		buflen -= sgsize;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = map->dm_segs[0].ds_len;

	return (0);
}
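
/*
 * Summary added for exposition (not in the original source) of how the
 * load routines above and _bus_dmamap_unload() below pair up:
 *
 *   - 24-bit device with a kernel buffer: _bus_dmamap_load() uses the
 *     kernel VA directly and sets _BUS_DMA_DIRECTMAP; unload merely
 *     clears that flag.
 *   - otherwise: a DVMA range is taken from dvmamap (or, for
 *     _bus_dmamap_load_raw() on a 24-bit device, from kernel_map) and
 *     mapped with pmap_enter(); unload removes the mappings and returns
 *     the range to the matching allocator.
 */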
/*
 * unload a DMA map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	int flags = map->_dm_flags;
	bus_addr_t dva;
	bus_size_t len;
	int s, error;

	if (nsegs != 1)
		panic("_bus_dmamap_unload: nsegs = %d", nsegs);

	/*
	 * _BUS_DMA_DIRECTMAP is set iff this map was loaded using
	 * _bus_dmamap_load for a 24-bit device.
	 */
	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
		/* Nothing to release */
		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
	}

	/*
	 * Otherwise, this map was loaded using _bus_dmamap_load for a
	 * non-24-bit device, or using _bus_dmamap_load_raw.
	 */
	else {
		dva = segs[0]._ds_va & -PAGE_SIZE;
		len = segs[0]._ds_sgsize;

		/*
		 * Unmap the DVMA addresses.
		 */
		pmap_remove(pmap_kernel(), dva, dva + len);
		pmap_update(pmap_kernel());

		/*
		 * Free the DVMA addresses.
		 */
		if ((flags & BUS_DMA_24BIT) != 0) {
			/*
			 * This map was loaded using _bus_dmamap_load_raw
			 * for a 24-bit device.
			 */
			uvm_unmap(kernel_map, dva, dva + len);
		} else {
			/*
			 * This map was loaded using _bus_dmamap_load or
			 * _bus_dmamap_load_raw for a non-24-bit device.
			 */
			s = splhigh();
			error = extent_free(dvmamap, dva, len, EX_NOWAIT);
			splx(s);
			if (error != 0)
				printf("warning: %ld bytes of DVMA space "
				    "lost\n", len);
		}
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Translate a VME address and address modifier
 * into a CPU physical address and page type.
 */
int
vmebus_translate(vme_am_t mod, vme_addr_t addr, bus_type_t *btp,
    bus_addr_t *bap)
{
	bus_addr_t base;

	switch (mod) {
#define	_DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)

	case (VME_AM_A16|_DS):
		base = 0x00ff0000;
		break;

	case (VME_AM_A24|_DS):
		base = 0;
		break;

	default:
		return (ENOENT);
#undef _DS
	}

	*bap = base | addr;
	*btp = (*bap & 0x800000 ? PMAP_VME8 : PMAP_VME0);
	return (0);
}
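
/*
 * Example added for exposition (not in the original source): a
 * supervisor-data A16 access to VME address 0x1234 translates to bus
 * address 0x00ff0000 | 0x1234 == 0x00ff1234; bit 23 is set, so the page
 * type is PMAP_VME8.  A supervisor-data A24 access below 0x800000 keeps
 * its address unchanged and gets PMAP_VME0.
 */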
/*
 * If we can find a mapping that was established by the PROM, use it.
 */
int
find_prom_map(paddr_t pa, bus_type_t iospace, int len, vaddr_t *vap)
{
	u_long pf;
	int pgtype;
	vaddr_t va, eva;
	int sme;
	u_long pte;
	int saved_ctx;

	/*
	 * The mapping must fit entirely within one page.
	 */
	if ((((u_long)pa & PGOFSET) + len) > PAGE_SIZE)
		return EINVAL;

	pf = PA_PGNUM(pa);
	pgtype = iospace << PG_MOD_SHIFT;
	saved_ctx = kernel_context();

	/*
	 * Walk the PROM address space, looking for a page with the
	 * mapping we want.
	 */
	for (va = SUN_MONSTART; va < SUN_MONEND; ) {

		/*
		 * Make sure this segment is mapped.
		 */
		sme = get_segmap(va);
		if (sme == SEGINV) {
			va += NBSG;
			continue;	/* next segment */
		}

		/*
		 * Walk the pages of this segment.
		 */
		for (eva = va + NBSG; va < eva; va += PAGE_SIZE) {
			pte = get_pte(va);

			if ((pte & (PG_VALID | PG_TYPE)) ==
			    (PG_VALID | pgtype) &&
			    PG_PFNUM(pte) == pf) {
				/*
				 * Found the PROM mapping.
				 * Note: preserve page offset.
				 */
				*vap = (va | ((vaddr_t)pa & PGOFSET));
				restore_context(saved_ctx);
				return 0;
			}
		}
	}
	restore_context(saved_ctx);
	return ENOENT;
}