/* $NetBSD: machdep.c,v 1.83 2020/06/11 19:20:45 ad Exp $ */

/*
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah Hdr: machdep.c 1.74 92/12/20
 *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
 */

/*
 * Copyright (c) 2001 Matthew Fredette.
 * Copyright (c) 1994, 1995 Gordon W. Ross
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1988 University of Utah.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah Hdr: machdep.c 1.74 92/12/20
 *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.83 2020/06/11 19:20:45 ad Exp $");

#include "opt_ddb.h"
#include "opt_fpu_emulate.h"
#include "opt_kgdb.h"
#include "opt_modular.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/core.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/exec.h>
#include <sys/exec_aout.h>	/* for MID_* */
#include <sys/extent.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/kcore.h>
#include <sys/kernel.h>
#include <sys/ksyms.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/tty.h>
#include <sys/vnode.h>

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <uvm/uvm.h>	/* XXX: not _extern ... need vm_map_create */

#include <dev/cons.h>
#include <dev/mm.h>

#define _SUN68K_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/idprom.h>
#include <machine/intr.h>
#include <machine/kcore.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/promlib.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/reg.h>

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#include <ddb/db_sym.h>
#endif

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sun2/sun2/control.h>
#include <sun2/sun2/enable.h>
#include <sun2/sun2/machdep.h>

#include <sun68k/sun68k/vme_sun68k.h>

#include "ksyms.h"

/* Defined in locore.s */
extern char kernel_text[];
/* Defined by the linker */
extern char etext[];
/* Defined in vfs_bio.c */
extern u_int bufpages;

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

struct vm_map *phys_map = NULL;

int fputype;
void *msgbufaddr;

/* Virtual page frame for /dev/mem (see mem.c) */
vaddr_t vmmap;

/* Soft copy of the enable register. */
volatile u_short enable_reg_soft = ENABLE_REG_SOFT_UNDEF;

/*
 * Our no-fault fault handler.
 */
label_t *nofault;

/*
 * dvmamap is used to manage DVMA memory.
 */
static struct extent *dvmamap;

/* Our private scratch page for dumping the MMU. */
static vaddr_t dumppage;

static void identifycpu(void);
static void initcpu(void);

/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1,2, and mountroot, etc.
 */
void
cpu_startup(void)
{
        void *v;
        vaddr_t minaddr, maxaddr;
        char pbuf[9];

        /*
         * Initialize message buffer (for kernel printf).
         * This is put in physical pages four through seven
         * so it will always be in the same place after a
         * reboot. (physical pages 0-3 are reserved by the PROM
         * for its vector table and other stuff.)
         * Its mapping was prepared in pmap_bootstrap().
         * Also, offset some to avoid PROM scribbles.
         */
        v = (void *) (PAGE_SIZE * 4);
        msgbufaddr = (void *)((char *)v + MSGBUFOFF);
        initmsgbuf(msgbufaddr, MSGBUFSIZE);

#if NKSYMS || defined(DDB) || defined(MODULAR)
        {
                extern int nsym;
                extern char *ssym, *esym;

                ksyms_addsyms_elf(nsym, ssym, esym);
        }
#endif /* DDB */

        /*
         * Good {morning,afternoon,evening,night}.
         */
        printf("%s%s", copyright, version);
        identifycpu();
        fputype = FPU_NONE;
#ifdef FPU_EMULATE
        printf("fpu: emulator\n");
#else
        printf("fpu: no math support\n");
#endif

        format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
        printf("total memory = %s\n", pbuf);

        /*
         * XXX fredette - we force a small number of buffers
         * to help me debug this on my low-memory machine.
         * this should go away at some point, allowing the
         * normal automatic buffer-sizing to happen.
         */
        bufpages = 37;

        /*
         * Get scratch page for dumpsys().
         */
        if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED))
            == 0)
                panic("startup: alloc dumppage");


        minaddr = 0;

        /*
         * Allocate a submap for physio
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, false, NULL);

        format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
        printf("avail memory = %s\n", pbuf);

        /*
         * Allocate a virtual page (for use by /dev/mem)
         * This page is handed to pmap_enter() therefore
         * it has to be in the normal kernel VA range.
         */
        vmmap = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
            UVM_KMF_VAONLY | UVM_KMF_WAITVA);

        /*
         * Allocate DMA map for devices on the bus.
         */
        dvmamap = extent_create("dvmamap",
            DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
            0, 0, EX_NOWAIT);
        if (dvmamap == NULL)
                panic("unable to allocate DVMA map");

        /*
         * Set up CPU-specific registers, cache, etc.
         */
        initcpu();
}

/*
 * Info for CTL_HW
 */
char machine[16] = MACHINE;		/* from <machine/param.h> */
char kernel_arch[16] = "sun2";		/* XXX needs a sysctl node */

/*
 * Determine which Sun2 model we are running on.
 */
void
identifycpu(void)
{
        extern char *cpu_string;	/* XXX */

        /* Other stuff? (VAC, mc6888x version, etc.) */
        /* Note: miniroot cares about the kernel_arch part. */
        cpu_setmodel("%s %s", kernel_arch, cpu_string);

        printf("Model: %s\n", cpu_getmodel());
}

/*
 * machine dependent system variables.
 */
#if 0	/* XXX - Not yet... */
static int
sysctl_machdep_root_device(SYSCTLFN_ARGS)
{
        struct sysctlnode node = *rnode;

        node.sysctl_data = some permutation on root_device;
        node.sysctl_size = strlen(root_device) + 1;
        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
#endif

static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
        struct sysctlnode node = *rnode;
        char *cp;

        cp = prom_getbootfile();
        if (cp == NULL || cp[0] == '\0')
                return (ENOENT);

        node.sysctl_data = cp;
        node.sysctl_size = strlen(cp) + 1;
        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "machdep", NULL,
            NULL, 0, NULL, 0,
            CTL_MACHDEP, CTL_EOL);

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "console_device", NULL,
            sysctl_consdev, 0, NULL, sizeof(dev_t),
            CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
#if 0	/* XXX - Not yet... */
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRING, "root_device", NULL,
            sysctl_machdep_root_device, 0, NULL, 0,
            CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
#endif
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRING, "booted_kernel", NULL,
            sysctl_machdep_booted_kernel, 0, NULL, 0,
            CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
}

/* See: sig_machdep.c */

/*
 * Do a sync in preparation for a reboot.
 * XXX - This could probably be common code.
 * XXX - And now, most of it is in vfs_shutdown()
 * XXX - Put waittime checks in there too?
 */
int waittime = -1;	/* XXX - Who else looks at this? -gwr */
static void
reboot_sync(void)
{

        /* Check waittime here to localize its use to this function. */
        if (waittime >= 0)
                return;
        waittime = 0;
        vfs_shutdown();
}

/*
 * Common part of the BSD and SunOS reboot system calls.
 */
__dead void
cpu_reboot(int howto, char *user_boot_string)
{
        char *bs, *p;
        char default_boot_string[8];

        /* If system is cold, just halt. (early panic?) */
        if (cold)
                goto haltsys;

        /* Un-blank the screen if appropriate. */
        cnpollc(1);

        if ((howto & RB_NOSYNC) == 0) {
                reboot_sync();
                /*
                 * If we've been adjusting the clock, the todr
                 * will be out of synch; adjust it now.
                 *
                 * XXX - However, if the kernel has been sitting in ddb,
                 * the time will be way off, so don't set the HW clock!
                 * XXX - Should do sanity check against HW clock. -gwr
                 */
                /* resettodr(); */
        }

        /* Disable interrupts. */
        splhigh();

        /* Write out a crash dump if asked. */
        if (howto & RB_DUMP)
                dumpsys();

        /* run any shutdown hooks */
        doshutdownhooks();

        pmf_system_shutdown(boothowto);

        if (howto & RB_HALT) {
haltsys:
                printf("halted.\n");
                prom_halt();
        }

        /*
         * Automatic reboot.
         */
        bs = user_boot_string;
        if (bs == NULL) {
                /*
                 * Build our own boot string with an empty
                 * boot device/file and (maybe) some flags.
                 * The PROM will supply the device/file name.
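                 * For example, a single-user reboot request ends up
                 * passing just " -s" to prom_boot() below.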
                 */
                bs = default_boot_string;
                *bs = '\0';
                if (howto & (RB_KDB|RB_ASKNAME|RB_SINGLE)) {
                        /* Append the boot flags. */
                        p = bs;
                        *p++ = ' ';
                        *p++ = '-';
                        if (howto & RB_KDB)
                                *p++ = 'd';
                        if (howto & RB_ASKNAME)
                                *p++ = 'a';
                        if (howto & RB_SINGLE)
                                *p++ = 's';
                        *p = '\0';
                }
        }
        printf("rebooting...\n");
        prom_boot(bs);
        for (;;) ;
        /*NOTREACHED*/
}

/*
 * These variables are needed by /sbin/savecore
 */
uint32_t dumpmag = 0x8fca0101;	/* magic number */
int dumpsize = 0;		/* pages */
long dumplo = 0;		/* blocks */

#define DUMP_EXTRA	3	/* CPU-dependent extra pages */

/*
 * This is called by main to set dumplo, dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf(void)
{
        int devblks;	/* size of dump device in blocks */
        int dumpblks;	/* size of dump image in blocks */

        if (dumpdev == NODEV)
                return;

        devblks = bdev_size(dumpdev);
        if (devblks <= ctod(1))
                return;
        devblks &= ~(ctod(1)-1);

        /*
         * Note: savecore expects dumpsize to be the
         * number of pages AFTER the dump header.
         */
        dumpsize = physmem;

        /* Position dump image near end of space, page aligned. */
        dumpblks = ctod(physmem + DUMP_EXTRA);
        dumplo = devblks - dumpblks;

        /* If it does not fit, truncate it by moving dumplo. */
        /* Note: Must force signed comparison. */
        if (dumplo < ((long)ctod(1))) {
                dumplo = ctod(1);
                dumpsize = dtoc(devblks - dumplo) - DUMP_EXTRA;
        }
}

/* Note: gdb looks for "dumppcb" in a kernel crash dump. */
struct pcb dumppcb;
extern paddr_t avail_start;

/*
 * Write a crash dump.  The format while in swap is:
 *   kcore_seg_t cpu_hdr;
 *   cpu_kcore_hdr_t cpu_data;
 *   padding (PAGE_SIZE-sizeof(kcore_seg_t))
 *   pagemap (2*PAGE_SIZE)
 *   physical memory...
 */
void
dumpsys(void)
{
        const struct bdevsw *dsw;
        kcore_seg_t *kseg_p;
        cpu_kcore_hdr_t *chdr_p;
        struct sun2_kcore_hdr *sh;
        char *vaddr;
        paddr_t paddr;
        int psize, todo, chunk;
        daddr_t blkno;
        int error = 0;

        if (dumpdev == NODEV)
                return;
        dsw = bdevsw_lookup(dumpdev);
        if (dsw == NULL || dsw->d_psize == NULL)
                return;
        if (dumppage == 0)
                return;

        /*
         * For dumps during autoconfiguration,
         * if the dump device has already been configured...
         */
        if (dumpsize == 0)
                cpu_dumpconf();
        if (dumplo <= 0) {
                printf("\ndump to dev %u,%u not possible\n",
                    major(dumpdev), minor(dumpdev));
                return;
        }
        savectx(&dumppcb);

        psize = bdev_size(dumpdev);
        if (psize == -1) {
                printf("dump area unavailable\n");
                return;
        }

        printf("\ndumping to dev %u,%u offset %ld\n",
            major(dumpdev), minor(dumpdev), dumplo);

        /*
         * Prepare the dump header, including MMU state.
         */
        blkno = dumplo;
        todo = dumpsize;	/* pages */
        vaddr = (char*)dumppage;
        memset(vaddr, 0, PAGE_SIZE);

        /* Set pointers to all three parts. */
        kseg_p = (kcore_seg_t *)vaddr;
        chdr_p = (cpu_kcore_hdr_t *) (kseg_p + 1);
        sh = &chdr_p->un._sun2;

        /* Fill in kcore_seg_t part. */
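        /* (c_size covers the rest of the DUMP_EXTRA header pages,
         *  i.e. everything after the kcore_seg_t itself.) */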
        CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
        kseg_p->c_size = (ctob(DUMP_EXTRA) - sizeof(*kseg_p));

        /* Fill in cpu_kcore_hdr_t part. */
        strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
        chdr_p->page_size = PAGE_SIZE;
        chdr_p->kernbase = KERNBASE;

        /* Fill in the sun2_kcore_hdr part (MMU state). */
        pmap_kcore_hdr(sh);

        /* Write out the dump header. */
        error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
        if (error)
                goto fail;
        blkno += btodb(PAGE_SIZE);

        /* translation RAM (pages zero through seven) */
        for(chunk = 0; chunk < (PAGE_SIZE * 8); chunk += PAGE_SIZE) {
                pmap_get_pagemap((int*)vaddr, chunk);
                error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
                if (error)
                        goto fail;
                blkno += btodb(PAGE_SIZE);
        }

        /*
         * Now dump physical memory.  Have to do it in two chunks.
         * The first chunk is "unmanaged" (by the VM code) and its
         * range of physical addresses is not allowed in pmap_enter.
         * However, that segment is mapped linearly, so we can just
         * use the virtual mappings already in place.  The second
         * chunk is done the normal way, using pmap_enter.
         *
         * Note that vaddr==(paddr+KERNBASE) for paddr=0 through etext.
         */

        /* Do the first chunk (0 <= PA < avail_start) */
        paddr = 0;
        chunk = btoc(avail_start);
        if (chunk > todo)
                chunk = todo;
        do {
                if ((todo & 0xf) == 0)
                        printf_nolog("\r%4d", todo);
                vaddr = (char*)(paddr + KERNBASE);
                error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
                if (error)
                        goto fail;
                paddr += PAGE_SIZE;
                blkno += btodb(PAGE_SIZE);
                --todo;
        } while (--chunk > 0);

        /* Do the second chunk (avail_start <= PA < dumpsize) */
        vaddr = (char*)vmmap;	/* Borrow /dev/mem VA */
        do {
                if ((todo & 0xf) == 0)
                        printf_nolog("\r%4d", todo);
                pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ, 0);
                pmap_update(pmap_kernel());
                error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
                pmap_kremove(vmmap, PAGE_SIZE);
                pmap_update(pmap_kernel());
                if (error)
                        goto fail;
                paddr += PAGE_SIZE;
                blkno += btodb(PAGE_SIZE);
        } while (--todo > 0);

        printf("\rdump succeeded\n");
        return;
fail:
        printf(" dump error=%d\n", error);
}

static void
initcpu(void)
{
        /* XXX: Enable RAM parity/ECC checking? */
        /* XXX: parityenable(); */

}

/* straptrap() in trap.c */

/* from hp300: badaddr() */

/* XXX: parityenable() ? */
/* regdump() moved to regdump.c */

/*
 * cpu_exec_aout_makecmds():
 *	CPU-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{
        return ENOEXEC;
}

#if 0
/*
 * Soft interrupt support.
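 *
 * isr_soft_request() sets a per-level request bit (1 << level) through
 * enable_reg_or(); isr_soft_clear() clears it again with enable_reg_and().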
 */
void
isr_soft_request(int level)
{
        u_char bit;

        if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
                return;

        bit = 1 << level;
        enable_reg_or(bit);
}

void
isr_soft_clear(int level)
{
        u_char bit;

        if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
                return;

        bit = 1 << level;
        enable_reg_and(~bit);
}
#endif

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
        struct vm_page *m;
        paddr_t pa;
        bus_addr_t dva;
        bus_size_t sgsize;
        struct pglist *mlist;
        int pagesz = PAGE_SIZE;
        int error;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_nsegs = 0;
        map->dm_mapsize = 0;

        /* Allocate DVMA addresses */
        sgsize = (size + pagesz - 1) & -pagesz;

        /*
         * If the device can see our entire 24-bit address space,
         * we can use any properly aligned virtual addresses.
         */
        if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
                dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
                    pagesz, 0);
                if (dva == 0)
                        return (ENOMEM);
        }

        /*
         * Otherwise, we need virtual addresses in DVMA space.
         */
        else {
                error = extent_alloc(dvmamap, sgsize, pagesz,
                    map->_dm_boundary,
                    (flags & BUS_DMA_NOWAIT) == 0
                        ? EX_WAITOK : EX_NOWAIT,
                    (u_long *)&dva);
                if (error)
                        return (error);
        }

        /* Fill in the segment. */
        map->dm_segs[0].ds_addr = dva;
        map->dm_segs[0].ds_len = size;
        map->dm_segs[0]._ds_va = dva;
        map->dm_segs[0]._ds_sgsize = sgsize;

        /* Map physical pages into MMU */
        mlist = segs[0]._ds_mlist;
        for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
                if (sgsize == 0)
                        panic("_bus_dmamap_load_raw: size botch");
                pa = VM_PAGE_TO_PHYS(m);
                pmap_enter(pmap_kernel(), dva,
                    (pa & -pagesz) | PMAP_NC,
                    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

                dva += pagesz;
                sgsize -= pagesz;
        }
        pmap_update(pmap_kernel());

        /* Make the map truly valid. */
        map->dm_nsegs = 1;
        map->dm_mapsize = size;

        return (0);
}

/*
 * load DMA map with a linear buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
        bus_size_t sgsize;
        vaddr_t va = (vaddr_t)buf;
        int pagesz = PAGE_SIZE;
        bus_addr_t dva;
        pmap_t pmap;
        int rv __diagused;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_nsegs = 0;
        map->dm_mapsize = 0;

        if (buflen > map->_dm_size)
                return (EINVAL);

        /*
         * A 24-bit device can see all of our kernel address space, so
         * if we have KVAs, we can just load them as-is, no mapping
         * necessary.
         */
        if ((map->_dm_flags & BUS_DMA_24BIT) != 0 && p == NULL) {
                /*
                 * XXX Need to implement "don't DMA across this boundary".
                 */
                if (map->_dm_boundary != 0)
                        panic("bus_dmamap_load: boundaries not implemented");
                map->dm_mapsize = buflen;
                map->dm_nsegs = 1;
                map->dm_segs[0].ds_addr = (bus_addr_t)va;
                map->dm_segs[0].ds_len = buflen;
                map->_dm_flags |= _BUS_DMA_DIRECTMAP;
                return (0);
        }

        /*
         * Allocate a region in DVMA space.
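         * The allocation covers the buffer's page offset plus its length,
         * rounded up to whole pages, so the DVMA pages can simply alias
         * the caller's pages below.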
         */
        sgsize = m68k_round_page(buflen + (va & (pagesz - 1)));

        if (extent_alloc(dvmamap, sgsize, pagesz, map->_dm_boundary,
            (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
            (u_long *)&dva) != 0) {
                return (ENOMEM);
        }

        /* Fill in the segment. */
        map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
        map->dm_segs[0].ds_len = buflen;
        map->dm_segs[0]._ds_va = dva;
        map->dm_segs[0]._ds_sgsize = sgsize;

        /*
         * Now map the DVMA addresses we allocated to point to the
         * pages of the caller's buffer.
         */
        if (p != NULL)
                pmap = p->p_vmspace->vm_map.pmap;
        else
                pmap = pmap_kernel();

        for (; buflen > 0; ) {
                paddr_t pa;
                /*
                 * Get the physical address for this page.
                 */
                rv = pmap_extract(pmap, va, &pa);
#ifdef DIAGNOSTIC
                if (!rv)
                        panic("_bus_dmamap_load: no page");
#endif	/* DIAGNOSTIC */

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = pagesz - (va & (pagesz - 1));
                if (buflen < sgsize)
                        sgsize = buflen;

                pmap_enter(pmap_kernel(), dva,
                    (pa & -pagesz) | PMAP_NC,
                    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

                dva += pagesz;
                va += sgsize;
                buflen -= sgsize;
        }
        pmap_update(pmap_kernel());

        /* Make the map truly valid. */
        map->dm_nsegs = 1;
        map->dm_mapsize = map->dm_segs[0].ds_len;

        return (0);
}

/*
 * unload a DMA map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
        bus_dma_segment_t *segs = map->dm_segs;
        int nsegs = map->dm_nsegs;
        int flags = map->_dm_flags;
        bus_addr_t dva;
        bus_size_t len;
        int s, error;

        if (nsegs != 1)
                panic("_bus_dmamem_unload: nsegs = %d", nsegs);

        /*
         * _BUS_DMA_DIRECTMAP is set iff this map was loaded using
         * _bus_dmamap_load for a 24-bit device.
         */
        if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
                /* Nothing to release */
                map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
        }

        /*
         * Otherwise, this map was loaded using _bus_dmamap_load for a
         * non-24-bit device, or using _bus_dmamap_load_raw.
         */
        else {
                dva = segs[0]._ds_va & -PAGE_SIZE;
                len = segs[0]._ds_sgsize;

                /*
                 * Unmap the DVMA addresses.
                 */
                pmap_remove(pmap_kernel(), dva, dva + len);
                pmap_update(pmap_kernel());

                /*
                 * Free the DVMA addresses.
                 */
                if ((flags & BUS_DMA_24BIT) != 0) {
                        /*
                         * This map was loaded using _bus_dmamap_load_raw
                         * for a 24-bit device.
                         */
                        uvm_unmap(kernel_map, dva, dva + len);
                } else {
                        /*
                         * This map was loaded using _bus_dmamap_load or
                         * _bus_dmamap_load_raw for a non-24-bit device.
                         */
                        s = splhigh();
                        error = extent_free(dvmamap, dva, len, EX_NOWAIT);
                        splx(s);
                        if (error != 0)
                                printf("warning: %ld of DVMA space lost\n", len);
                }
        }

        /* Mark the mappings as invalid. */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
}

/*
 * Translate a VME address and address modifier
 * into a CPU physical address and page type.
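 *
 * Only supervisor data accesses are handled: A16 space is placed at
 * physical 0x00ff0000 and A24 space at physical 0; any other address
 * modifier returns ENOENT.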
 */
int
vmebus_translate(vme_am_t mod, vme_addr_t addr, bus_type_t *btp,
    bus_addr_t *bap)
{
        bus_addr_t base;

        switch(mod) {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)

        case (VME_AM_A16|_DS):
                base = 0x00ff0000;
                break;

        case (VME_AM_A24|_DS):
                base = 0;
                break;

        default:
                return (ENOENT);
#undef _DS
        }

        *bap = base | addr;
        *btp = (*bap & 0x800000 ? PMAP_VME8 : PMAP_VME0);
        return (0);
}

/*
 * If we can find a mapping that was established by the PROM, use it.
 */
int
find_prom_map(paddr_t pa, bus_type_t iospace, int len, vaddr_t *vap)
{
        u_long pf;
        int pgtype;
        vaddr_t va, eva;
        int sme;
        u_long pte;
        int saved_ctx;

        /*
         * The mapping must fit entirely within one page.
         */
        if ((((u_long)pa & PGOFSET) + len) > PAGE_SIZE)
                return EINVAL;

        pf = PA_PGNUM(pa);
        pgtype = iospace << PG_MOD_SHIFT;
        saved_ctx = kernel_context();

        /*
         * Walk the PROM address space, looking for a page with the
         * mapping we want.
         */
        for (va = SUN_MONSTART; va < SUN_MONEND; ) {

                /*
                 * Make sure this segment is mapped.
                 */
                sme = get_segmap(va);
                if (sme == SEGINV) {
                        va += NBSG;
                        continue;	/* next segment */
                }

                /*
                 * Walk the pages of this segment.
                 */
                for(eva = va + NBSG; va < eva; va += PAGE_SIZE) {
                        pte = get_pte(va);

                        if ((pte & (PG_VALID | PG_TYPE)) ==
                            (PG_VALID | pgtype) &&
                            PG_PFNUM(pte) == pf)
                        {
                                /*
                                 * Found the PROM mapping.
                                 * note: preserve page offset
                                 */
                                *vap = (va | ((vaddr_t)pa & PGOFSET));
                                restore_context(saved_ctx);
                                return 0;
                        }
                }
        }
        restore_context(saved_ctx);
        return ENOENT;
}

int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{

        /* Allow access only in "managed" RAM. */
        if (pa < avail_start || pa >= avail_end)
                return EFAULT;
        return 0;
}

bool
mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
{

        if (paddr >= avail_start)
                return false;
        *vaddr = paddr;
        return true;
}

/*
 * Allow access to the PROM mapping similar to uvm_kernacc().
 */
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{

        if ((vaddr_t)ptr < SUN2_PROM_BASE || (vaddr_t)ptr > SUN2_MONEND) {
                *handled = false;
                return 0;
        }

        *handled = true;
        /* Reading the PROM itself is OK, writing is not. */
        if ((prot & VM_PROT_WRITE) == 0)
                return 0;
        return EFAULT;
}

#ifdef MODULAR
/*
 * Push any modules loaded by the bootloader etc.
 */
void
module_init_md(void)
{
}
#endif