1 /* $NetBSD: machdep.c,v 1.86 2023/12/20 05:13:35 thorpej Exp $ */ 2 3 /* 4 * Copyright (c) 1982, 1986, 1990, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * from: Utah Hdr: machdep.c 1.74 92/12/20 36 * from: @(#)machdep.c 8.10 (Berkeley) 4/20/94 37 */ 38 39 /* 40 * Copyright (c) 2001 Matthew Fredette. 41 * Copyright (c) 1994, 1995 Gordon W. Ross 42 * Copyright (c) 1993 Adam Glass 43 * Copyright (c) 1988 University of Utah. 44 * 45 * This code is derived from software contributed to Berkeley by 46 * the Systems Programming Group of the University of Utah Computer 47 * Science Department. 48 * 49 * Redistribution and use in source and binary forms, with or without 50 * modification, are permitted provided that the following conditions 51 * are met: 52 * 1. Redistributions of source code must retain the above copyright 53 * notice, this list of conditions and the following disclaimer. 54 * 2. Redistributions in binary form must reproduce the above copyright 55 * notice, this list of conditions and the following disclaimer in the 56 * documentation and/or other materials provided with the distribution. 57 * 3. All advertising materials mentioning features or use of this software 58 * must display the following acknowledgement: 59 * This product includes software developed by the University of 60 * California, Berkeley and its contributors. 61 * 4. Neither the name of the University nor the names of its contributors 62 * may be used to endorse or promote products derived from this software 63 * without specific prior written permission. 64 * 65 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 66 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 67 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 68 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 69 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 70 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 71 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 72 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 73 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 74 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 75 * SUCH DAMAGE. 76 * 77 * from: Utah Hdr: machdep.c 1.74 92/12/20 78 * from: @(#)machdep.c 8.10 (Berkeley) 4/20/94 79 */ 80 81 /*- 82 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 83 * All rights reserved. 84 * 85 * This code is derived from software contributed to The NetBSD Foundation 86 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 87 * NASA Ames Research Center. 88 * 89 * Redistribution and use in source and binary forms, with or without 90 * modification, are permitted provided that the following conditions 91 * are met: 92 * 1. Redistributions of source code must retain the above copyright 93 * notice, this list of conditions and the following disclaimer. 94 * 2. Redistributions in binary form must reproduce the above copyright 95 * notice, this list of conditions and the following disclaimer in the 96 * documentation and/or other materials provided with the distribution. 97 * 98 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 99 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 100 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 101 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 102 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 103 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 104 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 105 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 106 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 107 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 108 * POSSIBILITY OF SUCH DAMAGE. 109 */ 110 111 /* 112 * Copyright (c) 1992, 1993 113 * The Regents of the University of California. All rights reserved. 114 * 115 * This software was developed by the Computer Systems Engineering group 116 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 117 * contributed to Berkeley. 118 * 119 * All advertising materials mentioning features or use of this software 120 * must display the following acknowledgement: 121 * This product includes software developed by the University of 122 * California, Lawrence Berkeley Laboratory. 123 * 124 * Redistribution and use in source and binary forms, with or without 125 * modification, are permitted provided that the following conditions 126 * are met: 127 * 1. Redistributions of source code must retain the above copyright 128 * notice, this list of conditions and the following disclaimer. 129 * 2. Redistributions in binary form must reproduce the above copyright 130 * notice, this list of conditions and the following disclaimer in the 131 * documentation and/or other materials provided with the distribution. 132 * 3. Neither the name of the University nor the names of its contributors 133 * may be used to endorse or promote products derived from this software 134 * without specific prior written permission. 
135 * 136 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 137 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 138 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 139 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 140 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 141 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 142 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 143 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 144 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 145 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 146 * SUCH DAMAGE. 147 * 148 * @(#)machdep.c 8.6 (Berkeley) 1/14/94 149 */ 150 151 #include <sys/cdefs.h> 152 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.86 2023/12/20 05:13:35 thorpej Exp $"); 153 154 #include "opt_ddb.h" 155 #include "opt_fpu_emulate.h" 156 #include "opt_kgdb.h" 157 #include "opt_modular.h" 158 159 #include <sys/param.h> 160 #include <sys/buf.h> 161 #include <sys/conf.h> 162 #include <sys/core.h> 163 #include <sys/cpu.h> 164 #include <sys/device.h> 165 #include <sys/exec.h> 166 #include <sys/exec_aout.h> /* for MID_* */ 167 #include <sys/vmem.h> 168 #include <sys/file.h> 169 #include <sys/ioctl.h> 170 #include <sys/kcore.h> 171 #include <sys/kernel.h> 172 #include <sys/ksyms.h> 173 #include <sys/mbuf.h> 174 #include <sys/module.h> 175 #include <sys/mount.h> 176 #include <sys/msgbuf.h> 177 #include <sys/proc.h> 178 #include <sys/reboot.h> 179 #include <sys/syscallargs.h> 180 #include <sys/sysctl.h> 181 #include <sys/systm.h> 182 #include <sys/tty.h> 183 #include <sys/vnode.h> 184 185 #ifdef KGDB 186 #include <sys/kgdb.h> 187 #endif 188 189 #include <uvm/uvm.h> /* XXX: not _extern ... 
need vm_map_create */

#include <dev/cons.h>
#include <dev/mm.h>

#define _SUN68K_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/idprom.h>
#include <machine/intr.h>
#include <machine/kcore.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/promlib.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/reg.h>

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#include <ddb/db_sym.h>
#endif

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sun2/sun2/control.h>
#include <sun2/sun2/enable.h>
#include <sun2/sun2/machdep.h>

#include <sun68k/sun68k/vme_sun68k.h>

#include "ksyms.h"

/* Defined in locore.s */
extern char kernel_text[];
/* Defined by the linker */
extern char etext[];
/* Defined in vfs_bio.c */
extern u_int bufpages;

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

/* Submap of kernel_map used for physio transfers. */
struct vm_map *phys_map = NULL;

/* FPU type; set to FPU_NONE in cpu_startup() (no FPU hardware on sun2). */
int fputype;

/* Kernel message buffer; its mapping is prepared in pmap_bootstrap(). */
void * msgbufaddr;

/* Virtual page frame for /dev/mem (see mem.c) */
vaddr_t vmmap;

/* Soft copy of the enable register. */
volatile u_short enable_reg_soft = ENABLE_REG_SOFT_UNDEF;

/*
 * Our no-fault fault handler.
 */
label_t *nofault;

/*
 * dvmamap is used to manage DVMA memory.
 */
vmem_t *dvma_arena;

/* Our private scratch page for dumping the MMU. */
static vaddr_t dumppage;

static void identifycpu(void);
static void initcpu(void);

/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1,2, and mountroot, etc.
 */
void
cpu_startup(void)
{
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	/*
	 * Initialize message buffer (for kernel printf).
	 * This is put in physical pages four through seven
	 * so it will always be in the same place after a
	 * reboot. (physical pages 0-3 are reserved by the PROM
	 * for its vector table and other stuff.)
	 * Its mapping was prepared in pmap_bootstrap().
	 * Also, offset some to avoid PROM scribbles.
	 */
	v = (void *) (PAGE_SIZE * 4);
	msgbufaddr = (void *)((char *)v + MSGBUFOFF);
	initmsgbuf(msgbufaddr, MSGBUFSIZE);

#if NKSYMS || defined(DDB) || defined(MODULAR)
	{
		/* Symbol table bounds handed over by the boot loader. */
		extern int nsym;
		extern char *ssym, *esym;

		ksyms_addsyms_elf(nsym, ssym, esym);
	}
#endif /* NKSYMS || DDB || MODULAR */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	identifycpu();
	fputype = FPU_NONE;
#ifdef FPU_EMULATE
	printf("fpu: emulator\n");
#else
	printf("fpu: no math support\n");
#endif

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * XXX fredette - we force a small number of buffers
	 * to help me debug this on my low-memory machine.
	 * this should go away at some point, allowing the
	 * normal automatic buffer-sizing to happen.
	 */
	bufpages = 37;

	/*
	 * Get scratch page for dumpsys().
	 * Allocated early (wired) so a crash dump is possible even
	 * if later allocations fail.
	 */
	if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE,0, UVM_KMF_WIRED))
	    == 0)
		panic("startup: alloc dumppage");


	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Allocate a virtual page (for use by /dev/mem)
	 * This page is handed to pmap_enter() therefore
	 * it has to be in the normal kernel VA range.
	 */
	vmmap = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);

	/*
	 * Allocate DMA map for devices on the bus.
	 */
	dvma_arena = vmem_create("dvmamap", DVMA_MAP_BASE, DVMA_MAP_AVAIL,
	    PAGE_SIZE,		/* quantum */
	    NULL,		/* importfn */
	    NULL,		/* releasefn */
	    NULL,		/* source */
	    0,			/* qcache_max */
	    VM_SLEEP,
	    IPL_VM);
	if (dvma_arena == NULL)
		panic("unable to allocate DVMA map");

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();
}

/*
 * Info for CTL_HW
 */
char	machine[16] = MACHINE;		/* from <machine/param.h> */
char	kernel_arch[16] = "sun2";	/* XXX needs a sysctl node */

/*
 * Determine which Sun2 model we are running on.
 */
void
identifycpu(void)
{
	extern char *cpu_string;	/* XXX */

	/* Other stuff? (VAC, mc6888x version, etc.) */
	/* Note: miniroot cares about the kernel_arch part. */
	cpu_setmodel("%s %s", kernel_arch, cpu_string);

	printf("Model: %s\n", cpu_getmodel());
}

/*
 * machine dependent system variables.
 */
#if 0	/* XXX - Not yet...
 */
static int
sysctl_machdep_root_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;

	node.sysctl_data = some permutation on root_device;
	node.sysctl_size = strlen(root_device) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
#endif

/*
 * sysctl helper: report the kernel image name the PROM booted,
 * or ENOENT if the PROM did not record one.
 */
static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	char *cp;

	cp = prom_getbootfile();
	if (cp == NULL || cp[0] == '\0')
		return (ENOENT);

	node.sysctl_data = cp;
	node.sysctl_size = strlen(cp) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "console_device", NULL,
	    sysctl_consdev, 0, NULL, sizeof(dev_t),
	    CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
#if 0	/* XXX - Not yet... */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "root_device", NULL,
	    sysctl_machdep_root_device, 0, NULL, 0,
	    CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "booted_kernel", NULL,
	    sysctl_machdep_booted_kernel, 0, NULL, 0,
	    CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
}

/* See: sig_machdep.c */

/*
 * Do a sync in preparation for a reboot.
 * XXX - This could probably be common code.
 * XXX - And now, most of it is in vfs_shutdown()
 * XXX - Put waittime checks in there too?
 */
int waittime = -1;	/* XXX - Who else looks at this? -gwr */
static void
reboot_sync(void)
{

	/*
	 * Check waittime here to localize its use to this function.
	 * A second call (e.g. a panic during shutdown) is a no-op.
	 */
	if (waittime >= 0)
		return;
	waittime = 0;
	vfs_shutdown();
}

/*
 * Common part of the BSD and SunOS reboot system calls.
 */
__dead void
cpu_reboot(int howto, char *user_boot_string)
{
	char *bs, *p;
	char default_boot_string[8];

	/* If system is cold, just halt. (early panic?) */
	if (cold)
		goto haltsys;

	/* Un-blank the screen if appropriate. */
	cnpollc(1);

	if ((howto & RB_NOSYNC) == 0) {
		reboot_sync();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 *
		 * XXX - However, if the kernel has been sitting in ddb,
		 * the time will be way off, so don't set the HW clock!
		 * XXX - Should do sanity check against HW clock. -gwr
		 */
		/* resettodr(); */
	}

	/* Disable interrupts. */
	splhigh();

	/* Write out a crash dump if asked. */
	if (howto & RB_DUMP)
		dumpsys();

	/* run any shutdown hooks */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	if (howto & RB_HALT) {
	haltsys:
		printf("halted.\n");
		prom_halt();
	}

	/*
	 * Automatic reboot.
	 */
	bs = user_boot_string;
	if (bs == NULL) {
		/*
		 * Build our own boot string with an empty
		 * boot device/file and (maybe) some flags.
		 * The PROM will supply the device/file name.
		 */
		bs = default_boot_string;
		*bs = '\0';
		if (howto & (RB_KDB|RB_ASKNAME|RB_SINGLE)) {
			/*
			 * Append the boot flags.  Worst case is
			 * " -das\0": six bytes, which fits in
			 * default_boot_string[8].
			 */
			p = bs;
			*p++ = ' ';
			*p++ = '-';
			if (howto & RB_KDB)
				*p++ = 'd';
			if (howto & RB_ASKNAME)
				*p++ = 'a';
			if (howto & RB_SINGLE)
				*p++ = 's';
			*p = '\0';
		}
	}
	printf("rebooting...\n");
	prom_boot(bs);
	for (;;) ;
	/*NOTREACHED*/
}

/*
 * These variables are needed by /sbin/savecore
 */
uint32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

#define	DUMP_EXTRA	3	/* CPU-dependent extra pages */

/*
 * This is called by main to set dumplo, dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf(void)
{
	int devblks;	/* size of dump device in blocks */
	int dumpblks;	/* size of dump image in blocks */

	if (dumpdev == NODEV)
		return;

	devblks = bdev_size(dumpdev);
	if (devblks <= ctod(1))
		return;
	/* Round the device size down to a whole page of blocks. */
	devblks &= ~(ctod(1)-1);

	/*
	 * Note: savecore expects dumpsize to be the
	 * number of pages AFTER the dump header.
	 */
	dumpsize = physmem;

	/* Position dump image near end of space, page aligned. */
	dumpblks = ctod(physmem + DUMP_EXTRA);
	dumplo = devblks - dumpblks;

	/* If it does not fit, truncate it by moving dumplo. */
	/* Note: Must force signed comparison. */
	if (dumplo < ((long)ctod(1))) {
		dumplo = ctod(1);
		dumpsize = dtoc(devblks - dumplo) - DUMP_EXTRA;
	}
}

/* Note: gdb looks for "dumppcb" in a kernel crash dump. */
struct pcb dumppcb;
extern paddr_t avail_start;

/*
 * Write a crash dump.  The format while in swap is:
 *   kcore_seg_t cpu_hdr;
 *   cpu_kcore_hdr_t cpu_data;
 *   padding (PAGE_SIZE-sizeof(kcore_seg_t))
 *   pagemap (2*PAGE_SIZE)
 *   physical memory...
 */
void
dumpsys(void)
{
	const struct bdevsw *dsw;
	kcore_seg_t	*kseg_p;
	cpu_kcore_hdr_t *chdr_p;
	struct sun2_kcore_hdr *sh;
	char *vaddr;
	paddr_t paddr;
	int psize, todo, chunk;
	daddr_t blkno;
	int error = 0;

	/* Bail out quietly if there is nowhere to dump to. */
	if (dumpdev == NODEV)
		return;
	dsw = bdevsw_lookup(dumpdev);
	if (dsw == NULL || dsw->d_psize == NULL)
		return;
	/* Scratch page is allocated in cpu_startup(). */
	if (dumppage == 0)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		return;
	}
	savectx(&dumppcb);

	psize = bdev_size(dumpdev);
	if (psize == -1) {
		printf("dump area unavailable\n");
		return;
	}

	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	/*
	 * Prepare the dump header, including MMU state.
	 */
	blkno = dumplo;
	todo = dumpsize;	/* pages */
	vaddr = (char*)dumppage;
	memset(vaddr, 0, PAGE_SIZE);

	/* Set pointers to all three parts. */
	kseg_p = (kcore_seg_t *)vaddr;
	chdr_p = (cpu_kcore_hdr_t *) (kseg_p + 1);
	sh = &chdr_p->un._sun2;

	/* Fill in kcore_seg_t part. */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = (ctob(DUMP_EXTRA) - sizeof(*kseg_p));

	/* Fill in cpu_kcore_hdr_t part. */
	strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
	chdr_p->page_size = PAGE_SIZE;
	chdr_p->kernbase = KERNBASE;

	/* Fill in the sun2_kcore_hdr part (MMU state). */
	pmap_kcore_hdr(sh);

	/* Write out the dump header. */
	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
	if (error)
		goto fail;
	blkno += btodb(PAGE_SIZE);

	/* translation RAM (pages zero through seven) */
	for(chunk = 0; chunk < (PAGE_SIZE * 8); chunk += PAGE_SIZE) {
		pmap_get_pagemap((int*)vaddr, chunk);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		blkno += btodb(PAGE_SIZE);
	}

	/*
	 * Now dump physical memory.  Have to do it in two chunks.
	 * The first chunk is "unmanaged" (by the VM code) and its
	 * range of physical addresses is not allow in pmap_enter.
	 * However, that segment is mapped linearly, so we can just
	 * use the virtual mappings already in place.  The second
	 * chunk is done the normal way, using pmap_enter.
	 *
	 * Note that vaddr==(paddr+KERNBASE) for paddr=0 through etext.
	 */

	/* Do the first chunk (0 <= PA < avail_start) */
	paddr = 0;
	chunk = btoc(avail_start);
	if (chunk > todo)
		chunk = todo;
	do {
		if ((todo & 0xf) == 0)
			printf_nolog("\r%4d", todo);
		vaddr = (char*)(paddr + KERNBASE);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
		--todo;
	} while (--chunk > 0);

	/*
	 * Do the second chunk (avail_start <= PA < dumpsize)
	 *
	 * NOTE(review): this do/while executes at least once even if
	 * todo reached 0 in the first chunk; that relies on
	 * dumpsize > btoc(avail_start) -- confirm for tiny dumps.
	 */
	vaddr = (char*)vmmap;	/* Borrow /dev/mem VA */
	do {
		if ((todo & 0xf) == 0)
			printf_nolog("\r%4d", todo);
		/* Temporarily map the page uncached, write it, unmap. */
		pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ, 0);
		pmap_update(pmap_kernel());
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		pmap_kremove(vmmap, PAGE_SIZE);
		pmap_update(pmap_kernel());
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
	} while (--todo > 0);

	printf("\rdump succeeded\n");
	return;
fail:
	printf(" dump error=%d\n", error);
}

/* One-time CPU setup called at the end of cpu_startup(). */
static void
initcpu(void)
{
	/* XXX: Enable
 RAM parity/ECC checking? */
	/* XXX: parityenable(); */

}

/* straptrap() in trap.c */

/* from hp300: badaddr() */

/* XXX: parityenable() ? */
/* regdump() moved to regdump.c */

/*
 * cpu_exec_aout_makecmds():
 *	CPU-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 *
 * Always returns ENOEXEC here: no machine-dependent a.out formats
 * are supported.
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{
	return ENOEXEC;
}

#if 0
/*
 * Soft interrupt support.
 */
void
isr_soft_request(int level)
{
	u_char bit;

	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
		return;

	bit = 1 << level;
	enable_reg_or(bit);
}

void
isr_soft_clear(int level)
{
	u_char bit;

	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
		return;

	bit = 1 << level;
	enable_reg_and(~bit);
}
#endif

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 *
 * Allocates DVMA (or skewed kernel VA for 24-bit devices), then maps
 * each physical page from the segment's page list, non-cached and
 * wired, into that range as a single DMA segment.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct vm_page *m;
	paddr_t pa;
	vmem_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	/* Allocate DVMA addresses (size rounded up to a whole page). */
	sgsize = (size + pagesz - 1) & -pagesz;

	/*
	 * If the device can see our entire 24-bit address space,
	 * we can use any properly aligned virtual addresses.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
		    pagesz, 0);
		if (dva == 0)
			return (ENOMEM);
	}

	/*
	 * Otherwise, we need virtual addresses in DVMA space.
	 */
	else {
		const vm_flag_t vmflags = VM_BESTFIT |
		    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

		error = vmem_xalloc(dvma_arena, sgsize,
		    0,			/* alignment */
		    0,			/* phase */
		    map->_dm_boundary,	/* nocross */
		    VMEM_ADDR_MIN,	/* minaddr */
		    VMEM_ADDR_MAX,	/* maxaddr */
		    vmflags,
		    &dva);
		if (error)
			return (error);
	}

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into MMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
		if (sgsize == 0)
			panic("_bus_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		/* Non-cached, wired mapping for DMA coherency. */
		pmap_enter(pmap_kernel(), dva,
		    (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * load DMA map with a linear buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	bus_size_t sgsize;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	vmem_addr_t dva;
	pmap_t pmap;
	int rv __diagused;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * A 24-bit device can see all of our kernel address space, so
	 * if we have KVAs, we can just load them as-is, no mapping
	 * necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0 && p == NULL) {
		/*
		 * XXX Need to implement "don't DMA across this boundary".
		 */
		if (map->_dm_boundary != 0)
			panic("bus_dmamap_load: boundaries not implemented");
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->dm_segs[0].ds_addr = (bus_addr_t)va;
		map->dm_segs[0].ds_len = buflen;
		/* Remember we took the direct path; checked at unload. */
		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
		return (0);
	}

	/*
	 * Allocate a region in DVMA space.
	 * Rounded up to whole pages, including the buffer's
	 * sub-page starting offset.
	 */
	sgsize = m68k_round_page(buflen + (va & (pagesz - 1)));

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	if (vmem_xalloc(dvma_arena, sgsize,
	    0,			/* alignment */
	    0,			/* phase */
	    map->_dm_boundary,	/* nocross */
	    VMEM_ADDR_MIN,	/* minaddr */
	    VMEM_ADDR_MAX,	/* maxaddr */
	    vmflags,
	    &dva) != 0) {
		return (ENOMEM);
	}

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/*
	 * Now map the DVMA addresses we allocated to point to the
	 * pages of the caller's buffer.
	 */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		rv = pmap_extract(pmap, va, &pa);
#ifdef	DIAGNOSTIC
		if (!rv)
			panic("_bus_dmamap_load: no page");
#endif	/* DIAGNOSTIC */

		/*
		 * Compute the segment size, and adjust counts.
		 * First iteration may be a partial page; the rest
		 * advance a full page at a time.
		 */
		sgsize = pagesz - (va & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

		/* Non-cached, wired mapping for DMA coherency. */
		pmap_enter(pmap_kernel(), dva,
		    (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += sgsize;
		buflen -= sgsize;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = map->dm_segs[0].ds_len;

	return (0);
}

/*
 * unload a DMA map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	int flags = map->_dm_flags;
	bus_addr_t dva;
	bus_size_t len;

	/* This implementation only ever creates single-segment maps. */
	if (nsegs != 1)
		panic("_bus_dmamem_unload: nsegs = %d", nsegs);

	/*
	 * _BUS_DMA_DIRECTMAP is set iff this map was loaded using
	 * _bus_dmamap_load for a 24-bit device.
	 */
	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
		/* Nothing to release */
		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
	}

	/*
	 * Otherwise, this map was loaded using _bus_dmamap_load for a
	 * non-24-bit device, or using _bus_dmamap_load_raw.
	 */
	else {
		dva = segs[0]._ds_va & -PAGE_SIZE;
		len = segs[0]._ds_sgsize;

		/*
		 * Unmap the DVMA addresses.
		 */
		pmap_remove(pmap_kernel(), dva, dva + len);
		pmap_update(pmap_kernel());

		/*
		 * Free the DVMA addresses.
		 */
		if ((flags & BUS_DMA_24BIT) != 0) {
			/*
			 * This map was loaded using _bus_dmamap_load_raw
			 * for a 24-bit device.
			 */
			uvm_unmap(kernel_map, dva, dva + len);
		} else {
			/*
			 * This map was loaded using _bus_dmamap_load or
			 * _bus_dmamap_load_raw for a non-24-bit device.
			 */
			vmem_xfree(dvma_arena, dva, len);
		}
	}

	/* Mark the mappings as invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Translate a VME address and address modifier
 * into a CPU physical address and page type.
 *
 * Only the A16 and A24 supervisory-data spaces are supported;
 * other modifiers return ENOENT.
 */
int
vmebus_translate(vme_am_t mod, vme_addr_t addr, bus_type_t *btp,
    bus_addr_t *bap)
{
	bus_addr_t base;

	switch(mod) {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)

	case (VME_AM_A16|_DS):
		base = 0x00ff0000;
		break;

	case (VME_AM_A24|_DS):
		base = 0;
		break;

	default:
		return (ENOENT);
#undef _DS
	}

	*bap = base | addr;
	/* Addresses above 8MB use the VME8 page type, below use VME0. */
	*btp = (*bap & 0x800000 ? PMAP_VME8 : PMAP_VME0);
	return (0);
}

/*
 * If we can find a mapping that was established by the PROM, use it.
 *
 * Walks the PROM's virtual range (SUN_MONSTART..SUN_MONEND), segment
 * by segment and page by page, looking for a valid PTE of the wanted
 * page type whose frame number matches pa.  Runs in the kernel MMU
 * context and restores the previous context before returning.
 */
int
find_prom_map(paddr_t pa, bus_type_t iospace, int len, vaddr_t *vap)
{
	u_long	pf;
	int	pgtype;
	vaddr_t	va, eva;
	int	sme;
	u_long	pte;
	int	saved_ctx;

	/*
	 * The mapping must fit entirely within one page.
	 */
	if ((((u_long)pa & PGOFSET) + len) > PAGE_SIZE)
		return EINVAL;

	pf = PA_PGNUM(pa);
	pgtype = iospace << PG_MOD_SHIFT;
	saved_ctx = kernel_context();

	/*
	 * Walk the PROM address space, looking for a page with the
	 * mapping we want.
	 */
	for (va = SUN_MONSTART; va < SUN_MONEND; ) {

		/*
		 * Make sure this segment is mapped.
		 */
		sme = get_segmap(va);
		if (sme == SEGINV) {
			va += NBSG;
			continue;	/* next segment */
		}

		/*
		 * Walk the pages of this segment.
		 */
		for(eva = va + NBSG; va < eva; va += PAGE_SIZE) {
			pte = get_pte(va);

			if ((pte & (PG_VALID | PG_TYPE)) ==
				(PG_VALID | pgtype) &&
			    PG_PFNUM(pte) == pf)
			{
				/*
				 * Found the PROM mapping.
				 * note: preserve page offset
				 */
				*vap = (va | ((vaddr_t)pa & PGOFSET));
				restore_context(saved_ctx);
				return 0;
			}
		}
	}
	restore_context(saved_ctx);
	return ENOENT;
}

/*
 * Check whether /dev/mem access to physical address pa is allowed.
 * Returns 0 if pa lies in "managed" RAM, EFAULT otherwise.
 */
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{

	/* Allow access only in "managed" RAM. */
	if (pa < avail_start || pa >= avail_end)
		return EFAULT;
	return 0;
}

/*
 * Report whether paddr is directly mapped (VA == PA below avail_start)
 * so /dev/mem can access it without a temporary mapping.
 */
bool
mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
{

	if (paddr >= avail_start)
		return false;
	*vaddr = paddr;
	return true;
}

/*
 * Allow access to the PROM mapping similar to uvm_kernacc().
 */
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{

	/* Addresses outside the PROM range are not handled here. */
	if ((vaddr_t)ptr < SUN2_PROM_BASE || (vaddr_t)ptr > SUN2_MONEND) {
		*handled = false;
		return 0;
	}

	*handled = true;
	/* Read in the PROM itself is OK, write not. */
	if ((prot & VM_PROT_WRITE) == 0)
		return 0;
	return EFAULT;
}

#ifdef MODULAR
/*
 * Push any modules loaded by the bootloader etc.
 * (Nothing to do on this platform.)
 */
void
module_init_md(void)
{
}
#endif