1 /*	$NetBSD: machdep.c,v 1.50 2007/12/04 15:12:07 tsutsui Exp $	*/
2 
3 /*
4  * Copyright (c) 1982, 1986, 1990, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * the Systems Programming Group of the University of Utah Computer
9  * Science Department.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	from: Utah Hdr: machdep.c 1.74 92/12/20
36  *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
37  */
38 
39 /*
40  * Copyright (c) 2001 Matthew Fredette.
41  * Copyright (c) 1994, 1995 Gordon W. Ross
42  * Copyright (c) 1993 Adam Glass
43  * Copyright (c) 1988 University of Utah.
44  *
45  * This code is derived from software contributed to Berkeley by
46  * the Systems Programming Group of the University of Utah Computer
47  * Science Department.
48  *
49  * Redistribution and use in source and binary forms, with or without
50  * modification, are permitted provided that the following conditions
51  * are met:
52  * 1. Redistributions of source code must retain the above copyright
53  *    notice, this list of conditions and the following disclaimer.
54  * 2. Redistributions in binary form must reproduce the above copyright
55  *    notice, this list of conditions and the following disclaimer in the
56  *    documentation and/or other materials provided with the distribution.
57  * 3. All advertising materials mentioning features or use of this software
58  *    must display the following acknowledgement:
59  *	This product includes software developed by the University of
60  *	California, Berkeley and its contributors.
61  * 4. Neither the name of the University nor the names of its contributors
62  *    may be used to endorse or promote products derived from this software
63  *    without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
75  * SUCH DAMAGE.
76  *
77  *	from: Utah Hdr: machdep.c 1.74 92/12/20
78  *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
79  */
80 
81 /*-
82  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
83  * All rights reserved.
84  *
85  * This code is derived from software contributed to The NetBSD Foundation
86  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
87  * NASA Ames Research Center.
88  *
89  * Redistribution and use in source and binary forms, with or without
90  * modification, are permitted provided that the following conditions
91  * are met:
92  * 1. Redistributions of source code must retain the above copyright
93  *    notice, this list of conditions and the following disclaimer.
94  * 2. Redistributions in binary form must reproduce the above copyright
95  *    notice, this list of conditions and the following disclaimer in the
96  *    documentation and/or other materials provided with the distribution.
97  * 3. All advertising materials mentioning features or use of this software
98  *    must display the following acknowledgement:
99  *	This product includes software developed by the NetBSD
100  *	Foundation, Inc. and its contributors.
101  * 4. Neither the name of The NetBSD Foundation nor the names of its
102  *    contributors may be used to endorse or promote products derived
103  *    from this software without specific prior written permission.
104  *
105  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
106  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
107  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
108  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
109  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
110  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
111  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
112  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
113  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
114  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
115  * POSSIBILITY OF SUCH DAMAGE.
116  */
117 
118 /*
119  * Copyright (c) 1992, 1993
120  *	The Regents of the University of California.  All rights reserved.
121  *
122  * This software was developed by the Computer Systems Engineering group
123  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
124  * contributed to Berkeley.
125  *
126  * All advertising materials mentioning features or use of this software
127  * must display the following acknowledgement:
128  *	This product includes software developed by the University of
129  *	California, Lawrence Berkeley Laboratory.
130  *
131  * Redistribution and use in source and binary forms, with or without
132  * modification, are permitted provided that the following conditions
133  * are met:
134  * 1. Redistributions of source code must retain the above copyright
135  *    notice, this list of conditions and the following disclaimer.
136  * 2. Redistributions in binary form must reproduce the above copyright
137  *    notice, this list of conditions and the following disclaimer in the
138  *    documentation and/or other materials provided with the distribution.
139  * 3. All advertising materials mentioning features or use of this software
140  *    must display the following acknowledgement:
141  *	This product includes software developed by the University of
142  *	California, Berkeley and its contributors.
143  * 4. Neither the name of the University nor the names of its contributors
144  *    may be used to endorse or promote products derived from this software
145  *    without specific prior written permission.
146  *
147  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
148  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
149  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
150  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
151  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
152  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
153  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
154  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
155  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
156  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
157  * SUCH DAMAGE.
158  *
159  *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
160  */
161 
162 #include <sys/cdefs.h>
163 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.50 2007/12/04 15:12:07 tsutsui Exp $");
164 
165 #include "opt_ddb.h"
166 #include "opt_kgdb.h"
167 #include "opt_fpu_emulate.h"
168 
169 #include <sys/param.h>
170 #include <sys/systm.h>
171 #include <sys/kernel.h>
172 #include <sys/proc.h>
173 #include <sys/buf.h>
174 #include <sys/reboot.h>
175 #include <sys/conf.h>
176 #include <sys/file.h>
177 #include <sys/device.h>
178 #include <sys/malloc.h>
179 #include <sys/extent.h>
180 #include <sys/mbuf.h>
181 #include <sys/msgbuf.h>
182 #include <sys/ioctl.h>
183 #include <sys/tty.h>
184 #include <sys/mount.h>
185 #include <sys/user.h>
186 #include <sys/exec.h>
187 #include <sys/core.h>
188 #include <sys/kcore.h>
189 #include <sys/vnode.h>
190 #include <sys/syscallargs.h>
191 #include <sys/ksyms.h>
192 #ifdef	KGDB
193 #include <sys/kgdb.h>
194 #endif
195 
196 #include <uvm/uvm.h> /* XXX: not _extern ... need vm_map_create */
197 
198 #include <sys/sysctl.h>
199 
200 #include <dev/cons.h>
201 
202 #include <machine/promlib.h>
203 #include <machine/cpu.h>
204 #include <machine/dvma.h>
205 #include <machine/idprom.h>
206 #include <machine/kcore.h>
207 #include <machine/reg.h>
208 #include <machine/psl.h>
209 #include <machine/pte.h>
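/* Expose the private sun68k bus_dma interface from <machine/bus.h>. */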
210 #define _SUN68K_BUS_DMA_PRIVATE
211 #include <machine/autoconf.h>
212 #include <machine/bus.h>
213 #include <machine/intr.h>
214 #include <machine/pmap.h>
215 
216 #if defined(DDB)
217 #include <machine/db_machdep.h>
218 #include <ddb/db_sym.h>
219 #include <ddb/db_extern.h>
220 #endif
221 
222 #include <dev/vme/vmereg.h>
223 #include <dev/vme/vmevar.h>
224 
225 #include <sun2/sun2/control.h>
226 #include <sun2/sun2/enable.h>
227 #include <sun2/sun2/machdep.h>
228 
229 #include <sun68k/sun68k/vme_sun68k.h>
230 
231 #include "ksyms.h"
232 
233 /* Defined in locore.s */
234 extern char kernel_text[];
235 /* Defined by the linker */
236 extern char etext[];
237 /* Defined in vfs_bio.c */
238 extern u_int bufpages;
239 
240 /* Our exported CPU info; we can have only one. */
241 struct cpu_info cpu_info_store;
242 
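/* Kernel submaps, carved out of kernel_map in cpu_startup(). */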
243 struct vm_map *exec_map = NULL;
244 struct vm_map *mb_map = NULL;
245 struct vm_map *phys_map = NULL;
246 
247 int	physmem;
248 int	fputype;
249 void *	msgbufaddr;
250 
251 /* Virtual page frame for /dev/mem (see mem.c) */
252 vaddr_t vmmap;
253 
254 /*
255  * safepri is a safe priority for sleep to set for a spin-wait
256  * during autoconfiguration or after a panic.
257  */
258 int	safepri = PSL_LOWIPL;
259 
260 /* Soft copy of the enable register. */
261 volatile u_short enable_reg_soft = ENABLE_REG_SOFT_UNDEF;
262 
263 /*
264  * Our no-fault fault handler.
265  */
266 label_t *nofault;
267 
268 /*
269  * dvmamap is used to manage DVMA memory.
270  */
271 static struct extent *dvmamap;
272 
273 /* Our private scratch page for dumping the MMU. */
274 static vaddr_t dumppage;
275 
276 static void identifycpu(void);
277 static void initcpu(void);
278 
279 /*
280  * cpu_startup: allocate memory for variable-sized tables,
281  * initialize CPU, and do autoconfiguration.
282  *
283  * This is called early in init_main.c:main(), after the
284  * kernel memory allocator is ready for use, but before
285  * the creation of processes 1 and 2, mountroot, etc.
286  */
287 void
288 cpu_startup(void)
289 {
290 	void *v;
291 	vaddr_t minaddr, maxaddr;
292 	char pbuf[9];
293 
294 	/*
295 	 * Initialize message buffer (for kernel printf).
296 	 * This is put in physical pages four through seven
297 	 * so it will always be in the same place after a
298 	 * reboot. (physical pages 0-3 are reserved by the PROM
299 	 * for its vector table and other stuff.)
300 	 * Its mapping was prepared in pmap_bootstrap().
301 	 * Also, offset it a little to avoid PROM scribbles.
302 	 */
303 	v = (void *) (PAGE_SIZE * 4);
304 	msgbufaddr = (void *)((char *)v + MSGBUFOFF);
305 	initmsgbuf(msgbufaddr, MSGBUFSIZE);
306 
307 #if NKSYMS || defined(DDB) || defined(LKM)
308 	{
309 		extern int nsym;
310 		extern char *ssym, *esym;
311 
312 		ksyms_init(nsym, ssym, esym);
313 	}
314 #endif /* NKSYMS || DDB || LKM */
315 
316 	/*
317 	 * Good {morning,afternoon,evening,night}.
318 	 */
319 	printf("%s%s", copyright, version);
320 	identifycpu();
321 	fputype = FPU_NONE;
322 #ifdef  FPU_EMULATE
323 	printf("fpu: emulator\n");
324 #else
325 	printf("fpu: no math support\n");
326 #endif
327 
328 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
329 	printf("total memory = %s\n", pbuf);
330 
331 	/*
332 	 * XXX fredette - we force a small number of buffers
333 	 * to help me debug this on my low-memory machine.
334 	 * This should go away at some point, allowing the
335 	 * normal automatic buffer-sizing to happen.
336 	 */
337 	bufpages = 37;
338 
339 	/*
340 	 * Get scratch page for dumpsys().
341 	 */
342 	if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED))
343 	    == 0)
344 		panic("startup: alloc dumppage");
345 
346 
347 	minaddr = 0;
348 	/*
349 	 * Allocate a submap for exec arguments.  This map effectively
350 	 * limits the number of processes exec'ing at any time.
351 	 */
352 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
353 				   NCARGS, VM_MAP_PAGEABLE, false, NULL);
354 
355 	/*
356 	 * Allocate a submap for physio
357 	 */
358 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
359 				   VM_PHYS_SIZE, 0, false, NULL);
360 
361 	/*
362 	 * Finally, allocate mbuf cluster submap.
363 	 */
364 	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
365 				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
366 				 false, NULL);
367 
368 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
369 	printf("avail memory = %s\n", pbuf);
370 
371 	/*
372 	 * Allocate a virtual page (for use by /dev/mem).
373 	 * This page is handed to pmap_enter(), therefore
374 	 * it has to be in the normal kernel VA range.
375 	 */
376 	vmmap = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
377 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
378 
379 	/*
380 	 * Allocate the DVMA map for devices on the bus.
381 	 */
382 	dvmamap = extent_create("dvmamap",
383 	    DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
384 	    M_DEVBUF, 0, 0, EX_NOWAIT);
385 	if (dvmamap == NULL)
386 		panic("unable to allocate DVMA map");
387 
388 	/*
389 	 * Set up CPU-specific registers, cache, etc.
390 	 */
391 	initcpu();
392 }
393 
394 /*
395  * Set registers on exec.
396  */
397 void
398 setregs(struct lwp *l, struct exec_package *pack, u_long stack)
399 {
400 	struct trapframe *tf = (struct trapframe *)l->l_md.md_regs;
401 
402 	tf->tf_sr = PSL_USERSET;
403 	tf->tf_pc = pack->ep_entry & ~1;
404 	tf->tf_regs[D0] = 0;
405 	tf->tf_regs[D1] = 0;
406 	tf->tf_regs[D2] = 0;
407 	tf->tf_regs[D3] = 0;
408 	tf->tf_regs[D4] = 0;
409 	tf->tf_regs[D5] = 0;
410 	tf->tf_regs[D6] = 0;
411 	tf->tf_regs[D7] = 0;
412 	tf->tf_regs[A0] = 0;
413 	tf->tf_regs[A1] = 0;
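	/* Pass the ps_strings address to the new process image in a2. */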
414 	tf->tf_regs[A2] = (int)l->l_proc->p_psstr;
415 	tf->tf_regs[A3] = 0;
416 	tf->tf_regs[A4] = 0;
417 	tf->tf_regs[A5] = 0;
418 	tf->tf_regs[A6] = 0;
419 	tf->tf_regs[SP] = stack;
420 
421 	/* restore a null state frame */
422 	l->l_addr->u_pcb.pcb_fpregs.fpf_null = 0;
423 
424 	l->l_md.md_flags = 0;
425 }
426 
427 /*
428  * Info for CTL_HW
429  */
430 char	machine[16] = MACHINE;		/* from <machine/param.h> */
431 char	kernel_arch[16] = "sun2";	/* XXX needs a sysctl node */
432 char	cpu_model[120];
433 
434 /*
435  * Determine which Sun2 model we are running on.
436  */
437 void
438 identifycpu(void)
439 {
440 	extern char *cpu_string;	/* XXX */
441 
442 	/* Other stuff? (VAC, mc6888x version, etc.) */
443 	/* Note: miniroot cares about the kernel_arch part. */
444 	sprintf(cpu_model, "%s %s", kernel_arch, cpu_string);
445 
446 	printf("Model: %s\n", cpu_model);
447 }
448 
449 /*
450  * machine dependent system variables.
451  */
452 #if 0	/* XXX - Not yet... */
453 static int
454 sysctl_machdep_root_device(SYSCTLFN_ARGS)
455 {
456 	struct sysctlnode node = *rnode;
457 
458 	node.sysctl_data = some permutation on root_device;
459 	node.sysctl_size = strlen(root_device) + 1;
460 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
461 }
462 #endif
463 
464 static int
465 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
466 {
467 	struct sysctlnode node = *rnode;
468 	char *cp;
469 
470 	cp = prom_getbootfile();
471 	if (cp == NULL || cp[0] == '\0')
472 		return (ENOENT);
473 
474 	node.sysctl_data = cp;
475 	node.sysctl_size = strlen(cp) + 1;
476 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
477 }
478 
479 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
480 {
481 
482 	sysctl_createv(clog, 0, NULL, NULL,
483 		       CTLFLAG_PERMANENT,
484 		       CTLTYPE_NODE, "machdep", NULL,
485 		       NULL, 0, NULL, 0,
486 		       CTL_MACHDEP, CTL_EOL);
487 
488 	sysctl_createv(clog, 0, NULL, NULL,
489 		       CTLFLAG_PERMANENT,
490 		       CTLTYPE_STRUCT, "console_device", NULL,
491 		       sysctl_consdev, 0, NULL, sizeof(dev_t),
492 		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
493 #if 0	/* XXX - Not yet... */
494 	sysctl_createv(clog, 0, NULL, NULL,
495 		       CTLFLAG_PERMANENT,
496 		       CTLTYPE_STRING, "root_device", NULL,
497 		       sysctl_machdep_root_device, 0, NULL, 0,
498 		       CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
499 #endif
500 	sysctl_createv(clog, 0, NULL, NULL,
501 		       CTLFLAG_PERMANENT,
502 		       CTLTYPE_STRING, "booted_kernel", NULL,
503 		       sysctl_machdep_booted_kernel, 0, NULL, 0,
504 		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
505 }
506 
507 /* See: sig_machdep.c */
508 
509 /*
510  * Do a sync in preparation for a reboot.
511  * XXX - This could probably be common code.
512  * XXX - And now, most of it is in vfs_shutdown()
513  * XXX - Put waittime checks in there too?
514  */
515 int waittime = -1;	/* XXX - Who else looks at this? -gwr */
516 static void
517 reboot_sync(void)
518 {
519 
520 	/* Check waittime here to localize its use to this function. */
521 	if (waittime >= 0)
522 		return;
523 	waittime = 0;
524 	vfs_shutdown();
525 }
526 
527 /*
528  * Common part of the BSD and SunOS reboot system calls.
529  */
530 __dead void
531 cpu_reboot(int howto, char *user_boot_string)
532 {
533 	char *bs, *p;
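	/* Holds at most " -das" plus a terminating NUL. */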
534 	char default_boot_string[8];
535 
536 	/* If system is cold, just halt. (early panic?) */
537 	if (cold)
538 		goto haltsys;
539 
540 	/* Un-blank the screen if appropriate. */
541 	cnpollc(1);
542 
543 	if ((howto & RB_NOSYNC) == 0) {
544 		reboot_sync();
545 		/*
546 		 * If we've been adjusting the clock, the todr
547 		 * will be out of synch; adjust it now.
548 		 *
549 		 * XXX - However, if the kernel has been sitting in ddb,
550 		 * the time will be way off, so don't set the HW clock!
551 		 * XXX - Should do sanity check against HW clock. -gwr
552 		 */
553 		/* resettodr(); */
554 	}
555 
556 	/* Disable interrupts. */
557 	splhigh();
558 
559 	/* Write out a crash dump if asked. */
560 	if (howto & RB_DUMP)
561 		dumpsys();
562 
563 	/* run any shutdown hooks */
564 	doshutdownhooks();
565 
566 	if (howto & RB_HALT) {
567 	haltsys:
568 		printf("halted.\n");
569 		prom_halt();
570 	}
571 
572 	/*
573 	 * Automatic reboot.
574 	 */
575 	bs = user_boot_string;
576 	if (bs == NULL) {
577 		/*
578 		 * Build our own boot string with an empty
579 		 * boot device/file and (maybe) some flags.
580 		 * The PROM will supply the device/file name.
581 		 */
582 		bs = default_boot_string;
583 		*bs = '\0';
584 		if (howto & (RB_KDB|RB_ASKNAME|RB_SINGLE)) {
585 			/* Append the boot flags. */
586 			p = bs;
587 			*p++ = ' ';
588 			*p++ = '-';
589 			if (howto & RB_KDB)
590 				*p++ = 'd';
591 			if (howto & RB_ASKNAME)
592 				*p++ = 'a';
593 			if (howto & RB_SINGLE)
594 				*p++ = 's';
595 			*p = '\0';
596 		}
597 	}
598 	printf("rebooting...\n");
599 	prom_boot(bs);
600 	for (;;) ;
601 	/*NOTREACHED*/
602 }
603 
604 /*
605  * These variables are needed by /sbin/savecore
606  */
607 uint32_t dumpmag = 0x8fca0101;	/* magic number */
608 int 	dumpsize = 0;		/* pages */
609 long	dumplo = 0; 		/* blocks */
610 
611 #define	DUMP_EXTRA 	3	/* CPU-dependent extra pages */
612 
613 /*
614  * This is called by main to set dumplo, dumpsize.
615  * Dumps always skip the first PAGE_SIZE of disk space
616  * in case there might be a disk label stored there.
617  * If there is extra space, put dump at the end to
618  * reduce the chance that swapping trashes it.
619  */
620 void
621 cpu_dumpconf(void)
622 {
623 	const struct bdevsw *bdev;
624 	int devblks;	/* size of dump device in blocks */
625 	int dumpblks;	/* size of dump image in blocks */
626 	int (*getsize)(dev_t);
627 
628 	if (dumpdev == NODEV)
629 		return;
630 
631 	bdev = bdevsw_lookup(dumpdev);
632 	if (bdev == NULL) {
633 		dumpdev = NODEV;
634 		return;
635 	}
636 	getsize = bdev->d_psize;
637 	if (getsize == NULL)
638 		return;
639 	devblks = (*getsize)(dumpdev);
640 	if (devblks <= ctod(1))
641 		return;
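	/* Round the device size down to a whole number of pages. */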
642 	devblks &= ~(ctod(1)-1);
643 
644 	/*
645 	 * Note: savecore expects dumpsize to be the
646 	 * number of pages AFTER the dump header.
647 	 */
648 	dumpsize = physmem;
649 
650 	/* Position dump image near end of space, page aligned. */
651 	dumpblks = ctod(physmem + DUMP_EXTRA);
652 	dumplo = devblks - dumpblks;
653 
654 	/* If it does not fit, truncate it by moving dumplo. */
655 	/* Note: Must force signed comparison. */
656 	if (dumplo < ((long)ctod(1))) {
657 		dumplo = ctod(1);
658 		dumpsize = dtoc(devblks - dumplo) - DUMP_EXTRA;
659 	}
660 }
661 
662 /* Note: gdb looks for "dumppcb" in a kernel crash dump. */
663 struct pcb dumppcb;
664 extern paddr_t avail_start;
665 
666 /*
667  * Write a crash dump.  The format while in swap is:
668  *   kcore_seg_t cpu_hdr;
669  *   cpu_kcore_hdr_t cpu_data;
670  *   padding (PAGE_SIZE-sizeof(kcore_seg_t))
671  *   pagemap (2*PAGE_SIZE)
672  *   physical memory...
673  */
674 void
675 dumpsys(void)
676 {
677 	const struct bdevsw *dsw;
678 	kcore_seg_t	*kseg_p;
679 	cpu_kcore_hdr_t *chdr_p;
680 	struct sun2_kcore_hdr *sh;
681 	char *vaddr;
682 	paddr_t paddr;
683 	int psize, todo, chunk;
684 	daddr_t blkno;
685 	int error = 0;
686 
687 	if (dumpdev == NODEV)
688 		return;
689 	dsw = bdevsw_lookup(dumpdev);
690 	if (dsw == NULL || dsw->d_psize == NULL)
691 		return;
692 	if (dumppage == 0)
693 		return;
694 
695 	/*
696 	 * For dumps during autoconfiguration,
697 	 * if the dump device has already been configured...
698 	 */
699 	if (dumpsize == 0)
700 		cpu_dumpconf();
701 	if (dumplo <= 0) {
702 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
703 		    minor(dumpdev));
704 		return;
705 	}
706 	savectx(&dumppcb);
707 
708 	psize = (*(dsw->d_psize))(dumpdev);
709 	if (psize == -1) {
710 		printf("dump area unavailable\n");
711 		return;
712 	}
713 
714 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
715 	    minor(dumpdev), dumplo);
716 
717 	/*
718 	 * Prepare the dump header, including MMU state.
719 	 */
720 	blkno = dumplo;
721 	todo = dumpsize;	/* pages */
722 	vaddr = (char*)dumppage;
723 	memset(vaddr, 0, PAGE_SIZE);
724 
725 	/* Set pointers to all three parts. */
726 	kseg_p = (kcore_seg_t *)vaddr;
727 	chdr_p = (cpu_kcore_hdr_t *) (kseg_p + 1);
728 	sh = &chdr_p->un._sun2;
729 
730 	/* Fill in kcore_seg_t part. */
731 	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
732 	kseg_p->c_size = (ctob(DUMP_EXTRA) - sizeof(*kseg_p));
733 
734 	/* Fill in cpu_kcore_hdr_t part. */
735 	strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
736 	chdr_p->page_size = PAGE_SIZE;
737 	chdr_p->kernbase = KERNBASE;
738 
739 	/* Fill in the sun2_kcore_hdr part (MMU state). */
740 	pmap_kcore_hdr(sh);
741 
742 	/* Write out the dump header. */
743 	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
744 	if (error)
745 		goto fail;
746 	blkno += btodb(PAGE_SIZE);
747 
748 	/* translation RAM (pages zero through seven) */
749 	for (chunk = 0; chunk < (PAGE_SIZE * 8); chunk += PAGE_SIZE) {
750 		pmap_get_pagemap((int*)vaddr, chunk);
751 		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
752 		if (error)
753 			goto fail;
754 		blkno += btodb(PAGE_SIZE);
755 	}
756 
757 	/*
758 	 * Now dump physical memory.  Have to do it in two chunks.
759 	 * The first chunk is "unmanaged" (by the VM code) and its
760 	 * range of physical addresses is not allowed in pmap_enter.
761 	 * However, that segment is mapped linearly, so we can just
762 	 * use the virtual mappings already in place.  The second
763 	 * chunk is done the normal way, using pmap_enter.
764 	 *
765 	 * Note that vaddr==(paddr+KERNBASE) for paddr=0 through etext.
766 	 */
767 
768 	/* Do the first chunk (0 <= PA < avail_start) */
769 	paddr = 0;
770 	chunk = btoc(avail_start);
771 	if (chunk > todo)
772 		chunk = todo;
773 	do {
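		/* Print a progress countdown every 16 pages. */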
774 		if ((todo & 0xf) == 0)
775 			printf("\r%4d", todo);
776 		vaddr = (char*)(paddr + KERNBASE);
777 		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
778 		if (error)
779 			goto fail;
780 		paddr += PAGE_SIZE;
781 		blkno += btodb(PAGE_SIZE);
782 		--todo;
783 	} while (--chunk > 0);
784 
785 	/* Do the second chunk (avail_start <= PA < ctob(dumpsize)) */
786 	vaddr = (char*)vmmap;	/* Borrow /dev/mem VA */
787 	do {
788 		if ((todo & 0xf) == 0)
789 			printf("\r%4d", todo);
790 		pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ);
791 		pmap_update(pmap_kernel());
792 		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
793 		pmap_kremove(vmmap, PAGE_SIZE);
794 		pmap_update(pmap_kernel());
795 		if (error)
796 			goto fail;
797 		paddr += PAGE_SIZE;
798 		blkno += btodb(PAGE_SIZE);
799 	} while (--todo > 0);
800 
801 	printf("\rdump succeeded\n");
802 	return;
803 fail:
804 	printf(" dump error=%d\n", error);
805 }
806 
807 static void
808 initcpu(void)
809 {
810 	/* XXX: Enable RAM parity/ECC checking? */
811 	/* XXX: parityenable(); */
812 
813 }
814 
815 /* straptrap() in trap.c */
816 
817 /* from hp300: badaddr() */
818 
819 /* XXX: parityenable() ? */
820 /* regdump() moved to regdump.c */
821 
822 /*
823  * cpu_exec_aout_makecmds():
824  *	CPU-dependent a.out format hook for execve().
825  *
826  * Determine if the given exec package refers to something which we
827  * understand and, if so, set up the vmcmds for it.
828  */
829 int
830 cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
831 {
832 	return ENOEXEC;
833 }
834 
835 #if 0
836 /*
837  * Soft interrupt support.
838  */
839 void
840 isr_soft_request(int level)
841 {
842 	u_char bit;
843 
844 	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
845 		return;
846 
847 	bit = 1 << level;
848 	enable_reg_or(bit);
849 }
850 
851 void
852 isr_soft_clear(int level)
853 {
854 	u_char bit;
855 
856 	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
857 		return;
858 
859 	bit = 1 << level;
860 	enable_reg_and(~bit);
861 }
862 #endif
863 
864 /*
865  * Like _bus_dmamap_load(), but for raw memory allocated with
866  * bus_dmamem_alloc().
867  */
868 int
869 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
870     int nsegs, bus_size_t size, int flags)
871 {
872 	struct vm_page *m;
873 	paddr_t pa;
874 	bus_addr_t dva;
875 	bus_size_t sgsize;
876 	struct pglist *mlist;
877 	int pagesz = PAGE_SIZE;
878 	int error;
879 
880 	/*
881 	 * Make sure that on an error condition we return "no valid mappings".
882 	 */
883 	map->dm_nsegs = 0;
884 	map->dm_mapsize = 0;
885 
886 	/* Allocate DVMA addresses */
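	/* Round the transfer size up to a whole number of pages. */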
887 	sgsize = (size + pagesz - 1) & -pagesz;
888 
889 	/*
890 	 * If the device can see our entire 24-bit address space,
891 	 * we can use any properly aligned virtual addresses.
892 	 */
893 	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
894 		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
895 					     pagesz, 0);
896 		if (dva == 0)
897 			return (ENOMEM);
898 	}
899 
900 	/*
901 	 * Otherwise, we need virtual addresses in DVMA space.
902 	 */
903 	else {
904 		error = extent_alloc(dvmamap, sgsize, pagesz,
905 					map->_dm_boundary,
906 					(flags & BUS_DMA_NOWAIT) == 0
907 						? EX_WAITOK : EX_NOWAIT,
908 					(u_long *)&dva);
909 		if (error)
910 			return (error);
911 	}
912 
913 	/* Fill in the segment. */
914 	map->dm_segs[0].ds_addr = dva;
915 	map->dm_segs[0].ds_len = size;
916 	map->dm_segs[0]._ds_va = dva;
917 	map->dm_segs[0]._ds_sgsize = sgsize;
918 
919 	/* Map physical pages into MMU */
920 	mlist = segs[0]._ds_mlist;
921 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
922 		if (sgsize == 0)
923 			panic("_bus_dmamap_load_raw: size botch");
924 		pa = VM_PAGE_TO_PHYS(m);
925 		pmap_enter(pmap_kernel(), dva,
926 			   (pa & -pagesz) | PMAP_NC,
927 			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
928 
929 		dva += pagesz;
930 		sgsize -= pagesz;
931 	}
932 	pmap_update(pmap_kernel());
933 
934 	/* Make the map truly valid. */
935 	map->dm_nsegs = 1;
936 	map->dm_mapsize = size;
937 
938 	return (0);
939 }
940 
941 /*
942  * Load a DMA map with a linear buffer.
943  */
944 int
945 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
946     bus_size_t buflen, struct proc *p, int flags)
947 {
948 	bus_size_t sgsize;
949 	vaddr_t va = (vaddr_t)buf;
950 	int pagesz = PAGE_SIZE;
951 	bus_addr_t dva;
952 	pmap_t pmap;
953 	int rv;
954 
955 	/*
956 	 * Make sure that on an error condition we return "no valid mappings".
957 	 */
958 	map->dm_nsegs = 0;
959 	map->dm_mapsize = 0;
960 
961 	if (buflen > map->_dm_size)
962 		return (EINVAL);
963 
964 	/*
965 	 * A 24-bit device can see all of our kernel address space, so
966 	 * if we have KVAs, we can just load them as-is, no mapping
967 	 * necessary.
968 	 */
969 	if ((map->_dm_flags & BUS_DMA_24BIT) != 0 && p == NULL) {
970 		/*
971 		 * XXX Need to implement "don't DMA across this boundary".
972 		 */
973 		if (map->_dm_boundary != 0)
974 			panic("bus_dmamap_load: boundaries not implemented");
975 		map->dm_mapsize = buflen;
976 		map->dm_nsegs = 1;
977 		map->dm_segs[0].ds_addr = (bus_addr_t)va;
978 		map->dm_segs[0].ds_len = buflen;
979 		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
980 		return (0);
981 	}
982 
983 	/*
984 	 * Allocate a region in DVMA space.
985 	 */
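	/* Include the offset of the buffer within its first page. */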
986 	sgsize = m68k_round_page(buflen + (va & (pagesz - 1)));
987 
988 	if (extent_alloc(dvmamap, sgsize, pagesz, map->_dm_boundary,
989 			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
990 			 (u_long *)&dva) != 0) {
991 		return (ENOMEM);
992 	}
993 
994 	/* Fill in the segment. */
995 	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
996 	map->dm_segs[0].ds_len = buflen;
997 	map->dm_segs[0]._ds_va = dva;
998 	map->dm_segs[0]._ds_sgsize = sgsize;
999 
1000 	/*
1001 	 * Now map the DVMA addresses we allocated to point to the
1002 	 * pages of the caller's buffer.
1003 	 */
1004 	if (p != NULL)
1005 		pmap = p->p_vmspace->vm_map.pmap;
1006 	else
1007 		pmap = pmap_kernel();
1008 
1009 	for (; buflen > 0; ) {
1010 		paddr_t pa;
1011 		/*
1012 		 * Get the physical address for this page.
1013 		 */
1014 		rv = pmap_extract(pmap, va, &pa);
1015 #ifdef	DIAGNOSTIC
1016 		if (!rv)
1017 			panic("_bus_dmamap_load: no page");
1018 #endif	/* DIAGNOSTIC */
1019 
1020 		/*
1021 		 * Compute the segment size, and adjust counts.
1022 		 */
1023 		sgsize = pagesz - (va & (pagesz - 1));
1024 		if (buflen < sgsize)
1025 			sgsize = buflen;
1026 
1027 		pmap_enter(pmap_kernel(), dva,
1028 			   (pa & -pagesz) | PMAP_NC,
1029 			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
1030 
1031 		dva += pagesz;
1032 		va += sgsize;
1033 		buflen -= sgsize;
1034 	}
1035 	pmap_update(pmap_kernel());
1036 
1037 	/* Make the map truly valid. */
1038 	map->dm_nsegs = 1;
1039 	map->dm_mapsize = map->dm_segs[0].ds_len;
1040 
1041 	return (0);
1042 }
1043 
1044 /*
1045  * Unload a DMA map.
1046  */
1047 void
1048 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
1049 {
1050 	bus_dma_segment_t *segs = map->dm_segs;
1051 	int nsegs = map->dm_nsegs;
1052 	int flags = map->_dm_flags;
1053 	bus_addr_t dva;
1054 	bus_size_t len;
1055 	int s, error;
1056 
1057 	if (nsegs != 1)
1058 		panic("_bus_dmamap_unload: nsegs = %d", nsegs);
1059 
1060 	/*
1061 	 * _BUS_DMA_DIRECTMAP is set iff this map was loaded using
1062 	 * _bus_dmamap_load for a 24-bit device.
1063 	 */
1064 	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
1065 		/* Nothing to release */
1066 		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
1067 	}
1068 
1069 	/*
1070 	 * Otherwise, this map was loaded using _bus_dmamap_load for a
1071 	 * non-24-bit device, or using _bus_dmamap_load_raw.
1072 	 */
1073 	else {
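		/* Truncate the DVMA address to a page boundary before unmapping. */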
1074 		dva = segs[0]._ds_va & -PAGE_SIZE;
1075 		len = segs[0]._ds_sgsize;
1076 
1077 		/*
1078 		 * Unmap the DVMA addresses.
1079 		 */
1080 		pmap_remove(pmap_kernel(), dva, dva + len);
1081 		pmap_update(pmap_kernel());
1082 
1083 		/*
1084 		 * Free the DVMA addresses.
1085 		 */
1086 		if ((flags & BUS_DMA_24BIT) != 0) {
1087 			/*
1088 			 * This map was loaded using _bus_dmamap_load_raw
1089 			 * for a 24-bit device.
1090 			 */
1091 			uvm_unmap(kernel_map, dva, dva + len);
1092 		} else {
1093 			/*
1094 			 * This map was loaded using _bus_dmamap_load or
1095 			 * _bus_dmamap_load_raw for a non-24-bit device.
1096 			 */
1097 			s = splhigh();
1098 			error = extent_free(dvmamap, dva, len, EX_NOWAIT);
1099 			splx(s);
1100 			if (error != 0)
1101 				printf("warning: %ld bytes of DVMA space lost\n", len);
1102 		}
1103 	}
1104 
1105 	/* Mark the mappings as invalid. */
1106 	map->dm_mapsize = 0;
1107 	map->dm_nsegs = 0;
1108 }
1109 
1110 /*
1111  * Translate a VME address and address modifier
1112  * into a CPU physical address and page type.
1113  */
1114 int
1115 vmebus_translate(vme_am_t mod, vme_addr_t addr, bus_type_t *btp,
1116     bus_addr_t *bap)
1117 {
1118 	bus_addr_t base;
1119 
1120 	switch (mod) {
1121 #define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
1122 
1123 	case (VME_AM_A16|_DS):
1124 		base = 0x00ff0000;
1125 		break;
1126 
1127 	case (VME_AM_A24|_DS):
1128 		base = 0;
1129 		break;
1130 
1131 	default:
1132 		return (ENOENT);
1133 #undef _DS
1134 	}
1135 
1136 	*bap = base | addr;
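	/* Bit 23 of the resulting address selects the VME8 page type. */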
1137 	*btp = (*bap & 0x800000 ? PMAP_VME8 : PMAP_VME0);
1138 	return (0);
1139 }
1140 
1141 /*
1142  * If we can find a mapping that was established by the PROM, use it.
1143  */
1144 int
1145 find_prom_map(paddr_t pa, bus_type_t iospace, int len, vaddr_t *vap)
1146 {
1147 	u_long	pf;
1148 	int	pgtype;
1149 	vaddr_t	va, eva;
1150 	int	sme;
1151 	u_long	pte;
1152 	int	saved_ctx;
1153 
1154 	/*
1155 	 * The mapping must fit entirely within one page.
1156 	 */
1157 	if ((((u_long)pa & PGOFSET) + len) > PAGE_SIZE)
1158 		return EINVAL;
1159 
1160 	pf = PA_PGNUM(pa);
1161 	pgtype = iospace << PG_MOD_SHIFT;
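	/* get_segmap()/get_pte() work on the current context, so switch to the kernel context. */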
1162 	saved_ctx = kernel_context();
1163 
1164 	/*
1165 	 * Walk the PROM address space, looking for a page with the
1166 	 * mapping we want.
1167 	 */
1168 	for (va = SUN_MONSTART; va < SUN_MONEND; ) {
1169 
1170 		/*
1171 		 * Make sure this segment is mapped.
1172 		 */
1173 		sme = get_segmap(va);
1174 		if (sme == SEGINV) {
1175 			va += NBSG;
1176 			continue;			/* next segment */
1177 		}
1178 
1179 		/*
1180 		 * Walk the pages of this segment.
1181 		 */
1182 		for (eva = va + NBSG; va < eva; va += PAGE_SIZE) {
1183 			pte = get_pte(va);
1184 
1185 			if ((pte & (PG_VALID | PG_TYPE)) ==
1186 				(PG_VALID | pgtype) &&
1187 			    PG_PFNUM(pte) == pf)
1188 			{
1189 				/*
1190 				 * Found the PROM mapping.
1191 				 * note: preserve page offset
1192 				 */
1193 				*vap = (va | ((vaddr_t)pa & PGOFSET));
1194 				restore_context(saved_ctx);
1195 				return 0;
1196 			}
1197 		}
1198 	}
1199 	restore_context(saved_ctx);
1200 	return ENOENT;
1201 }
1202