xref: /netbsd-src/sys/arch/sun2/sun2/machdep.c (revision d710132b4b8ce7f7cccaaf660cb16aa16b4077a0)
1 /*	$NetBSD: machdep.c,v 1.25 2003/06/23 11:01:44 martin Exp $	*/
2 
3 /*
4  * Copyright (c) 2001 Matthew Fredette.
5  * Copyright (c) 1994, 1995 Gordon W. Ross
6  * Copyright (c) 1993 Adam Glass
7  * Copyright (c) 1988 University of Utah.
8  * Copyright (c) 1982, 1986, 1990, 1993
9  *	The Regents of the University of California.  All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * the Systems Programming Group of the University of Utah Computer
13  * Science Department.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. All advertising materials mentioning features or use of this software
24  *    must display the following acknowledgement:
25  *	This product includes software developed by the University of
26  *	California, Berkeley and its contributors.
27  * 4. Neither the name of the University nor the names of its contributors
28  *    may be used to endorse or promote products derived from this software
29  *    without specific prior written permission.
30  *
31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  *
43  *	from: Utah Hdr: machdep.c 1.74 92/12/20
44  *	from: @(#)machdep.c	8.10 (Berkeley) 4/20/94
45  */
46 
47 /*-
48  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
49  * All rights reserved.
50  *
51  * This code is derived from software contributed to The NetBSD Foundation
52  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
53  * NASA Ames Research Center.
54  *
55  * Redistribution and use in source and binary forms, with or without
56  * modification, are permitted provided that the following conditions
57  * are met:
58  * 1. Redistributions of source code must retain the above copyright
59  *    notice, this list of conditions and the following disclaimer.
60  * 2. Redistributions in binary form must reproduce the above copyright
61  *    notice, this list of conditions and the following disclaimer in the
62  *    documentation and/or other materials provided with the distribution.
63  * 3. All advertising materials mentioning features or use of this software
64  *    must display the following acknowledgement:
65  *	This product includes software developed by the NetBSD
66  *	Foundation, Inc. and its contributors.
67  * 4. Neither the name of The NetBSD Foundation nor the names of its
68  *    contributors may be used to endorse or promote products derived
69  *    from this software without specific prior written permission.
70  *
71  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
72  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
73  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
74  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
75  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
76  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
77  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
78  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
79  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
80  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
81  * POSSIBILITY OF SUCH DAMAGE.
82  */
83 
84 /*
85  * Copyright (c) 1992, 1993
86  *	The Regents of the University of California.  All rights reserved.
87  *
88  * This software was developed by the Computer Systems Engineering group
89  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
90  * contributed to Berkeley.
91  *
92  * All advertising materials mentioning features or use of this software
93  * must display the following acknowledgement:
94  *	This product includes software developed by the University of
95  *	California, Lawrence Berkeley Laboratory.
96  *
97  * Redistribution and use in source and binary forms, with or without
98  * modification, are permitted provided that the following conditions
99  * are met:
100  * 1. Redistributions of source code must retain the above copyright
101  *    notice, this list of conditions and the following disclaimer.
102  * 2. Redistributions in binary form must reproduce the above copyright
103  *    notice, this list of conditions and the following disclaimer in the
104  *    documentation and/or other materials provided with the distribution.
105  * 3. All advertising materials mentioning features or use of this software
106  *    must display the following acknowledgement:
107  *	This product includes software developed by the University of
108  *	California, Berkeley and its contributors.
109  * 4. Neither the name of the University nor the names of its contributors
110  *    may be used to endorse or promote products derived from this software
111  *    without specific prior written permission.
112  *
113  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
114  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
115  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
116  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
117  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
118  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
119  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
120  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
121  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
122  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
123  * SUCH DAMAGE.
124  *
125  *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
126  */
127 
128 #include "opt_ddb.h"
129 #include "opt_kgdb.h"
130 #include "opt_fpu_emulate.h"
131 
132 #include <sys/param.h>
133 #include <sys/systm.h>
134 #include <sys/kernel.h>
135 #include <sys/proc.h>
136 #include <sys/buf.h>
137 #include <sys/reboot.h>
138 #include <sys/conf.h>
139 #include <sys/file.h>
140 #include <sys/device.h>
141 #include <sys/malloc.h>
142 #include <sys/extent.h>
143 #include <sys/mbuf.h>
144 #include <sys/msgbuf.h>
145 #include <sys/ioctl.h>
146 #include <sys/tty.h>
147 #include <sys/mount.h>
148 #include <sys/user.h>
149 #include <sys/exec.h>
150 #include <sys/core.h>
151 #include <sys/kcore.h>
152 #include <sys/vnode.h>
153 #include <sys/sa.h>
154 #include <sys/syscallargs.h>
155 #include <sys/ksyms.h>
156 #ifdef	KGDB
157 #include <sys/kgdb.h>
158 #endif
159 
160 #include <uvm/uvm.h> /* XXX: not _extern ... need vm_map_create */
161 
162 #include <sys/sysctl.h>
163 
164 #include <dev/cons.h>
165 
166 #include <machine/promlib.h>
167 #include <machine/cpu.h>
168 #include <machine/dvma.h>
169 #include <machine/idprom.h>
170 #include <machine/kcore.h>
171 #include <machine/reg.h>
172 #include <machine/psl.h>
173 #include <machine/pte.h>
174 #define _SUN68K_BUS_DMA_PRIVATE
175 #include <machine/autoconf.h>
176 #include <machine/bus.h>
177 #include <machine/intr.h>
178 #include <machine/pmap.h>
179 
180 #if defined(DDB)
181 #include <machine/db_machdep.h>
182 #include <ddb/db_sym.h>
183 #include <ddb/db_extern.h>
184 #endif
185 
186 #include <dev/vme/vmereg.h>
187 #include <dev/vme/vmevar.h>
188 
189 #include <sun2/sun2/control.h>
190 #include <sun2/sun2/enable.h>
191 #include <sun2/sun2/machdep.h>
192 
193 #include <sun68k/sun68k/vme_sun68k.h>
194 
195 #include "ksyms.h"
196 
/* Defined in locore.s */
extern char kernel_text[];
/* Defined by the linker */
extern char etext[];

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

/* Kernel submaps; carved out of kernel_map in cpu_startup(). */
struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

int	physmem;	/* physical memory size, in pages (see ctob() uses) */
int	fputype;	/* FPU type; set to FPU_NONE in cpu_startup() */
caddr_t	msgbufaddr;	/* kernel message buffer VA; set in cpu_startup() */

/* Virtual page frame for /dev/mem (see mem.c) */
vaddr_t vmmap;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

/* Soft copy of the enable register. */
__volatile u_short enable_reg_soft = ENABLE_REG_SOFT_UNDEF;

/*
 * Our no-fault fault handler.
 */
label_t *nofault;

/*
 * dvmamap is used to manage DVMA memory.
 * Created in cpu_startup(); allocated from in the bus_dma code below.
 */
static struct extent *dvmamap;

/* Our private scratch page for dumping the MMU. */
static vaddr_t dumppage;

static void identifycpu __P((void));
static void initcpu __P((void));
240 
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1,2, and mountroot, etc.
 */
void
cpu_startup()
{
	caddr_t v;
	vsize_t size;
	u_int sz, i, base, residual;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	/*
	 * Initialize message buffer (for kernel printf).
	 * This is put in physical pages four through seven
	 * so it will always be in the same place after a
	 * reboot. (physical pages 0-3 are reserved by the PROM
	 * for its vector table and other stuff.)
	 * Its mapping was prepared in pmap_bootstrap().
	 * Also, offset some to avoid PROM scribbles.
	 */
	v = (caddr_t) (PAGE_SIZE * 4);
	msgbufaddr = (caddr_t)(v + MSGBUFOFF);
	initmsgbuf(msgbufaddr, MSGBUFSIZE);

#if NKSYMS || defined(DDB) || defined(LKM)
	{
		/*
		 * Hand the kernel symbol table to the ksyms code.
		 * end[]/esym delimit the symbols placed after the
		 * kernel image by the boot loader.
		 */
		extern int end[];
		extern char *esym;

		ksyms_init(end[0], end + 1, (int*)esym);
	}
#endif /* NKSYMS || DDB || LKM */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	/* The Sun2 has no hardware FPU; emulation is a compile option. */
	fputype = FPU_NONE;
#ifdef  FPU_EMULATE
	printf("fpu: emulator\n");
#else
	printf("fpu: no math support\n");
#endif

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * XXX fredette - we force a small number of buffers
	 * to help me debug this on my low-memory machine.
	 * this should go away at some point, allowing the
	 * normal automatic buffer-sizing to happen.
	 */
	bufpages = 37;

	/*
	 * Get scratch page for dumpsys().
	 */
	if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE)) == 0)
		panic("startup: alloc dumppage");

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (u_int)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
				       VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	/* Flush the pmap_kenter_pa() mappings entered above. */
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Allocate a virtual page (for use by /dev/mem)
	 * This page is handed to pmap_enter() therefore
	 * it has to be in the normal kernel VA range.
	 */
	vmmap = uvm_km_valloc_wait(kernel_map, PAGE_SIZE);

	/*
	 * Allocate DMA map for devices on the bus.
	 */
	dvmamap = extent_create("dvmamap",
	    DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (dvmamap == NULL)
		panic("unable to allocate DVMA map");

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
414 
415 /*
416  * Set registers on exec.
417  */
418 void
419 setregs(l, pack, stack)
420 	struct lwp *l;
421 	struct exec_package *pack;
422 	u_long stack;
423 {
424 	struct trapframe *tf = (struct trapframe *)l->l_md.md_regs;
425 
426 	tf->tf_sr = PSL_USERSET;
427 	tf->tf_pc = pack->ep_entry & ~1;
428 	tf->tf_regs[D0] = 0;
429 	tf->tf_regs[D1] = 0;
430 	tf->tf_regs[D2] = 0;
431 	tf->tf_regs[D3] = 0;
432 	tf->tf_regs[D4] = 0;
433 	tf->tf_regs[D5] = 0;
434 	tf->tf_regs[D6] = 0;
435 	tf->tf_regs[D7] = 0;
436 	tf->tf_regs[A0] = 0;
437 	tf->tf_regs[A1] = 0;
438 	tf->tf_regs[A2] = (int)l->l_proc->p_psstr;
439 	tf->tf_regs[A3] = 0;
440 	tf->tf_regs[A4] = 0;
441 	tf->tf_regs[A5] = 0;
442 	tf->tf_regs[A6] = 0;
443 	tf->tf_regs[SP] = stack;
444 
445 	/* restore a null state frame */
446 	l->l_addr->u_pcb.pcb_fpregs.fpf_null = 0;
447 
448 	l->l_md.md_flags = 0;
449 }
450 
/*
 * Info for CTL_HW
 */
char	machine[16] = MACHINE;		/* from <machine/param.h> */
char	kernel_arch[16] = "sun2";	/* XXX needs a sysctl node */
char	cpu_model[120];			/* filled in by identifycpu() */
457 
458 /*
459  * Determine which Sun2 model we are running on.
460  */
461 void
462 identifycpu()
463 {
464 	extern char *cpu_string;	/* XXX */
465 
466 	/* Other stuff? (VAC, mc6888x version, etc.) */
467 	/* Note: miniroot cares about the kernel_arch part. */
468 	sprintf(cpu_model, "%s %s", kernel_arch, cpu_string);
469 
470 	printf("Model: %s\n", cpu_model);
471 }
472 
/*
 * machine dependent system variables.
 *
 * Handles the leaf nodes under CTL_MACHDEP: the console device,
 * and the kernel path the PROM says it booted.  All nodes here are
 * served through the sysctl_rd* helpers (read-only by convention).
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	int error;
	dev_t consdev;
	char *cp;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		/* Console device number, or NODEV if no console attached. */
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		error = sysctl_rdstruct(oldp, oldlenp, newp,
		    &consdev, sizeof consdev);
		break;

#if 0	/* XXX - Not yet... */
	case CPU_ROOT_DEVICE:
		error = sysctl_rdstring(oldp, oldlenp, newp, root_device);
		break;

#endif
	case CPU_BOOTED_KERNEL:
		/* Kernel file name as reported by the PROM, if any. */
		cp = prom_getbootfile();
		if (cp == NULL || cp[0] == '\0')
			return (ENOENT);
		return (sysctl_rdstring(oldp, oldlenp, newp, cp));

	default:
		error = EOPNOTSUPP;
	}
	return (error);
}
521 
522 /* See: sig_machdep.c */
523 
524 /*
525  * Do a sync in preparation for a reboot.
526  * XXX - This could probably be common code.
527  * XXX - And now, most of it is in vfs_shutdown()
528  * XXX - Put waittime checks in there too?
529  */
530 int waittime = -1;	/* XXX - Who else looks at this? -gwr */
531 static void
532 reboot_sync __P((void))
533 {
534 
535 	/* Check waittime here to localize its use to this function. */
536 	if (waittime >= 0)
537 		return;
538 	waittime = 0;
539 	vfs_shutdown();
540 }
541 
/*
 * Common part of the BSD and SunOS reboot system calls.
 *
 * howto: RB_* flags (RB_NOSYNC, RB_DUMP, RB_HALT, RB_KDB,
 * RB_ASKNAME, RB_SINGLE) controlling the shutdown sequence.
 * user_boot_string: PROM boot string to use for the reboot, or
 * NULL to let this function build a default one.
 *
 * Never returns: ends in prom_halt(), prom_boot(), or a spin.
 */
__dead void
cpu_reboot(howto, user_boot_string)
	int howto;
	char *user_boot_string;
{
	char *bs, *p;
	char default_boot_string[8];

	/*
	 * If system is cold, just halt. (early panic?)
	 * Note: jumps into the RB_HALT block below.
	 */
	if (cold)
		goto haltsys;

	/* Un-blank the screen if appropriate. */
	cnpollc(1);

	if ((howto & RB_NOSYNC) == 0) {
		reboot_sync();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 *
		 * XXX - However, if the kernel has been sitting in ddb,
		 * the time will be way off, so don't set the HW clock!
		 * XXX - Should do sanity check against HW clock. -gwr
		 */
		/* resettodr(); */
	}

	/* Disable interrupts. */
	splhigh();

	/* Write out a crash dump if asked. */
	if (howto & RB_DUMP)
		dumpsys();

	/* run any shutdown hooks */
	doshutdownhooks();

	if (howto & RB_HALT) {
	haltsys:
		printf("halted.\n");
		prom_halt();
	}

	/*
	 * Automatic reboot.
	 */
	bs = user_boot_string;
	if (bs == NULL) {
		/*
		 * Build our own boot string with an empty
		 * boot device/file and (maybe) some flags.
		 * The PROM will supply the device/file name.
		 * Worst case is " -das\0": 6 bytes, fits in 8.
		 */
		bs = default_boot_string;
		*bs = '\0';
		if (howto & (RB_KDB|RB_ASKNAME|RB_SINGLE)) {
			/* Append the boot flags. */
			p = bs;
			*p++ = ' ';
			*p++ = '-';
			if (howto & RB_KDB)
				*p++ = 'd';
			if (howto & RB_ASKNAME)
				*p++ = 'a';
			if (howto & RB_SINGLE)
				*p++ = 's';
			*p = '\0';
		}
	}
	printf("rebooting...\n");
	prom_boot(bs);
	/* Should not come back; spin if the PROM returns. */
	for (;;) ;
	/*NOTREACHED*/
}
620 
/*
 * These variables are needed by /sbin/savecore
 */
u_int32_t dumpmag = 0x8fca0101;	/* magic number */
int 	dumpsize = 0;		/* pages */
long	dumplo = 0; 		/* blocks */

#define	DUMP_EXTRA 	3	/* CPU-dependent extra pages */

/*
 * This is called by main to set dumplo, dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf()
{
	const struct bdevsw *bdev;
	int devblks;	/* size of dump device in blocks */
	int dumpblks;	/* size of dump image in blocks */
	int (*getsize)__P((dev_t));

	if (dumpdev == NODEV)
		return;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	getsize = bdev->d_psize;
	/* No d_psize routine means we cannot size the device; give up. */
	if (getsize == NULL)
		return;
	devblks = (*getsize)(dumpdev);
	if (devblks <= ctod(1))
		return;
	/* Round the device size down to a page boundary. */
	devblks &= ~(ctod(1)-1);

	/*
	 * Note: savecore expects dumpsize to be the
	 * number of pages AFTER the dump header.
	 */
	dumpsize = physmem;

	/* Position dump image near end of space, page aligned. */
	dumpblks = ctod(physmem + DUMP_EXTRA);
	dumplo = devblks - dumpblks;

	/* If it does not fit, truncate it by moving dumplo. */
	/* Note: Must force signed comparison. */
	if (dumplo < ((long)ctod(1))) {
		dumplo = ctod(1);
		dumpsize = dtoc(devblks - dumplo) - DUMP_EXTRA;
	}
}
676 
/* Note: gdb looks for "dumppcb" in a kernel crash dump. */
struct pcb dumppcb;
extern paddr_t avail_start;

/*
 * Write a crash dump.  The format while in swap is:
 *   kcore_seg_t cpu_hdr;
 *   cpu_kcore_hdr_t cpu_data;
 *   padding (PAGE_SIZE-sizeof(kcore_seg_t))
 *   pagemap (2*PAGE_SIZE)
 *   physical memory...
 */
void
dumpsys()
{
	const struct bdevsw *dsw;
	kcore_seg_t	*kseg_p;
	cpu_kcore_hdr_t *chdr_p;
	struct sun2_kcore_hdr *sh;
	char *vaddr;
	paddr_t paddr;
	int psize, todo, chunk;
	daddr_t blkno;
	int error = 0;

	if (dumpdev == NODEV)
		return;
	dsw = bdevsw_lookup(dumpdev);
	if (dsw == NULL || dsw->d_psize == NULL)
		return;
	/* The scratch page is allocated in cpu_startup(). */
	if (dumppage == 0)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	/* Save this CPU's context for the debugger. */
	savectx(&dumppcb);

	psize = (*(dsw->d_psize))(dumpdev);
	if (psize == -1) {
		printf("dump area unavailable\n");
		return;
	}

	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	/*
	 * Prepare the dump header, including MMU state.
	 */
	blkno = dumplo;
	todo = dumpsize;	/* pages */
	vaddr = (char*)dumppage;
	memset(vaddr, 0, PAGE_SIZE);

	/* Set pointers to all three parts. */
	kseg_p = (kcore_seg_t *)vaddr;
	chdr_p = (cpu_kcore_hdr_t *) (kseg_p + 1);
	sh = &chdr_p->un._sun2;

	/* Fill in kcore_seg_t part. */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = (ctob(DUMP_EXTRA) - sizeof(*kseg_p));

	/* Fill in cpu_kcore_hdr_t part. */
	strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
	chdr_p->page_size = PAGE_SIZE;
	chdr_p->kernbase = KERNBASE;

	/* Fill in the sun2_kcore_hdr part (MMU state). */
	pmap_kcore_hdr(sh);

	/* Write out the dump header. */
	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
	if (error)
		goto fail;
	blkno += btodb(PAGE_SIZE);

	/* translation RAM (pages zero through seven) */
	for(chunk = 0; chunk < (PAGE_SIZE * 8); chunk += PAGE_SIZE) {
		pmap_get_pagemap((int*)vaddr, chunk);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		blkno += btodb(PAGE_SIZE);
	}

	/*
	 * Now dump physical memory.  Have to do it in two chunks.
	 * The first chunk is "unmanaged" (by the VM code) and its
	 * range of physical addresses is not allow in pmap_enter.
	 * However, that segment is mapped linearly, so we can just
	 * use the virtual mappings already in place.  The second
	 * chunk is done the normal way, using pmap_enter.
	 *
	 * Note that vaddr==(paddr+KERNBASE) for paddr=0 through etext.
	 */

	/* Do the first chunk (0 <= PA < avail_start) */
	paddr = 0;
	chunk = btoc(avail_start);
	if (chunk > todo)
		chunk = todo;
	do {
		/* Progress counter, updated every 16 pages. */
		if ((todo & 0xf) == 0)
			printf("\r%4d", todo);
		vaddr = (char*)(paddr + KERNBASE);
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
		--todo;
	} while (--chunk > 0);

	/* Do the second chunk (avail_start <= PA < dumpsize) */
	vaddr = (char*)vmmap;	/* Borrow /dev/mem VA */
	do {
		if ((todo & 0xf) == 0)
			printf("\r%4d", todo);
		/* Map each page non-cached, write it, then unmap. */
		pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ);
		pmap_update(pmap_kernel());
		error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
		pmap_kremove(vmmap, PAGE_SIZE);
		pmap_update(pmap_kernel());
		if (error)
			goto fail;
		paddr += PAGE_SIZE;
		blkno += btodb(PAGE_SIZE);
	} while (--todo > 0);

	printf("\rdump succeeded\n");
	return;
fail:
	printf(" dump error=%d\n", error);
}
821 
/*
 * initcpu: CPU-specific register/cache setup at boot.
 * Currently a no-op on the Sun2.
 */
static void
initcpu()
{
	/* XXX: Enable RAM parity/ECC checking? */
	/* XXX: parityenable(); */

}
829 
830 /* straptrap() in trap.c */
831 
832 /* from hp300: badaddr() */
833 
834 /* XXX: parityenable() ? */
835 /* regdump() moved to regdump.c */
836 
/*
 * cpu_exec_aout_makecmds():
 *	cpu-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 *
 * This port recognizes no machine-dependent a.out variants, so the
 * hook always declines the image.
 */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	return (ENOEXEC);
}
851 
852 /*
853  * Soft interrupt support.
854  */
855 void isr_soft_request(level)
856 	int level;
857 {
858 	u_char bit;
859 
860 	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
861 		return;
862 
863 	bit = 1 << level;
864 	enable_reg_or(bit);
865 }
866 
867 void isr_soft_clear(level)
868 	int level;
869 {
870 	u_char bit;
871 
872 	if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
873 		return;
874 
875 	bit = 1 << level;
876 	enable_reg_and(~bit);
877 }
878 
/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 *
 * Maps the page list in segs[0]._ds_mlist into DVMA space (or, for
 * 24-bit devices, into ordinary kernel VA) as a single contiguous
 * DMA segment.  Returns 0 on success or an errno value.
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	struct vm_page *m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	/* Allocate DVMA addresses (size rounded up to a page multiple). */
	sgsize = (size + pagesz - 1) & -pagesz;

	/*
	 * If the device can see our entire 24-bit address space,
	 * we can use any properly aligned virtual addresses.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
					     pagesz, 0);
		if (dva == 0)
			return (ENOMEM);
	}

	/*
	 * Otherwise, we need virtual addresses in DVMA space.
	 */
	else {
		error = extent_alloc(dvmamap, sgsize, pagesz,
					map->_dm_boundary,
					(flags & BUS_DMA_NOWAIT) == 0
						? EX_WAITOK : EX_NOWAIT,
					(u_long *)&dva);
		if (error)
			return (error);
	}

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into MMU */
	/* NOTE(review): only segs[0] is used; nsegs > 1 is not handled. */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
		if (sgsize == 0)
			panic("_bus_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		/* Enter each page non-cacheable (PMAP_NC) and wired. */
		pmap_enter(pmap_kernel(), dva,
			   (pa & -pagesz) | PMAP_NC,
			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}
960 
/*
 * load DMA map with a linear buffer.
 *
 * For 24-bit devices with a kernel buffer, the KVA is used directly
 * (no remapping).  Otherwise a DVMA range is allocated and the
 * buffer's pages are entered into it non-cacheable.  Returns 0 on
 * success or an errno value.
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_size_t sgsize;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	bus_addr_t dva;
	pmap_t pmap;
	int rv;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * A 24-bit device can see all of our kernel address space, so
	 * if we have KVAs, we can just load them as-is, no mapping
	 * necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0 && p == NULL) {
		/*
		 * XXX Need to implement "don't DMA across this boundry".
		 */
		if (map->_dm_boundary != 0)
			panic("bus_dmamap_load: boundaries not implemented");
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->dm_segs[0].ds_addr = (bus_addr_t)va;
		map->dm_segs[0].ds_len = buflen;
		/* Remember the shortcut so unload knows to skip teardown. */
		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
		return (0);
	}

	/*
	 * Allocate a region in DVMA space.
	 * Enough pages to cover buflen plus the buffer's page offset.
	 */
	sgsize = m68k_round_page(buflen + (va & (pagesz - 1)));

	if (extent_alloc(dvmamap, sgsize, pagesz, map->_dm_boundary,
			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
			 (u_long *)&dva) != 0) {
		return (ENOMEM);
	}

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/*
	 * Now map the DVMA addresses we allocated to point to the
	 * pages of the caller's buffer.
	 */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		rv = pmap_extract(pmap, va, &pa);
#ifdef	DIAGNOSTIC
		if (!rv)
			panic("_bus_dmamap_load: no page");
#endif	/* DIAGNOSTIC */

		/*
		 * Compute the segment size, and adjust counts.
		 * First iteration may be a partial page.
		 */
		sgsize = pagesz - (va & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

		/* Enter the page non-cacheable (PMAP_NC) and wired. */
		pmap_enter(pmap_kernel(), dva,
			   (pa & -pagesz) | PMAP_NC,
			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		dva += pagesz;
		va += sgsize;
		buflen -= sgsize;
	}
	pmap_update(pmap_kernel());

	/* Make the map truly valid. */
	map->dm_nsegs = 1;
	map->dm_mapsize = map->dm_segs[0].ds_len;

	return (0);
}
1068 
1069 /*
1070  * unload a DMA map.
1071  */
1072 void
1073 _bus_dmamap_unload(t, map)
1074 	bus_dma_tag_t t;
1075 	bus_dmamap_t map;
1076 {
1077 	bus_dma_segment_t *segs = map->dm_segs;
1078 	int nsegs = map->dm_nsegs;
1079 	int flags = map->_dm_flags;
1080 	bus_addr_t dva;
1081 	bus_size_t len;
1082 	int s, error;
1083 
1084 	if (nsegs != 1)
1085 		panic("_bus_dmamem_unload: nsegs = %d", nsegs);
1086 
1087 	/*
1088 	 * _BUS_DMA_DIRECTMAP is set iff this map was loaded using
1089 	 * _bus_dmamap_load for a 24-bit device.
1090 	 */
1091 	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
1092 		/* Nothing to release */
1093 		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
1094 	}
1095 
1096 	/*
1097 	 * Otherwise, this map was loaded using _bus_dmamap_load for a
1098 	 * non-24-bit device, or using _bus_dmamap_load_raw.
1099 	 */
1100 	else {
1101 		dva = segs[0]._ds_va & -PAGE_SIZE;
1102 		len = segs[0]._ds_sgsize;
1103 
1104 		/*
1105 		 * Unmap the DVMA addresses.
1106 		 */
1107 		pmap_remove(pmap_kernel(), dva, dva + len);
1108 		pmap_update(pmap_kernel());
1109 
1110 		/*
1111 		 * Free the DVMA addresses.
1112 		 */
1113 		if ((flags & BUS_DMA_24BIT) != 0) {
1114 			/*
1115 			 * This map was loaded using _bus_dmamap_load_raw
1116 			 * for a 24-bit device.
1117 			 */
1118 			uvm_unmap(kernel_map, dva, dva + len);
1119 		} else {
1120 			/*
1121 			 * This map was loaded using _bus_dmamap_load or
1122 			 * _bus_dmamap_load_raw for a non-24-bit device.
1123 			 */
1124 			s = splhigh();
1125 			error = extent_free(dvmamap, dva, len, EX_NOWAIT);
1126 			splx(s);
1127 			if (error != 0)
1128 				printf("warning: %ld of DVMA space lost\n", len);
1129 		}
1130 	}
1131 
1132 	/* Mark the mappings as invalid. */
1133 	map->dm_mapsize = 0;
1134 	map->dm_nsegs = 0;
1135 }
1136 
1137 /*
1138  * Translate a VME address and address modifier
1139  * into a CPU physical address and page type.
1140  */
1141 int
1142 vmebus_translate(mod, addr, btp, bap)
1143 	vme_am_t	mod;
1144 	vme_addr_t	addr;
1145 	bus_type_t	*btp;
1146 	bus_addr_t	*bap;
1147 {
1148 	bus_addr_t base;
1149 
1150 	switch(mod) {
1151 #define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
1152 
1153 	case (VME_AM_A16|_DS):
1154 		base = 0x00ff0000;
1155 		break;
1156 
1157 	case (VME_AM_A24|_DS):
1158 		base = 0;
1159 		break;
1160 
1161 	default:
1162 		return (ENOENT);
1163 #undef _DS
1164 	}
1165 
1166 	*bap = base | addr;
1167 	*btp = (*bap & 0x800000 ? PMAP_VME8 : PMAP_VME0);
1168 	return (0);
1169 }
1170