/*	$NetBSD: machdep.c,v 1.308 2024/03/05 14:15:35 thorpej Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 2019, 2023 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.308 2024/03/05 14:15:35 thorpej Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_modular.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_sunos.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ras.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/syscallargs.h>
#include <sys/exec.h>
#include <sys/ucontext.h>
#include <sys/cpu.h>
#include <sys/module.h>
#include <sys/ksyms.h>
#include <sys/pserialize.h>

#include <sys/exec_aout.h>

#include <ddb/db_active.h>

#include <dev/mm.h>

#include <uvm/uvm.h>

#include <sys/sysctl.h>
#ifndef	ELFSIZE
#ifdef __arch64__
#define	ELFSIZE	64
#else
#define	ELFSIZE	32
#endif
#endif
#include <sys/exec_elf.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <sys/bus.h>
#include <sys/kprintf.h>
#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/openfirm.h>
#include <machine/sparc64.h>

#include <sparc64/sparc64/cache.h>

/* #include "fb.h" */
#include "ksyms.h"

int bus_space_debug = 0; /* This may be used by macros elsewhere. */
#ifdef DEBUG
#define DPRINTF(l, s)   do { if (bus_space_debug & l) printf s; } while (0)
#else
#define DPRINTF(l, s)
#endif

#if defined(COMPAT_16) || defined(COMPAT_SUNOS)
#ifdef DEBUG
/* See <sparc64/sparc64/sigdebug.h> */
int sigdebug = 0x0;
int sigpid = 0;
#endif
#endif

extern vaddr_t avail_end;
#ifdef MODULAR
vaddr_t module_start, module_end;
static struct vm_map module_map_store;
#endif

/*
 * Maximum number of DMA segments we'll allow in dmamem_load()
 * routines.  Can be overridden in config files, etc.
 */
#ifndef MAX_DMA_SEGS
#define MAX_DMA_SEGS	20
#endif

void	dumpsys(void);
void	stackdump(void);


/*
 * Machine-dependent startup code
 */
void
cpu_startup(void)
{
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	char pbuf[9];

#ifdef DEBUG
	pmapdebug = 0;
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	/*identifycpu();*/
	format_bytes(pbuf, sizeof(pbuf), ctob((uint64_t)physmem));
	printf("total memory = %s\n", pbuf);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);

#if 0
	pmap_redzone();
#endif

#ifdef MODULAR
	uvm_map_setup(&module_map_store, module_start, module_end, 0);
	module_map_store.pmap = pmap_kernel();
	module_map = &module_map_store;
#endif
}

/*
 * Set up registers on exec.
 */

#ifdef __arch64__
#define STACK_OFFSET	BIAS
#undef CCFSZ
#define CCFSZ	CC64FSZ
#else
#define STACK_OFFSET	0
#endif
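
/*
 * On 64-bit kernels user stack pointers carry the SPARC V9 stack
 * bias (BIAS, 2047 bytes); STACK_OFFSET is applied whenever a stack
 * address is installed in or recovered from %o6 below.
 */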

/* ARGSUSED */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe64 *tf = l->l_md.md_tf;
	struct fpstate64 *fs;
	int64_t tstate;
	int pstate = PSTATE_USER;
#ifdef __arch64__
	Elf_Ehdr *eh = pack->ep_hdr;
#endif

	/* Clear the P_32 flag. */
	l->l_proc->p_flag &= ~PK_32;

	/* Don't allow misaligned code by default */
	l->l_proc->p_md.md_flags &= ~MDP_FIXALIGN;

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec()
	 *	%tstate: (retain icc and xcc and cwp bits)
	 *	%g1: p->p_psstrp (used by crt0)
	 *	%tpc,%tnpc: entry point of program
	 */
#ifdef __arch64__
	/* Check what memory model is requested */
	switch ((eh->e_flags & EF_SPARCV9_MM)) {
	default:
		printf("Unknown memory model %d\n",
		       (eh->e_flags & EF_SPARCV9_MM));
		/* FALLTHROUGH */
	case EF_SPARCV9_TSO:
		pstate = PSTATE_MM_TSO|PSTATE_IE;
		break;
	case EF_SPARCV9_PSO:
		pstate = PSTATE_MM_PSO|PSTATE_IE;
		break;
	case EF_SPARCV9_RMO:
		pstate = PSTATE_MM_RMO|PSTATE_IE;
		break;
	}
#endif
	tstate = ((int64_t)ASI_PRIMARY_NO_FAULT << TSTATE_ASI_SHIFT) |
	    (pstate << TSTATE_PSTATE_SHIFT) | (tf->tf_tstate & TSTATE_CWP);
	if ((fs = l->l_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		fpusave_lwp(l, false);
		pool_cache_put(fpstate_cache, fs);
		l->l_md.md_fpstate = NULL;
	}
	memset(tf, 0, sizeof *tf);
	tf->tf_tstate = tstate;
	tf->tf_global[1] = l->l_proc->p_psstrp;
	/* %g4 needs to point to the start of the data segment */
	tf->tf_global[4] = 0;
	tf->tf_pc = pack->ep_entry & ~3;
	tf->tf_npc = tf->tf_pc + 4;
	stack -= sizeof(struct rwindow);
	tf->tf_out[6] = stack - STACK_OFFSET;
	tf->tf_out[7] = 0UL;
#ifdef NOTDEF_DEBUG
	printf("setregs: setting tf %p sp %p pc %p\n", tf,
	       (void *)(uintptr_t)tf->tf_out[6], (void *)(uintptr_t)tf->tf_pc);
#ifdef DDB
	Debugger();
#endif
#endif
}

/*
 * machine dependent system variables.
 */
static int
sysctl_machdep_boot(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	char bootpath[256];
	const char *cp = NULL;
	extern char ofbootpath[], *ofbootpartition, *ofbootfile, *ofbootflags;

	switch (node.sysctl_num) {
	case CPU_BOOTED_KERNEL:
		cp = ofbootfile;
		if (cp == NULL || cp[0] == '\0')
			/* Unknown to firmware, return default name */
			cp = "netbsd";
		break;
	case CPU_BOOT_ARGS:
		cp = ofbootflags;
		break;
	case CPU_BOOTED_DEVICE:
		if (ofbootpartition) {
			snprintf(bootpath, sizeof(bootpath), "%s:%s",
			    ofbootpath, ofbootpartition);
			cp = bootpath;
		} else {
			cp = ofbootpath;
		}
		break;
	}

	if (cp == NULL || cp[0] == '\0')
		return (ENOENT);

	/*XXXUNCONST*/
	node.sysctl_data = __UNCONST(cp);
	node.sysctl_size = strlen(cp) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * Figure out which VIS version the CPU supports.
 * This assumes all CPUs in the system are the same.
 */
static int
get_vis(void)
{
	int vis = 0;

	if (CPU_ISSUN4V) {
		/*
		 * UA2005 and UA2007 support VIS 1 and VIS 2.
		 * Oracle SPARC Architecture 2011 supports VIS 3.
		 *
		 * XXX Settle with VIS 2 until we can determine the
		 *     actual sun4v implementation.
		 */
		vis = 2;
	} else {
		if (GETVER_CPU_MANUF() == MANUF_FUJITSU) {
			/* as far as I can tell SPARC64-III and up have VIS 1.0 */
			if (GETVER_CPU_IMPL() >= IMPL_SPARC64_III) {
				vis = 1;
			}
			/* XXX - which, if any, SPARC64 models support VIS 2.0? */
		} else {
			/* this better be Sun */
			vis = 1;	/* all UltraSPARCs support at least VIS 1.0 */
			if (CPU_IS_USIII_UP()) {
				vis = 2;
			}
		}
	}
	return vis;
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "boot_args", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOT_ARGS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "cpu_arch", NULL,
		       NULL, 9, NULL, 0,
		       CTL_MACHDEP, CPU_ARCH, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	               CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
	               CTLTYPE_INT, "vis",
	               SYSCTL_DESCR("supported version of VIS instruction set"),
	               NULL, get_vis(), NULL, 0,
	               CTL_MACHDEP, CPU_VIS, CTL_EOL);
}
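
/*
 * From userland the nodes registered above read as, e.g.,
 * "sysctl machdep.booted_kernel" or "sysctl machdep.vis"
 * (illustrative invocations; node names as created here).
 */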

void *
getframe(struct lwp *l, int sig, int *onstack)
{
	struct proc *p = l->l_proc;
	struct trapframe64 *tf = l->l_md.md_tf;

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	*onstack = (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
	    && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	if (*onstack)
		return ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
	else
		return (void *)((uintptr_t)tf->tf_out[6] + STACK_OFFSET);
}

struct sigframe_siginfo {
	siginfo_t	sf_si;		/* saved siginfo */
	ucontext_t	sf_uc;		/* saved ucontext */
};

void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack, error;
	int sig = ksi->ksi_signo;
	ucontext_t uc;
	long ucsz;
	struct sigframe_siginfo *fp = getframe(l, sig, &onstack);
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct rwindow *newsp;
	register_t sp;
	/* Allocate an aligned sigframe */
	fp = (void *)((u_long)(fp - 1) & ~0x0f);

	memset(&uc, 0, sizeof(uc));
	uc.uc_flags = _UC_SIGMASK |
	    ((l->l_sigstk.ss_flags & SS_ONSTACK)
		? _UC_SETSTACK : _UC_CLRSTACK);
	uc.uc_sigmask = *mask;
	uc.uc_link = l->l_ctxlink;

	sendsig_reset(l, sig);
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);
	ucsz = (char *)&uc.__uc_pad - (char *)&uc;
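	/*
	 * Copy out only the used portion of the ucontext, up to but
	 * not including the trailing __uc_pad member.
	 */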

	/*
	 * Now copy the stack contents out to user space.
	 * We need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 * Since we're calling the handler directly, allocate a full size
	 * C stack frame.
	 */
	newsp = (struct rwindow *)((u_long)fp - CCFSZ);
	sp = (register_t)(uintptr_t)tf->tf_out[6];
	error = (copyout(&ksi->ksi_info, &fp->sf_si,
			sizeof(ksi->ksi_info)) != 0 ||
	    copyout(&uc, &fp->sf_uc, ucsz) != 0 ||
	    copyout(&sp, &newsp->rw_in[6], sizeof(sp)) != 0);
	mutex_enter(p->p_lock);

	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	tf->tf_pc = (const vaddr_t)catcher;
	tf->tf_npc = (const vaddr_t)catcher + 4;
	tf->tf_out[0] = sig;
	tf->tf_out[1] = (vaddr_t)&fp->sf_si;
	tf->tf_out[2] = (vaddr_t)&fp->sf_uc;
	tf->tf_out[6] = (vaddr_t)newsp - STACK_OFFSET;
	tf->tf_out[7] = (vaddr_t)ps->sa_sigdesc[sig].sd_tramp - 8;
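	/*
	 * %o7 is biased by -8 so that the handler's return address
	 * (%o7 + 8, as the SPARC calling convention computes it)
	 * lands exactly on the signal trampoline.
	 */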

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}

struct pcb dumppcb;

static void
maybe_dump(int howto)
{
	int s;

	/* Disable interrupts. */
	s = splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

	splx(s);
}

void
cpu_reboot(int howto, char *user_boot_string)
{
	static bool syncdone = false;
	int i;
	static char str[128];
	struct lwp *l;

	l = (curlwp == NULL) ? &lwp0 : curlwp;

	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

#if NFB > 0
	fb_unblank();
#endif
	boothowto = howto;

	/* If rebooting and a dump is requested, do it.
	 *
	 * XXX used to dump after vfs_shutdown() and before
	 * detaching devices / shutdown hooks / pmf_system_shutdown().
	 */
	maybe_dump(howto);

	/*
	 * If we've panic'd, don't make the situation potentially
	 * worse by syncing or unmounting the file systems.
	 */
	if ((howto & RB_NOSYNC) == 0 && panicstr == NULL) {
		if (!syncdone) {
			syncdone = true;
			/* XXX used to force unmount as well, here */
			vfs_sync_all(l);
		}

		while (vfs_unmountall1(l, false, false) ||
		       config_detach_all(boothowto) ||
		       vfs_unmount_forceone(l))
			;	/* do nothing */
	} else {
		if (!db_active)
			suspendsched();
	}

	pmf_system_shutdown(boothowto);

	splhigh();

haltsys:
	doshutdownhooks();

#ifdef MULTIPROCESSOR
	/* Stop all secondary cpus */
	mp_halt_cpus();
#endif

	/* If powerdown was requested, do it. */
	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
#ifdef MULTIPROCESSOR
		printf("cpu%d: powered down\n\n", cpu_number());
#else
		printf("powered down\n\n");
#endif
		/* Let the OBP do the work. */
		OF_poweroff();
		printf("WARNING: powerdown failed!\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
	}

	if (howto & RB_HALT) {
#ifdef MULTIPROCESSOR
		printf("cpu%d: halted\n\n", cpu_number());
#else
		printf("halted\n\n");
#endif
		OF_exit();
		panic("PROM exit failed");
	}

#ifdef MULTIPROCESSOR
	printf("cpu%d: rebooting\n\n", cpu_number());
#else
	printf("rebooting\n\n");
#endif
	if (user_boot_string && *user_boot_string) {
		i = strlen(user_boot_string);
		if (i > sizeof(str))
			OF_boot(user_boot_string);	/* XXX */
		memcpy(str, user_boot_string, i);
	} else {
		i = 1;
		str[0] = '\0';
	}

	if (howto & RB_SINGLE)
		str[i++] = 's';
	if (howto & RB_KDB)
		str[i++] = 'd';
	if (i > 1) {
		if (str[0] == '\0')
			str[0] = '-';
		str[i] = 0;
	} else
		str[0] = 0;
	OF_boot(str);
	panic("cpu_reboot -- failed");
	/*NOTREACHED*/
}

uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;

void
cpu_dumpconf(void)
{
	int nblks, dumpblks;

	if (dumpdev == NODEV)
		/* No usable dump device */
		return;
	nblks = bdev_size(dumpdev);

	dumpblks = ctod(physmem) + pmap_dumpsize();
	if (dumpblks > (nblks - ctod(1)))
		/*
		 * dump size is too big for the partition.
		 * Note, we safeguard a click at the front for a
		 * possible disk label.
		 */
		return;

	/* Put the dump at the end of the partition */
	dumplo = nblks - dumpblks;

	/*
	 * savecore(8) expects dumpsize to be the number of pages
	 * of actual core dumped (i.e. excluding the MMU stuff).
	 */
	dumpsize = physmem;
}

#define	BYTES_PER_DUMP	MAXPHYS		/* must be a multiple of pagesize */
static vaddr_t dumpspace;
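
/*
 * dumpspace is a BYTES_PER_DUMP-sized window of kernel VA set aside
 * via reserve_dumppages(); dumpsys() maps each physical chunk into
 * it before writing that chunk to the dump device.
 */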

void *
reserve_dumppages(void *p)
{

	dumpspace = (vaddr_t)p;
	return (char *)p + BYTES_PER_DUMP;
}

/*
 * Write a crash dump.
 */
void
dumpsys(void)
{
	const struct bdevsw *bdev;
	int psize;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, void *, size_t);
	int j, error = 0;
	uint64_t todo;
	struct mem_region *mp;

	/* copy registers to dumppcb and flush windows */
	memset(&dumppcb, 0, sizeof(struct pcb));
	snapshot(&dumppcb);
	stackdump();

	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if the dump device has already been configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (!dumpspace) {
		printf("\nno address space available, dump not possible\n");
		return;
	}
	if (dumplo <= 0) {
		printf("\ndump to dev %" PRId32 ",%" PRId32 " not possible ("
		    "partition too small?)\n", major(dumpdev), minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %" PRId32 ",%" PRId32 " offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	psize = bdev_size(dumpdev);
	if (psize == -1) {
		printf("dump area unavailable\n");
		return;
	}
	blkno = dumplo;
	dump = bdev->d_dump;

	error = pmap_dumpmmu(dump, blkno);
	blkno += pmap_dumpsize();

	/* calculate total size of dump */
	for (todo = 0, j = 0; j < phys_installed_size; j++)
		todo += phys_installed[j].size;

	for (mp = &phys_installed[0], j = 0; j < phys_installed_size;
			j++, mp = &phys_installed[j]) {
		uint64_t i = 0, n, off;
		paddr_t maddr = mp->start;

		for (; i < mp->size; i += n) {
			n = mp->size - i;
			if (n > BYTES_PER_DUMP)
				 n = BYTES_PER_DUMP;

			/* print out how many MBs we still have to dump */
			if ((todo % (1024*1024)) == 0)
				printf_flags(TOCONS|NOTSTAMP,
				    "\r%6" PRIu64 " M ",
				    todo / (1024*1024));
			for (off = 0; off < n; off += PAGE_SIZE)
				pmap_kenter_pa(dumpspace+off, maddr+off,
				    VM_PROT_READ, 0);
			error = (*dump)(dumpdev, blkno,
					(void *)dumpspace, (size_t)n);
			pmap_kremove(dumpspace, n);
			if (error)
				break;
			maddr += n;
			todo -= n;
			blkno += btodb(n);
		}
	}

	switch (error) {

	case ENXIO:
		printf("- device bad\n");
		break;

	case EFAULT:
		printf("- device not ready\n");
		break;

	case EINVAL:
		printf("- area improper\n");
		break;

	case EIO:
		printf("- i/o error\n");
		break;

	case 0:
		printf_flags(TOCONS|NOTSTAMP, "\r           ");
		printf("\ndump succeeded\n");
		break;

	default:
		printf("- error %d\n", error);
		break;
	}
}

void trapdump(struct trapframe64*);
/*
 * dump out a trapframe.
 */
void
trapdump(struct trapframe64* tf)
{
	printf("TRAPFRAME: tstate=%llx pc=%llx npc=%llx y=%x\n",
	       (unsigned long long)tf->tf_tstate, (unsigned long long)tf->tf_pc,
	       (unsigned long long)tf->tf_npc, (unsigned)tf->tf_y);
	printf("%%g1-7: %llx %llx %llx %llx %llx %llx %llx\n",
	       (unsigned long long)tf->tf_global[1],
	       (unsigned long long)tf->tf_global[2],
	       (unsigned long long)tf->tf_global[3],
	       (unsigned long long)tf->tf_global[4],
	       (unsigned long long)tf->tf_global[5],
	       (unsigned long long)tf->tf_global[6],
	       (unsigned long long)tf->tf_global[7]);
	printf("%%o0-7: %llx %llx %llx %llx\n %llx %llx %llx %llx\n",
	       (unsigned long long)tf->tf_out[0],
	       (unsigned long long)tf->tf_out[1],
	       (unsigned long long)tf->tf_out[2],
	       (unsigned long long)tf->tf_out[3],
	       (unsigned long long)tf->tf_out[4],
	       (unsigned long long)tf->tf_out[5],
	       (unsigned long long)tf->tf_out[6],
	       (unsigned long long)tf->tf_out[7]);
}

static void
get_symbol_and_offset(const char **mod, const char **sym, vaddr_t *offset, vaddr_t pc)
{
	static char symbuf[256];
	unsigned long symaddr;
	int s, error;

#if NKSYMS || defined(DDB) || defined(MODULAR)
	s = pserialize_read_enter();
	if (ksyms_getname(mod, sym, pc,
			  KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY) == 0) {
		error = ksyms_getval(*mod, *sym, &symaddr,
		    KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
		pserialize_read_exit(s);
		if (error)
			goto failed;

		*offset = (vaddr_t)(pc - symaddr);
		return;
	}
	pserialize_read_exit(s);
#endif
 failed:
	snprintf(symbuf, sizeof symbuf, "%llx", (unsigned long long)pc);
	*mod = "netbsd";
	*sym = symbuf;
	*offset = 0;
}

/*
 * get the fp and dump the stack as best we can.  don't leave the
 * current stack page
 */
void
stackdump(void)
{
	struct frame32 *fp = (struct frame32 *)getfp(), *sfp;
	struct frame64 *fp64;
	const char *mod, *sym;
	vaddr_t offset;

	sfp = fp;
	printf("Frame pointer is at %p\n", fp);
	printf("Call traceback:\n");
	while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
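		/*
		 * A saved frame pointer with its low bit set denotes a
		 * biased 64-bit frame (the V9 BIAS is odd); an even
		 * value is a 32-bit frame.
		 */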
		if (((long)fp) & 1) {
			fp64 = (struct frame64*)(((char*)fp)+BIAS);
			/* 64-bit frame */
			get_symbol_and_offset(&mod, &sym, &offset, fp64->fr_pc);
			printf(" %s:%s+%#llx(%llx, %llx, %llx, %llx, %llx, %llx) fp = %llx\n",
			       mod, sym,
			       (unsigned long long)offset,
			       (unsigned long long)fp64->fr_arg[0],
			       (unsigned long long)fp64->fr_arg[1],
			       (unsigned long long)fp64->fr_arg[2],
			       (unsigned long long)fp64->fr_arg[3],
			       (unsigned long long)fp64->fr_arg[4],
			       (unsigned long long)fp64->fr_arg[5],
			       (unsigned long long)fp64->fr_fp);
			fp = (struct frame32 *)(u_long)fp64->fr_fp;
		} else {
			/* 32-bit frame */
			get_symbol_and_offset(&mod, &sym, &offset, fp->fr_pc);
			printf(" %s:%s+%#lx(%x, %x, %x, %x, %x, %x) fp = %x\n",
			       mod, sym,
			       (unsigned long)offset,
			       fp->fr_arg[0],
			       fp->fr_arg[1],
			       fp->fr_arg[2],
			       fp->fr_arg[3],
			       fp->fr_arg[4],
			       fp->fr_arg[5],
			       fp->fr_fp);
			fp = (struct frame32*)(u_long)fp->fr_fp;
		}
	}
}


int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{
	return (ENOEXEC);
}

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct sparc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	struct sparc_bus_dmamap *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	map = (struct sparc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT|BUS_DMA_COHERENT|
				   BUS_DMA_NOWRITE|BUS_DMA_NOCACHE);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	if (map->dm_nsegs)
		bus_dmamap_unload(t, map);
	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 *
 * Most SPARCs have IOMMUs in the bus controllers.  In those cases
 * they only need one segment and will use virtual addresses for DVMA.
 * Those bus controllers should intercept these vectors and should
 * *NEVER* call _bus_dmamap_load() which is used only by devices that
 * bypass DVMA.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *sbuf,
	bus_size_t buflen, struct proc *p, int flags)
{
	bus_size_t sgsize;
	vaddr_t vaddr = (vaddr_t)sbuf;
	long incr;
	int i;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
	{
#ifdef DEBUG
		printf("_bus_dmamap_load(): error %lu > %lu -- map size exceeded!\n",
		    (unsigned long)buflen, (unsigned long)map->_dm_size);
#ifdef DDB
		Debugger();
#endif
#endif
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * We always use just one segment.
	 */
	i = 0;
	map->dm_segs[i].ds_addr = 0UL;
	map->dm_segs[i].ds_len = 0;

	incr = PAGE_SIZE - (vaddr & PGOFSET);
	while (sgsize > 0) {
		paddr_t pa;

		incr = uimin(sgsize, incr);

		(void) pmap_extract(pmap_kernel(), vaddr, &pa);
		if (map->dm_segs[i].ds_len == 0)
			map->dm_segs[i].ds_addr = pa;
		if (pa == (map->dm_segs[i].ds_addr + map->dm_segs[i].ds_len)
		    && ((map->dm_segs[i].ds_len + incr) <= map->dm_maxsegsz)) {
			/* Hey, waddyaknow, they're contiguous */
			map->dm_segs[i].ds_len += incr;
		} else {
			if (++i >= map->_dm_segcnt)
				return (EFBIG);
			map->dm_segs[i].ds_addr = pa;
			map->dm_segs[i].ds_len = incr;
		}
		sgsize -= incr;
		vaddr += incr;
		incr = PAGE_SIZE;
	}
	map->dm_nsegs = i + 1;
	map->dm_mapsize = buflen;
	/* Mapping is bus dependent */
	return (0);
}
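
/*
 * For reference, a typical driver-side cycle over these entry points
 * looks roughly like the sketch below (tag, buffer and length are
 * hypothetical):
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(t, len, 1, len, 0, BUS_DMA_NOWAIT, &map))
 *		return (ENOMEM);
 *	if (bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT))
 *		goto fail;
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... run the DMA transfer and wait for completion ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */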

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
	int flags)
{
	bus_dma_segment_t segs[MAX_DMA_SEGS];
	int i;
	size_t len;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (m->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	/* Record mbuf for *_unload */
	map->_dm_type = _DM_TYPE_MBUF;
	map->_dm_source = (void *)m;

	i = 0;
	len = 0;
	while (m) {
		vaddr_t vaddr = mtod(m, vaddr_t);
		long buflen = (long)m->m_len;

		len += buflen;
		while (buflen > 0 && i < MAX_DMA_SEGS) {
			paddr_t pa;
			long incr;

			incr = PAGE_SIZE - (vaddr & PGOFSET);
			incr = uimin(buflen, incr);

			if (pmap_extract(pmap_kernel(), vaddr, &pa) == FALSE) {
#ifdef DIAGNOSTIC
				printf("_bus_dmamap_load_mbuf: pmap_extract failed %lx\n",
				       vaddr);
#endif
				map->_dm_type = 0;
				map->_dm_source = NULL;
				return EINVAL;
			}

			buflen -= incr;
			vaddr += incr;

			if (i > 0 &&
				pa == (segs[i-1].ds_addr + segs[i-1].ds_len) &&
				((segs[i-1].ds_len + incr) <=
					map->dm_maxsegsz)) {
				/* Hey, waddyaknow, they're contiguous */
				segs[i-1].ds_len += incr;
				continue;
			}
			segs[i].ds_addr = pa;
			segs[i].ds_len = incr;
			segs[i]._ds_boundary = 0;
			segs[i]._ds_align = 0;
			segs[i]._ds_mlist = NULL;
			i++;
		}
		m = m->m_next;
		if (m && i >= MAX_DMA_SEGS) {
			/* Exceeded the size of our dmamap */
			map->_dm_type = 0;
			map->_dm_source = NULL;
			return EFBIG;
		}
	}

#ifdef DEBUG
	{
		size_t mbuflen, sglen;
		int j;
		int retval;

		mbuflen = 0;
		for (m = (struct mbuf *)map->_dm_source; m; m = m->m_next)
			mbuflen += (long)m->m_len;
		sglen = 0;
		for (j = 0; j < i; j++)
			sglen += segs[j].ds_len;
		if (sglen != mbuflen)
			panic("load_mbuf: sglen %ld != mbuflen %lx\n",
				sglen, mbuflen);
		if (sglen != len)
			panic("load_mbuf: sglen %ld != len %lx\n",
				sglen, len);
		retval = bus_dmamap_load_raw(t, map, segs, i,
			(bus_size_t)len, flags);
		if (retval == 0) {
			if (map->dm_mapsize != len)
				panic("load_mbuf: mapsize %ld != len %lx\n",
					(long)map->dm_mapsize, len);
			sglen = 0;
			for (j = 0; j < map->dm_nsegs; j++)
				sglen += map->dm_segs[j].ds_len;
			if (sglen != len)
				panic("load_mbuf: dmamap sglen %ld != len %lx\n",
					sglen, len);
		}
		return (retval);
	}
#endif
	return (bus_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags));
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags)
{
/*
 * XXXXXXX The problem with this routine is that it needs to
 * lock the user address space that is being loaded, but there
 * is no real way for us to unlock it during the unload process.
 */
#if 0
	bus_dma_segment_t segs[MAX_DMA_SEGS];
	int i, j;
	size_t len;
	struct proc *p = uio->uio_lwp->l_proc;
	struct pmap *pm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (uio->uio_segflg == UIO_USERSPACE) {
		pm = p->p_vmspace->vm_map.pmap;
	} else
		pm = pmap_kernel();

	i = 0;
	len = 0;
	for (j = 0; j < uio->uio_iovcnt; j++) {
		struct iovec *iov = &uio->uio_iov[j];
		vaddr_t vaddr = (vaddr_t)iov->iov_base;
		bus_size_t buflen = iov->iov_len;

		/*
		 * Lock the part of the user address space involved
		 *    in the transfer.
		 */
		if (__predict_false(uvm_vslock(p->p_vmspace, vaddr, buflen,
			    (uio->uio_rw == UIO_WRITE) ?
			    VM_PROT_WRITE : VM_PROT_READ) != 0)) {
				goto after_vsunlock;
			}

		len += buflen;
		while (buflen > 0 && i < MAX_DMA_SEGS) {
			paddr_t pa;
			long incr;

			incr = uimin(buflen, PAGE_SIZE);
			(void) pmap_extract(pm, vaddr, &pa);
			buflen -= incr;
			vaddr += incr;
			if (segs[i].ds_len == 0)
				segs[i].ds_addr = pa;


			if (i > 0 && pa == (segs[i-1].ds_addr + segs[i-1].ds_len)
			    && ((segs[i-1].ds_len + incr) <= map->dm_maxsegsz)) {
				/* Hey, waddyaknow, they're contiguous */
				segs[i-1].ds_len += incr;
				continue;
			}
			segs[i].ds_addr = pa;
			segs[i].ds_len = incr;
			segs[i]._ds_boundary = 0;
			segs[i]._ds_align = 0;
			segs[i]._ds_mlist = NULL;
			i++;
		}
		uvm_vsunlock(p->p_vmspace, bp->b_data, todo);
		if (buflen > 0 && i >= MAX_DMA_SEGS)
			/* Exceeded the size of our dmamap */
			return EFBIG;
	}
	map->_dm_type = DM_TYPE_UIO;
	map->_dm_source = (void *)uio;
	return (bus_dmamap_load_raw(t, map, segs, i,
				    (bus_size_t)len, flags));
#endif
	return 0;
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	int i;
	struct vm_page *pg;
	struct pglist *pglist;
	paddr_t pa;

	for (i = 0; i < map->dm_nsegs; i++) {
		if ((pglist = map->dm_segs[i]._ds_mlist) == NULL) {

			/*
			 * We were asked to load random VAs and lost the
			 * PA info so just blow the entire cache away.
			 */
			blast_dcache();
			break;
		}
		TAILQ_FOREACH(pg, pglist, pageq.queue) {
			pa = VM_PAGE_TO_PHYS(pg);

			/*
			 * We should be flushing a subrange, but we
			 * don't know where the segment starts.
			 */
			dcache_flush_page_all(pa);
		}
	}

	/* Mark the mappings as invalid. */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{
	int i;
	struct vm_page *pg;
	struct pglist *pglist;

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive.  We should optimize.
	 */
	if ((ops & BUS_DMASYNC_PREREAD) || (ops & BUS_DMASYNC_PREWRITE)) {

		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		membar_Sync();
	}
	if (ops & BUS_DMASYNC_POSTREAD) {
		/* Invalidate the vcache */
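		/*
		 * Walk each segment's page list, skipping whole pages
		 * until "offset" lands inside one, then flush page by
		 * page until "len" bytes have been covered.
		 */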
		for (i = 0; i < map->dm_nsegs; i++) {
			if ((pglist = map->dm_segs[i]._ds_mlist) == NULL)
				/* Should not really happen. */
				continue;
			TAILQ_FOREACH(pg, pglist, pageq.queue) {
				paddr_t start;
				psize_t size = PAGE_SIZE;

				if (offset < PAGE_SIZE) {
					start = VM_PAGE_TO_PHYS(pg) + offset;
					size -= offset;
					if (size > len)
						size = len;
					cache_flush_phys(start, size, 0);
					len -= size;
					if (len == 0)
						goto done;
					offset = 0;
					continue;
				}
				offset -= size;
			}
		}
	}
 done:
	if (ops & BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do.  Handled by the bus controller. */
	}
}

extern paddr_t   vm_first_phys, vm_num_phys;
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	vaddr_t low, high;
	struct pglist *pglist;
	int error;

	/* Always round the size. */
	size = round_page(size);
	low = vm_first_phys;
	high = vm_first_phys + vm_num_phys - PAGE_SIZE;

	if ((pglist = kmem_alloc(sizeof(*pglist),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	/*
	 * If the bus uses DVMA then ignore boundary and alignment.
	 */
	segs[0]._ds_boundary = boundary;
	segs[0]._ds_align = alignment;
	if (flags & BUS_DMA_DVMA) {
		boundary = 0;
		alignment = 0;
	}

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high,
	    alignment, boundary, pglist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error) {
		kmem_free(pglist, sizeof(*pglist));
		return (error);
	}

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	segs[0].ds_addr = 0UL; /* UPA does not map things */
	segs[0].ds_len = size;
	*rsegs = 1;

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
	 * ARE IN OUR CUSTODY.
	 */
	segs[0]._ds_mlist = pglist;

	/* The bus driver should do the actual mapping */
	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist *pglist = segs[0]._ds_mlist;

	if (nsegs != 1)
		panic("bus_dmamem_free: nsegs = %d", nsegs);

	/*
	 * Return the list of pages back to the VM system.
	 */
	uvm_pglistfree(pglist);
	kmem_free(pglist, sizeof(*pglist));
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	vaddr_t va, sva;
	int r;
	size_t oversize;
	u_long align;

	if (nsegs != 1)
		panic("_bus_dmamem_map: nsegs = %d", nsegs);

	align = PAGE_SIZE;

	size = round_page(size);

	/*
	 * Find a region of kernel virtual addresses that can accommodate
	 * our alignment requirements.
	 */
	oversize = size + align - PAGE_SIZE;
	r = uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_NORMAL, 0));
	if (r != 0)
		return (ENOMEM);

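	/*
	 * The adjustment below advances va until (va & (align - 1))
	 * matches (ds_addr & (align - 1)), i.e. until the virtual
	 * address shares the physical address's offset within the
	 * alignment window.
	 */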
	/* Compute start of aligned region */
	va = sva;
	va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);

	/* Return excess virtual addresses */
	if (va != sva)
		uvm_unmap(kernel_map, sva, va);
	if (va + size != sva + oversize)
		uvm_unmap(kernel_map, va + size, sva + oversize);

	*kvap = (void *)va;
	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);
	uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
	int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (atop(segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}


struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,
	NULL,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};


/*
 * Base bus space handlers.
 */
static int	sparc_bus_map(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	vaddr_t, bus_space_handle_t *);
static int	sparc_bus_unmap(bus_space_tag_t, bus_space_handle_t, bus_size_t);
static int	sparc_bus_subregion(bus_space_tag_t, bus_space_handle_t, bus_size_t,
	bus_size_t, bus_space_handle_t *);
static paddr_t	sparc_bus_mmap(bus_space_tag_t, bus_addr_t, off_t, int, int);
static void	*sparc_mainbus_intr_establish(bus_space_tag_t, int, int,
	int (*)(void *), void *, void (*)(void));
static int	sparc_bus_alloc(bus_space_tag_t, bus_addr_t, bus_addr_t, bus_size_t,
	bus_size_t, bus_size_t, int, bus_addr_t *, bus_space_handle_t *);
static void	sparc_bus_free(bus_space_tag_t, bus_space_handle_t, bus_size_t);

struct extent *io_space = NULL;

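/*
 * The wrappers below dispatch each bus_space operation through the
 * tag via the _BS_CALL/_BS_VOID_CALL macros, which lets child bus
 * drivers override the sparc_bus_* defaults declared above.
 */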
int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rs, bus_addr_t re, bus_size_t s,
	bus_size_t a, bus_size_t b, int f, bus_addr_t *ap,
	bus_space_handle_t *hp)
{
	_BS_CALL(t, sparc_bus_alloc)(t, rs, re, s, a, b, f, ap, hp);
}

void
bus_space_free(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
{
	_BS_VOID_CALL(t, sparc_bus_free)(t, h, s);
}

int
bus_space_map(bus_space_tag_t t, bus_addr_t a, bus_size_t s, int f,
	bus_space_handle_t *hp)
{
	_BS_CALL(t, sparc_bus_map)(t, a, s, f, 0, hp);
}

void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
{
	_BS_VOID_CALL(t, sparc_bus_unmap)(t, h, s);
}

int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
	bus_size_t s, bus_space_handle_t *hp)
{
	_BS_CALL(t, sparc_bus_subregion)(t, h, o, s, hp);
}

paddr_t
bus_space_mmap(bus_space_tag_t t, bus_addr_t a, off_t o, int p, int f)
{
	_BS_CALL(t, sparc_bus_mmap)(t, a, o, p, f);
}

/*
 *	void bus_space_read_multi_N(bus_space_tag_t tag,
 *	    bus_space_handle_t bsh, bus_size_t offset,
 *	    uintN_t *addr, bus_size_t count);
 *
 * Read `count' 1, 2, 4, or 8 byte quantities from bus space
 * described by tag/handle/offset and copy into buffer provided.
 */
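/*
 * Note that the offset is not advanced between accesses: the multi
 * variants are meant for FIFO-style registers, while the region and
 * copy_region variants further below step through a range.
 */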
void
bus_space_read_multi_1(bus_space_tag_t t, bus_space_handle_t h,
	bus_size_t o, uint8_t * a, bus_size_t c)
{
	while (c-- > 0)
		*a++ = bus_space_read_1(t, h, o);
}

void
bus_space_read_multi_2(bus_space_tag_t t, bus_space_handle_t h,
	bus_size_t o, uint16_t * a, bus_size_t c)
{
	while (c-- > 0)
		*a++ = bus_space_read_2(t, h, o);
}

void
bus_space_read_multi_4(bus_space_tag_t t, bus_space_handle_t h,
	bus_size_t o, uint32_t * a, bus_size_t c)
{
	while (c-- > 0)
		*a++ = bus_space_read_4(t, h, o);
}

void
bus_space_read_multi_8(bus_space_tag_t t, bus_space_handle_t h,
	bus_size_t o, uint64_t * a, bus_size_t c)
{
	while (c-- > 0)
		*a++ = bus_space_read_8(t, h, o);
}

/*
 *	void bus_space_write_multi_N(bus_space_tag_t tag,
 *	    bus_space_handle_t bsh, bus_size_t offset,
 *	    const uintN_t *addr, bus_size_t count);
 *
 * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
 * provided to bus space described by tag/handle/offset.
 */
void
bus_space_write_multi_1(bus_space_tag_t t,
	bus_space_handle_t h, bus_size_t o,
	const uint8_t *a, bus_size_t c)
{
	while (c-- > 0)
		bus_space_write_1(t, h, o, *a++);
}

void
bus_space_write_multi_2(bus_space_tag_t t,
	bus_space_handle_t h, bus_size_t o,
	const uint16_t *a, bus_size_t c)
{
	while (c-- > 0)
		bus_space_write_2(t, h, o, *a++);
}

void
bus_space_write_multi_4(bus_space_tag_t t,
	bus_space_handle_t h, bus_size_t o,
	const uint32_t *a, bus_size_t c)
{
	while (c-- > 0)
		bus_space_write_4(t, h, o, *a++);
}

void
bus_space_write_multi_8(bus_space_tag_t t,
	bus_space_handle_t h, bus_size_t o,
	const uint64_t *a, bus_size_t c)
{
	while (c-- > 0)
		bus_space_write_8(t, h, o, *a++);
}

/*
 *	void bus_space_set_multi_stream_N(bus_space_tag_t tag,
 *	    bus_space_handle_t bsh, bus_size_t offset, uintN_t val,
 *	    bus_size_t count);
 *
 * Write the 1, 2, 4, or 8 byte value `val' to bus space described
 * by tag/handle/offset `count' times.
 */
void
bus_space_set_multi_stream_1(bus_space_tag_t t,
	bus_space_handle_t h, bus_size_t o, uint8_t v,
	bus_size_t c)
{
	while (c-- > 0)
		bus_space_write_stream_1(t, h, o, v);
}

void
bus_space_set_multi_stream_2(bus_space_tag_t t,
	bus_space_handle_t h, bus_size_t o, uint16_t v,
	bus_size_t c)
{
	while (c-- > 0)
		bus_space_write_stream_2(t, h, o, v);
}

void
bus_space_set_multi_stream_4(bus_space_tag_t t,
	bus_space_handle_t h, bus_size_t o, uint32_t v,
	bus_size_t c)
{
	while (c-- > 0)
		bus_space_write_stream_4(t, h, o, v);
}

void
bus_space_set_multi_stream_8(bus_space_tag_t t,
	bus_space_handle_t h, bus_size_t o, uint64_t v,
	bus_size_t c)
{
	while (c-- > 0)
		bus_space_write_stream_8(t, h, o, v);
}

/*
 *	void bus_space_copy_region_stream_N(bus_space_tag_t tag,
 *	    bus_space_handle_t bsh1, bus_size_t off1,
 *	    bus_space_handle_t bsh2, bus_size_t off2,
 *	    bus_size_t count);
 *
 * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
 * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
 */
void
bus_space_copy_region_stream_1(bus_space_tag_t t, bus_space_handle_t h1,
	bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	for (; c; c--, o1++, o2++)
	    bus_space_write_stream_1(t, h1, o1, bus_space_read_stream_1(t, h2, o2));
}

void
bus_space_copy_region_stream_2(bus_space_tag_t t, bus_space_handle_t h1,
	bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	for (; c; c--, o1+=2, o2+=2)
	    bus_space_write_stream_2(t, h1, o1, bus_space_read_stream_2(t, h2, o2));
}

void
bus_space_copy_region_stream_4(bus_space_tag_t t, bus_space_handle_t h1,
	bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	for (; c; c--, o1+=4, o2+=4)
	    bus_space_write_stream_4(t, h1, o1, bus_space_read_stream_4(t, h2, o2));
}

void
bus_space_copy_region_stream_8(bus_space_tag_t t, bus_space_handle_t h1,
	bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	for (; c; c--, o1+=8, o2+=8)
	    bus_space_write_stream_8(t, h1, o1, bus_space_read_stream_8(t, h2, o2));
}
1781 
1782 /*
1783  *	void bus_space_set_region_stream_N(bus_space_tag_t tag,
1784  *	    bus_space_handle_t bsh, bus_size_t off,
1785  *	    uintN_t *addr, bus_size_t count);
1786  *
1787  */
1788 void
bus_space_set_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t v,bus_size_t c)1789 bus_space_set_region_stream_1(bus_space_tag_t t, bus_space_handle_t h,
1790 	bus_size_t o, const uint8_t v, bus_size_t c)
1791 {
1792 	for (; c; c--, o++)
1793 		bus_space_write_stream_1(t, h, o, v);
1794 }
1795 
1796 void
1797 bus_space_set_region_stream_2(bus_space_tag_t t, bus_space_handle_t h,
1798 	bus_size_t o, const uint16_t v, bus_size_t c)
1799 {
1800 	for (; c; c--, o+=2)
1801 		bus_space_write_stream_2(t, h, o, v);
1802 }
1803 
1804 void
1805 bus_space_set_region_stream_4(bus_space_tag_t t, bus_space_handle_t h,
1806 	bus_size_t o, const uint32_t v, bus_size_t c)
1807 {
1808 	for (; c; c--, o+=4)
1809 		bus_space_write_stream_4(t, h, o, v);
1810 }
1811 
1812 void
1813 bus_space_set_region_stream_8(bus_space_tag_t t, bus_space_handle_t h,
1814 	bus_size_t o, const uint64_t v, bus_size_t c)
1815 {
1816 	for (; c; c--, o+=8)
1817 		bus_space_write_stream_8(t, h, o, v);
1818 }
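
/*
 * Usage sketch (hypothetical names): unlike set_multi, the offset
 * advances on each store, so a block of device memory can be cleared:
 *
 *	bus_space_set_region_stream_4(sc->sc_bt, sc->sc_memh,
 *	    RING_OFF, 0, RING_SIZE / 4);
 */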
1819 
1820 
1821 /*
1822  *	void bus_space_read_multi_stream_N(bus_space_tag_t tag,
1823  *	    bus_space_handle_t bsh, bus_size_t offset,
1824  *	    uintN_t *addr, bus_size_t count);
1825  *
1826  * Read `count' 1, 2, 4, or 8 byte quantities from bus space
1827  * described by tag/handle/offset and copy into buffer provided.
1828  */
1829 void
1830 bus_space_read_multi_stream_1(bus_space_tag_t t,
1831 	bus_space_handle_t h, bus_size_t o,
1832 	uint8_t *a, bus_size_t c)
1833 {
1834 	while (c-- > 0)
1835 		*a++ = bus_space_read_stream_1(t, h, o);
1836 }
1837 
1838 void
1839 bus_space_read_multi_stream_2(bus_space_tag_t t,
1840 	bus_space_handle_t h, bus_size_t o,
1841 	uint16_t *a, bus_size_t c)
1842 {
1843 	while (c-- > 0)
1844 		*a++ = bus_space_read_stream_2(t, h, o);
1845 }
1846 
1847 void
1848 bus_space_read_multi_stream_4(bus_space_tag_t t,
1849 	bus_space_handle_t h, bus_size_t o,
1850 	uint32_t *a, bus_size_t c)
1851 {
1852 	while (c-- > 0)
1853 		*a++ = bus_space_read_stream_4(t, h, o);
1854 }
1855 
1856 void
1857 bus_space_read_multi_stream_8(bus_space_tag_t t,
1858 	bus_space_handle_t h, bus_size_t o,
1859 	uint64_t *a, bus_size_t c)
1860 {
1861 	while (c-- > 0)
1862 		*a++ = bus_space_read_stream_8(t, h, o);
1863 }
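
/*
 * Usage sketch (hypothetical names): draining a byte-wide RX FIFO
 * into a local buffer; every read targets the same register and the
 * FIFO supplies successive bytes:
 *
 *	uint8_t buf[64];
 *
 *	bus_space_read_multi_stream_1(sc->sc_bt, sc->sc_bh,
 *	    RX_FIFO_REG, buf, sizeof(buf));
 */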
1864 
1865 /*
1866  *	void bus_space_read_region_stream_N(bus_space_tag_t tag,
1867  *	    bus_space_handle_t bsh, bus_size_t off,
1868  *	    uintN_t *addr, bus_size_t count);
1869  *
1870  */
1871 void
1872 bus_space_read_region_stream_1(bus_space_tag_t t, bus_space_handle_t h,
1873 	bus_size_t o, uint8_t *a, bus_size_t c)
1874 {
1875 	for (; c; a++, c--, o++)
1876 		*a = bus_space_read_stream_1(t, h, o);
1877 }
1878 void
1879 bus_space_read_region_stream_2(bus_space_tag_t t, bus_space_handle_t h,
1880 	bus_size_t o, uint16_t *a, bus_size_t c)
1881 {
1882 	for (; c; a++, c--, o+=2)
1883 		*a = bus_space_read_stream_2(t, h, o);
1884 }
1885 void
1886 bus_space_read_region_stream_4(bus_space_tag_t t, bus_space_handle_t h,
1887 	bus_size_t o, uint32_t *a, bus_size_t c)
1888 {
1889 	for (; c; a++, c--, o+=4)
1890 		*a = bus_space_read_stream_4(t, h, o);
1891 }
1892 void
1893 bus_space_read_region_stream_8(bus_space_tag_t t, bus_space_handle_t h,
1894 	bus_size_t o, uint64_t *a, bus_size_t c)
1895 {
1896 	for (; c; a++, c--, o+=8)
1897 		*a = bus_space_read_stream_8(t, h, o);
1898 }
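
/*
 * Usage sketch (hypothetical names): the read_region variants advance
 * the offset, so they copy a block of device memory rather than drain
 * a FIFO, e.g. a 32-byte descriptor read as eight words:
 *
 *	uint32_t desc[8];
 *
 *	bus_space_read_region_stream_4(sc->sc_bt, sc->sc_memh,
 *	    DESC_OFF, desc, 8);
 */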
1899 
1900 /*
1901  *	void bus_space_write_multi_stream_N(bus_space_tag_t tag,
1902  *	    bus_space_handle_t bsh, bus_size_t offset,
1903  *	    const uintN_t *addr, bus_size_t count);
1904  *
1905  * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
1906  * provided to bus space described by tag/handle/offset.
1907  */
1908 void
1909 bus_space_write_multi_stream_1(bus_space_tag_t t,
1910 	bus_space_handle_t h, bus_size_t o,
1911 	const uint8_t *a, bus_size_t c)
1912 {
1913 	while (c-- > 0)
1914 		bus_space_write_stream_1(t, h, o, *a++);
1915 }
1916 
1917 void
1918 bus_space_write_multi_stream_2(bus_space_tag_t t,
1919 	bus_space_handle_t h, bus_size_t o,
1920 	const uint16_t *a, bus_size_t c)
1921 {
1922 	while (c-- > 0)
1923 		bus_space_write_stream_2(t, h, o, *a++);
1924 }
1925 
1926 void
1927 bus_space_write_multi_stream_4(bus_space_tag_t t,
1928 	bus_space_handle_t h, bus_size_t o,
1929 	const uint32_t *a, bus_size_t c)
1930 {
1931 	while (c-- > 0)
1932 		bus_space_write_stream_4(t, h, o, *a++);
1933 }
1934 
1935 void
1936 bus_space_write_multi_stream_8(bus_space_tag_t t,
1937 	bus_space_handle_t h, bus_size_t o,
1938 	const uint64_t *a, bus_size_t c)
1939 {
1940 	while (c-- > 0)
1941 		bus_space_write_stream_8(t, h, o, *a++);
1942 }
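
/*
 * Usage sketch (hypothetical names): the mirror image of
 * read_multi_stream, pushing a prepared buffer into a byte-wide
 * TX FIFO at a fixed register offset:
 *
 *	bus_space_write_multi_stream_1(sc->sc_bt, sc->sc_bh,
 *	    TX_FIFO_REG, pkt, pktlen);
 */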
1943 
1944 /*
1945  *	void bus_space_copy_region_N(bus_space_tag_t tag,
1946  *	    bus_space_handle_t bsh1, bus_size_t off1,
1947  *	    bus_space_handle_t bsh2, bus_size_t off2,
1948  *	    bus_size_t count);
1949  *
1950  * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
1951  * at tag/bsh2/off2 to bus space starting at tag/bsh1/off1.
1952  */
1953 void
1954 bus_space_copy_region_1(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
1955 	bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1956 {
1957 	for (; c; c--, o1++, o2++)
1958 	    bus_space_write_1(t, h1, o1, bus_space_read_1(t, h2, o2));
1959 }
1960 
1961 void
1962 bus_space_copy_region_2(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
1963 	bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1964 {
1965 	for (; c; c--, o1+=2, o2+=2)
1966 	    bus_space_write_2(t, h1, o1, bus_space_read_2(t, h2, o2));
1967 }
1968 
1969 void
1970 bus_space_copy_region_4(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
1971 	bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1972 {
1973 	for (; c; c--, o1+=4, o2+=4)
1974 	    bus_space_write_4(t, h1, o1, bus_space_read_4(t, h2, o2));
1975 }
1976 
1977 void
1978 bus_space_copy_region_8(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
1979 	bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
1980 {
1981 	for (; c; c--, o1+=8, o2+=8)
1982 	    bus_space_write_8(t, h1, o1, bus_space_read_8(t, h2, o2));
1983 }
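
/*
 * The non-stream functions go through the handle's ASI, so data is
 * byte-swapped as selected when the space was mapped (e.g. little
 * endian for PCI); the *_stream_* variants access the bus in memory
 * order without swapping.  A hypothetical PCI driver reading a
 * little-endian CSR therefore uses the plain form and gets a
 * host-order value:
 *
 *	reg = bus_space_read_4(sc->sc_bt, sc->sc_bh, CSR_OFF);
 */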
1984 
1985 /*
1986  *	void bus_space_set_region_N(bus_space_tag_t tag,
1987  *	    bus_space_handle_t bsh, bus_size_t off,
1988  *	    uintN_t val, bus_size_t count);
1989  *
1990  */
1991 void
1992 bus_space_set_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
1993 	const uint8_t v, bus_size_t c)
1994 {
1995 	for (; c; c--, o++)
1996 		bus_space_write_1(t, h, o, v);
1997 }
1998 
1999 void
2000 bus_space_set_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2001 	const uint16_t v, bus_size_t c)
2002 {
2003 	for (; c; c--, o+=2)
2004 		bus_space_write_2(t, h, o, v);
2005 }
2006 
2007 void
2008 bus_space_set_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2009 	const uint32_t v, bus_size_t c)
2010 {
2011 	for (; c; c--, o+=4)
2012 		bus_space_write_4(t, h, o, v);
2013 }
2014 
2015 void
2016 bus_space_set_region_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2017 	const uint64_t v, bus_size_t c)
2018 {
2019 	for (; c; c--, o+=8)
2020 		bus_space_write_8(t, h, o, v);
2021 }
2022 
2023 
2024 /*
2025  *	void bus_space_set_multi_N(bus_space_tag_t tag,
2026  *	    bus_space_handle_t bsh, bus_size_t offset, uintN_t val,
2027  *	    bus_size_t count);
2028  *
2029  * Write the 1, 2, 4, or 8 byte value `val' to bus space described
2030  * by tag/handle/offset `count' times.
2031  */
2032 void
2033 bus_space_set_multi_1(bus_space_tag_t t,
2034 	bus_space_handle_t h, bus_size_t o, uint8_t v,
2035 	bus_size_t c)
2036 {
2037 	while (c-- > 0)
2038 		bus_space_write_1(t, h, o, v);
2039 }
2040 
2041 void
2042 bus_space_set_multi_2(bus_space_tag_t t,
2043 	bus_space_handle_t h, bus_size_t o, uint16_t v,
2044 	bus_size_t c)
2045 {
2046 	while (c-- > 0)
2047 		bus_space_write_2(t, h, o, v);
2048 }
2049 
2050 void
2051 bus_space_set_multi_4(bus_space_tag_t t,
2052 	bus_space_handle_t h, bus_size_t o, uint32_t v,
2053 	bus_size_t c)
2054 {
2055 	while (c-- > 0)
2056 		bus_space_write_4(t, h, o, v);
2057 }
2058 
2059 void
2060 bus_space_set_multi_8(bus_space_tag_t t,
2061 	bus_space_handle_t h, bus_size_t o, uint64_t v,
2062 	bus_size_t c)
2063 {
2064 	while (c-- > 0)
2065 		bus_space_write_8(t, h, o, v);
2066 }
2067 
2068 /*
2069  *	void bus_space_write_region_N(bus_space_tag_t tag,
2070  *	    bus_space_handle_t bsh, bus_size_t off,
2071  *	    const uintN_t *addr, bus_size_t count);
2072  *
2073  */
2074 void
2075 bus_space_write_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2076 	const uint8_t *a, bus_size_t c)
2077 {
2078 	for (; c; a++, c--, o++)
2079 		bus_space_write_1(t, h, o, *a);
2080 }
2081 
2082 void
2083 bus_space_write_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2084 	const uint16_t *a, bus_size_t c)
2085 {
2086 	for (; c; a++, c--, o+=2)
2087 		bus_space_write_2(t, h, o, *a);
2088 }
2089 
2090 void
2091 bus_space_write_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2092 	const uint32_t *a, bus_size_t c)
2093 {
2094 	for (; c; a++, c--, o+=4)
2095 		bus_space_write_4(t, h, o, *a);
2096 }
2097 
2098 void
2099 bus_space_write_region_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2100 	const uint64_t *a, bus_size_t c)
2101 {
2102 	for (; c; a++, c--, o+=8)
2103 		bus_space_write_8(t, h, o, *a);
2104 }
2105 
2106 
2107 /*
2108  *	void bus_space_read_region_N(bus_space_tag_t tag,
2109  *	    bus_space_handle_t bsh, bus_size_t off,
2110  *	    uintN_t *addr, bus_size_t count);
2111  *
2112  */
2113 void
2114 bus_space_read_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2115 	uint8_t *a, bus_size_t c)
2116 {
2117 	for (; c; a++, c--, o++)
2118 		*a = bus_space_read_1(t, h, o);
2119 }
2120 void
2121 bus_space_read_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2122 	uint16_t *a, bus_size_t c)
2123 {
2124 	for (; c; a++, c--, o+=2)
2125 		*a = bus_space_read_2(t, h, o);
2126 }
2127 void
2128 bus_space_read_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2129 	uint32_t *a, bus_size_t c)
2130 {
2131 	for (; c; a++, c--, o+=4)
2132 		*a = bus_space_read_4(t, h, o);
2133 }
2134 void
2135 bus_space_read_region_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
2136 	uint64_t *a, bus_size_t c)
2137 {
2138 	for (; c; a++, c--, o+=8)
2139 		*a = bus_space_read_8(t, h, o);
2140 }
2141 
2142 /*
2143  *	void bus_space_write_region_stream_N(bus_space_tag_t tag,
2144  *	    bus_space_handle_t bsh, bus_size_t off,
2145  *	    const uintN_t *addr, bus_size_t count);
2146  *
2147  */
2148 void
2149 bus_space_write_region_stream_1(bus_space_tag_t t, bus_space_handle_t h,
2150 	bus_size_t o, const uint8_t *a, bus_size_t c)
2151 {
2152 	for (; c; a++, c--, o++)
2153 		bus_space_write_stream_1(t, h, o, *a);
2154 }
2155 
2156 void
2157 bus_space_write_region_stream_2(bus_space_tag_t t, bus_space_handle_t h,
2158 	bus_size_t o, const uint16_t *a, bus_size_t c)
2159 {
2160 	for (; c; a++, c--, o+=2)
2161 		bus_space_write_stream_2(t, h, o, *a);
2162 }
2163 
2164 void
2165 bus_space_write_region_stream_4(bus_space_tag_t t, bus_space_handle_t h,
2166 	bus_size_t o, const uint32_t *a, bus_size_t c)
2167 {
2168 	for (; c; a++, c--, o+=4)
2169 		bus_space_write_stream_4(t, h, o, *a);
2170 }
2171 
2172 void
2173 bus_space_write_region_stream_8(bus_space_tag_t t, bus_space_handle_t h,
2174 	bus_size_t o, const uint64_t *a, bus_size_t c)
2175 {
2176 	for (; c; a++, c--, o+=8)
2177 		bus_space_write_stream_8(t, h, o, *a);
2178 }
2179 
2180 /*
2181  * Allocate a new bus tag and have it inherit the methods of the
2182  * given parent.
2183  */
2184 bus_space_tag_t
2185 bus_space_tag_alloc(bus_space_tag_t parent, void *cookie)
2186 {
2187 	struct sparc_bus_space_tag *sbt;
2188 
2189 	sbt = kmem_zalloc(sizeof(*sbt), KM_SLEEP);
2190 
2191 	if (parent) {
2192 		memcpy(sbt, parent, sizeof(*sbt));
2193 		sbt->parent = parent;
2194 		sbt->ranges = NULL;
2195 		sbt->nranges = 0;
2196 	}
2197 
2198 	sbt->cookie = cookie;
2199 	return (sbt);
2200 }
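
/*
 * Usage sketch (hypothetical names): a bus attachment typically
 * derives a tag from its parent and overrides selected methods,
 * inheriting the rest:
 *
 *	sbt = bus_space_tag_alloc(ma->ma_bustag, sc);
 *	sbt->sparc_bus_map = mybus_bus_map;
 *
 * When `parent' is NULL the returned tag is zero-filled apart from
 * the cookie, and the caller must supply every method itself.
 */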
2201 
2202 /*
2203  * Generic routine to translate an address using OpenPROM `ranges'.
2204  */
2205 int
2206 bus_space_translate_address_generic(struct openprom_range *ranges, int nranges,
2207     bus_addr_t *bap)
2208 {
2209 	int i, space = BUS_ADDR_IOSPACE(*bap);
2210 
2211 	for (i = 0; i < nranges; i++) {
2212 		struct openprom_range *rp = &ranges[i];
2213 
2214 		if (rp->or_child_space != space)
2215 			continue;
2216 
2217 		/* We've found the connection to the parent bus. */
2218 		*bap = BUS_ADDR(rp->or_parent_space,
2219 		    rp->or_parent_base + BUS_ADDR_PADDR(*bap));
2220 		return (0);
2221 	}
2222 
2223 	return (EINVAL);
2224 }
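
/*
 * Example: given a (hypothetical) `ranges' entry mapping child space
 * 1, base 0x0, onto parent space 0, base 0x1fe02000000, an address
 * BUS_ADDR(1, 0x400) is rewritten to BUS_ADDR(0, 0x1fe02000400).
 * If no entry matches the child space, the address is left untouched
 * and EINVAL is returned.
 */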
2225 
2226 int
2227 sparc_bus_map(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
2228 	int flags, vaddr_t unused, bus_space_handle_t *hp)
2229 {
2230 	vaddr_t v;
2231 	uint64_t pa;
2232 	paddr_t	pm_flags = 0;
2233 	vm_prot_t pm_prot = VM_PROT_READ;
2234 	int err, map_little = 0;
2235 
2236 	if (io_space == NULL)
2237 		/*
2238 		 * Set up the IOSPACE extent map on first use.
2239 		 */
2240 		io_space = extent_create("IOSPACE",
2241 					 (u_long)IODEV_BASE, (u_long)IODEV_END,
2242 					 0, 0, EX_NOWAIT);
2243 
2244 
2245 	size = round_page(size);
2246 	if (size == 0) {
2247 		printf("sparc_bus_map: zero size\n");
2248 		return (EINVAL);
2249 	}
2250 	switch (t->type) {
2251 	case PCI_CONFIG_BUS_SPACE:
2252 		/*
2253 		 * PCI config space is special.
2254 		 *
2255 		 * It's really big and seldom used.  In order not to run
2256 		 * out of IO mappings, config space will not be mapped in,
2257 		 * rather it will be accessed through MMU bypass ASI accesses.
2258 		 */
2259 		if (flags & BUS_SPACE_MAP_LINEAR)
2260 			return (-1);
2261 		hp->_ptr = addr;
2262 		hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
2263 		hp->_sasi = ASI_PHYS_NON_CACHED;
2264 		DPRINTF(BSDB_MAP, ("\n%s: config type %x flags %x "
2265 			"addr %016llx size %016llx virt %llx\n", __func__,
2266 			(int)t->type, (int) flags, (unsigned long long)addr,
2267 			(unsigned long long)size,
2268 			(unsigned long long)hp->_ptr));
2269 		return (0);
2270 	case PCI_IO_BUS_SPACE:
2271 		map_little = 1;
2272 		break;
2273 	case PCI_MEMORY_BUS_SPACE:
2274 		map_little = 1;
2275 		break;
2276 	default:
2277 		map_little = 0;
2278 		break;
2279 	}
2280 
2281 #ifdef _LP64
2282 	if (!CPU_ISSUN4V) {
2283 		/* If it's not LINEAR don't bother to map it.  Use phys accesses. */
2284 		if ((flags & BUS_SPACE_MAP_LINEAR) == 0) {
2285 			hp->_ptr = addr;
2286 			if (map_little)
2287 				hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
2288 			else
2289 				hp->_asi = ASI_PHYS_NON_CACHED;
2290 			hp->_sasi = ASI_PHYS_NON_CACHED;
2291 			return (0);
2292 		}
2293 	}
2294 #endif
2295 
2296 	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
2297 		pm_flags |= PMAP_NC;
2298 
2299 	if ((flags & BUS_SPACE_MAP_PREFETCHABLE))
2300 		pm_flags |= PMAP_WC;
2301 
2302 	if ((err = extent_alloc(io_space, size, PAGE_SIZE,
2303 		0, EX_NOWAIT|EX_BOUNDZERO, (u_long *)&v)))
2304 			panic("sparc_bus_map: cannot allocate io_space: %d", err);
2305 
2306 	/* note: preserve page offset */
2307 	hp->_ptr = (v | ((u_long)addr & PGOFSET));
2308 	hp->_sasi = ASI_PRIMARY;
2309 	if (map_little)
2310 		hp->_asi = ASI_PRIMARY_LITTLE;
2311 	else
2312 		hp->_asi = ASI_PRIMARY;
2313 
2314 	pa = trunc_page(addr);
2315 	if (!(flags&BUS_SPACE_MAP_READONLY))
2316 		pm_prot |= VM_PROT_WRITE;
2317 
2318 	DPRINTF(BSDB_MAP, ("\n%s: type %x flags %x addr %016llx prot %02x "
2319 		"pm_flags %x size %016llx virt %llx paddr %016llx\n", __func__,
2320 		(int)t->type, (int)flags, (unsigned long long)addr, pm_prot,
2321 		(int)pm_flags, (unsigned long long)size,
2322 		(unsigned long long)hp->_ptr, (unsigned long long)pa));
2323 
2324 	do {
2325 		DPRINTF(BSDB_MAP, ("%s: phys %llx virt %p hp %llx\n",
2326 			__func__,
2327 			(unsigned long long)pa, (char *)v,
2328 			(unsigned long long)hp->_ptr));
2329 		pmap_kenter_pa(v, pa | pm_flags, pm_prot, 0);
2330 		v += PAGE_SIZE;
2331 		pa += PAGE_SIZE;
2332 	} while ((size -= PAGE_SIZE) > 0);
2333 	return (0);
2334 }
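
/*
 * Usage sketch (hypothetical names): drivers reach this through the
 * MI bus_space_map() wrapper.  On an LP64 sun4u kernel only
 * BUS_SPACE_MAP_LINEAR mappings consume IOSPACE virtual addresses;
 * everything else is accessed through physical ASIs:
 *
 *	if (bus_space_map(sc->sc_bt, ma->ma_address[0], ma->ma_size[0],
 *	    BUS_SPACE_MAP_LINEAR, &sc->sc_bh) != 0)
 *		return;
 */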
2335 
2336 int
2337 sparc_bus_subregion(bus_space_tag_t tag, bus_space_handle_t handle,
2338 	bus_size_t offset, bus_size_t size, bus_space_handle_t *nhandlep)
2339 {
2340 	nhandlep->_ptr = handle._ptr + offset;
2341 	nhandlep->_asi = handle._asi;
2342 	nhandlep->_sasi = handle._sasi;
2343 	return (0);
2344 }
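
/*
 * Usage sketch (hypothetical names): carving a per-channel window
 * out of an existing mapping; the new handle shares the parent's
 * ASIs and needs no separate unmap:
 *
 *	bus_space_subregion(sc->sc_bt, sc->sc_bh,
 *	    chan * CHAN_SIZE, CHAN_SIZE, &ch->ch_bh);
 */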
2345 
2346 int
2347 sparc_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
2348 {
2349 	vaddr_t va = trunc_page((vaddr_t)bh._ptr);
2350 	vaddr_t endva = va + round_page(size);
2351 	int error = 0;
2352 
2353 	if (PHYS_ASI(bh._asi)) return (0);
2354 
2355 	error = extent_free(io_space, va, size, EX_NOWAIT);
2356 	if (error) printf("sparc_bus_unmap: extent_free returned %d\n", error);
2357 
2358 	pmap_remove(pmap_kernel(), va, endva);
2359 	return (0);
2360 }
2361 
2362 paddr_t
2363 sparc_bus_mmap(bus_space_tag_t t, bus_addr_t paddr, off_t off, int prot,
2364 	int flags)
2365 {
2366 	paddr_t pa;
2367 	/* Device mappings are non-cached, although the driver should ask for that. */
2368 	pa = (paddr + off) | PMAP_NC;
2369 	if (flags & BUS_SPACE_MAP_LITTLE)
2370 		pa |= PMAP_LITTLE;
2371 	if (flags & BUS_SPACE_MAP_PREFETCHABLE)
2372 		pa |= PMAP_WC;
2373 	return pa;
2374 }
2375 
2376 
2377 void *
2378 sparc_mainbus_intr_establish(bus_space_tag_t t, int pil, int level,
2379 	int (*handler)(void *), void *arg, void (*fastvec)(void) /* ignored */)
2380 {
2381 	struct intrhand *ih;
2382 
2383 	ih = intrhand_alloc();
2384 	ih->ih_fun = handler;
2385 	ih->ih_arg = arg;
2386 	intr_establish(pil, level != IPL_VM, ih);
2387 	return (ih);
2388 }
2389 
2390 int
2391 sparc_bus_alloc(bus_space_tag_t t, bus_addr_t rs, bus_addr_t re, bus_size_t s,
2392 	bus_size_t a, bus_size_t b, int f, bus_addr_t *ap, bus_space_handle_t *hp)
2393 {
2394 	return (ENOTTY);
2395 }
2396 
2397 void
2398 sparc_bus_free(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
2399 {
2400 	return;
2401 }
2402 
2403 struct sparc_bus_space_tag mainbus_space_tag = {
2404 	NULL,				/* cookie */
2405 	NULL,				/* parent bus tag */
2406 	NULL,				/* ranges */
2407 	0,				/* nranges */
2408 	UPA_BUS_SPACE,			/* type */
2409 	sparc_bus_alloc,
2410 	sparc_bus_free,
2411 	sparc_bus_map,			/* bus_space_map */
2412 	sparc_bus_unmap,		/* bus_space_unmap */
2413 	sparc_bus_subregion,		/* bus_space_subregion */
2414 	sparc_bus_mmap,			/* bus_space_mmap */
2415 	sparc_mainbus_intr_establish	/* bus_intr_establish */
2416 };
2417 
2418 
2419 void
2420 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
2421 {
2422 	__greg_t *gr = mcp->__gregs;
2423 	__greg_t ras_pc;
2424 	const struct trapframe64 *tf = l->l_md.md_tf;
2425 
2426 	/* First ensure consistent stack state (see sendsig). */ /* XXX? */
2427 	write_user_windows();
2428 	if (rwindow_save(l)) {
2429 		mutex_enter(l->l_proc->p_lock);
2430 		sigexit(l, SIGILL);
2431 	}
2432 
2433 	/* For now: zero everything, clearing any indicators of optional state. */
2434 	(void)memset(mcp, 0, sizeof (*mcp));
2435 
2436 	/* Save general register context. */
2437 #ifdef __arch64__
2438 	gr[_REG_CCR] = (tf->tf_tstate & TSTATE_CCR) >> TSTATE_CCR_SHIFT;
2439 #else
2440 	gr[_REG_PSR] = TSTATECCR_TO_PSR(tf->tf_tstate);
2441 #endif
2442 	gr[_REG_PC]  = tf->tf_pc;
2443 	gr[_REG_nPC] = tf->tf_npc;
2444 	gr[_REG_Y]   = tf->tf_y;
2445 	gr[_REG_G1]  = tf->tf_global[1];
2446 	gr[_REG_G2]  = tf->tf_global[2];
2447 	gr[_REG_G3]  = tf->tf_global[3];
2448 	gr[_REG_G4]  = tf->tf_global[4];
2449 	gr[_REG_G5]  = tf->tf_global[5];
2450 	gr[_REG_G6]  = tf->tf_global[6];
2451 	gr[_REG_G7]  = tf->tf_global[7];
2452 	gr[_REG_O0]  = tf->tf_out[0];
2453 	gr[_REG_O1]  = tf->tf_out[1];
2454 	gr[_REG_O2]  = tf->tf_out[2];
2455 	gr[_REG_O3]  = tf->tf_out[3];
2456 	gr[_REG_O4]  = tf->tf_out[4];
2457 	gr[_REG_O5]  = tf->tf_out[5];
2458 	gr[_REG_O6]  = tf->tf_out[6];
2459 	gr[_REG_O7]  = tf->tf_out[7];
2460 #ifdef __arch64__
2461 	gr[_REG_ASI] = (tf->tf_tstate & TSTATE_ASI) >> TSTATE_ASI_SHIFT;
2462 #if 0 /* not yet supported */
2463 	gr[_REG_FPRS] = ;
2464 #endif
2465 #endif /* __arch64__ */
2466 
2467 	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
2468 	    (void *) gr[_REG_PC])) != -1) {
2469 		gr[_REG_PC] = ras_pc;
2470 		gr[_REG_nPC] = ras_pc + 4;
2471 	}
2472 
2473 	*flags |= (_UC_CPU|_UC_TLSBASE);
2474 
2475 	mcp->__gwins = NULL;
2476 
2477 
2478 	/* Save FP register context, if any. */
2479 	if (l->l_md.md_fpstate != NULL) {
2480 		struct fpstate64 *fsp;
2481 		__fpregset_t *fpr = &mcp->__fpregs;
2482 
2483 		/*
2484 		 * If our FP context is currently held in the FPU, take a
2485 		 * private snapshot - lazy FPU context switching can deal
2486 		 * with it later when it becomes necessary.
2487 		 * Otherwise, get it from the process's save area.
2488 		 */
2489 		fpusave_lwp(l, true);
2490 		fsp = l->l_md.md_fpstate;
2491 		memcpy(&fpr->__fpu_fr, fsp->fs_regs, sizeof (fpr->__fpu_fr));
2492 		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
2493 		mcp->__fpregs.__fpu_fsr = fsp->fs_fsr;
2494 		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/; /* See above */
2495 		mcp->__fpregs.__fpu_q_entrysize =
2496 		    (unsigned char) sizeof (*mcp->__fpregs.__fpu_q);
2497 		mcp->__fpregs.__fpu_en = 1;
2498 		*flags |= _UC_FPU;
2499 	} else {
2500 		mcp->__fpregs.__fpu_en = 0;
2501 	}
2502 
2503 	mcp->__xrs.__xrs_id = 0;	/* Solaris extension? */
2504 }
2505 
2506 int
2507 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mc)
2508 {
2509 	const __greg_t *gr = mc->__gregs;
2510 
2511 	/*
2512 	 * Only the icc bits in the psr are used, so it need not be
2513 	 * verified.  pc and npc must be multiples of 4.  This is all
2514 	 * that is required; if it holds, just do it.
2515 	 */
2516 	if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 ||
2517 	    gr[_REG_PC] == 0 || gr[_REG_nPC] == 0)
2518 		return EINVAL;
2519 
2520 	return 0;
2521 }
2522 
2523 int
2524 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
2525 {
2526 	const __greg_t *gr = mcp->__gregs;
2527 	struct trapframe64 *tf = l->l_md.md_tf;
2528 	struct proc *p = l->l_proc;
2529 	int error;
2530 
2531 	/* First ensure consistent stack state (see sendsig). */
2532 	write_user_windows();
2533 	if (rwindow_save(l)) {
2534 		mutex_enter(p->p_lock);
2535 		sigexit(l, SIGILL);
2536 	}
2537 
2538 	if ((flags & _UC_CPU) != 0) {
2539 		error = cpu_mcontext_validate(l, mcp);
2540 		if (error)
2541 			return error;
2542 
2543 		/* Restore general register context. */
2544 		/* take only tstate CCR (and ASI) fields */
2545 #ifdef __arch64__
2546 		tf->tf_tstate = (tf->tf_tstate & ~(TSTATE_CCR | TSTATE_ASI)) |
2547 		    ((gr[_REG_CCR] << TSTATE_CCR_SHIFT) & TSTATE_CCR) |
2548 		    ((gr[_REG_ASI] << TSTATE_ASI_SHIFT) & TSTATE_ASI);
2549 #else
2550 		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_CCR) |
2551 		    PSRCC_TO_TSTATE(gr[_REG_PSR]);
2552 #endif
2553 		tf->tf_pc        = (uint64_t)gr[_REG_PC];
2554 		tf->tf_npc       = (uint64_t)gr[_REG_nPC];
2555 		tf->tf_y         = (uint64_t)gr[_REG_Y];
2556 		tf->tf_global[1] = (uint64_t)gr[_REG_G1];
2557 		tf->tf_global[2] = (uint64_t)gr[_REG_G2];
2558 		tf->tf_global[3] = (uint64_t)gr[_REG_G3];
2559 		tf->tf_global[4] = (uint64_t)gr[_REG_G4];
2560 		tf->tf_global[5] = (uint64_t)gr[_REG_G5];
2561 		tf->tf_global[6] = (uint64_t)gr[_REG_G6];
2562 		/* done in lwp_setprivate */
2563 		/* tf->tf_global[7] = (uint64_t)gr[_REG_G7]; */
2564 		tf->tf_out[0]    = (uint64_t)gr[_REG_O0];
2565 		tf->tf_out[1]    = (uint64_t)gr[_REG_O1];
2566 		tf->tf_out[2]    = (uint64_t)gr[_REG_O2];
2567 		tf->tf_out[3]    = (uint64_t)gr[_REG_O3];
2568 		tf->tf_out[4]    = (uint64_t)gr[_REG_O4];
2569 		tf->tf_out[5]    = (uint64_t)gr[_REG_O5];
2570 		tf->tf_out[6]    = (uint64_t)gr[_REG_O6];
2571 		tf->tf_out[7]    = (uint64_t)gr[_REG_O7];
2572 		/* %asi restored above; %fprs not yet supported. */
2573 
2574 		/* XXX mcp->__gwins */
2575 
2576 		if (flags & _UC_TLSBASE)
2577 			lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_G7]);
2578 	}
2579 
2580 	/* Restore FP register context, if any. */
2581 	if ((flags & _UC_FPU) != 0 && mcp->__fpregs.__fpu_en != 0) {
2582 		struct fpstate64 *fsp;
2583 		const __fpregset_t *fpr = &mcp->__fpregs;
2584 
2585 		/*
2586 		 * If we're the current FPU owner, simply reload it from
2587 		 * the supplied context.  Otherwise, store it into the
2588 		 * process' FPU save area (which is used to restore from
2589 		 * by lazy FPU context switching); allocate it if necessary.
2590 		 */
2591 		if ((fsp = l->l_md.md_fpstate) == NULL) {
2592 			fsp = pool_cache_get(fpstate_cache, PR_WAITOK);
2593 			l->l_md.md_fpstate = fsp;
2594 		} else {
2595 			/* Drop the live context on the floor. */
2596 			fpusave_lwp(l, false);
2597 		}
2598 		/* Note: sizeof fpr->__fpu_fr <= sizeof fsp->fs_regs. */
2599 		memcpy(fsp->fs_regs, &fpr->__fpu_fr, sizeof (fpr->__fpu_fr));
2600 		fsp->fs_fsr = mcp->__fpregs.__fpu_fsr;
2601 		fsp->fs_qsize = 0;
2602 
2603 #if 0
2604 		/* Need more info! */
2605 		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
2606 		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/; /* See above */
2607 #endif
2608 	}
2609 
2610 	/* XXX mcp->__xrs */
2611 	/* XXX mcp->__asrs */
2612 
2613 	mutex_enter(p->p_lock);
2614 	if (flags & _UC_SETSTACK)
2615 		l->l_sigstk.ss_flags |= SS_ONSTACK;
2616 	if (flags & _UC_CLRSTACK)
2617 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
2618 	mutex_exit(p->p_lock);
2619 
2620 	return 0;
2621 }
2622 
2623 /*
2624  * Preempt the current process if in interrupt from user mode,
2625  * or after the current trap/syscall if in system mode.
2626  */
2627 void
2628 cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
2629 {
2630 
2631 	ci->ci_want_ast = 1;
2632 
2633 #ifdef MULTIPROCESSOR
2634 	if ((flags & RESCHED_REMOTE) != 0) {
2635 		/* Just interrupt the target CPU, so it can notice its AST */
2636 		sparc64_send_ipi(ci->ci_cpuid, sparc64_ipi_nop, 0, 0);
2637 	}
2638 #endif
2639 }
2640 
2641 /*
2642  * Notify an LWP that it has a signal pending, process as soon as possible.
2643  */
2644 void
2645 cpu_signotify(struct lwp *l)
2646 {
2647 	struct cpu_info *ci = l->l_cpu;
2648 
2649 	ci->ci_want_ast = 1;
2650 #ifdef MULTIPROCESSOR
2651 	if (ci != curcpu()) {
2652 		sparc64_send_ipi(ci->ci_cpuid, sparc64_ipi_nop, 0, 0);
2653 	}
2654 #endif
2655 }
2656 
2657 bool
2658 cpu_intr_p(void)
2659 {
2660 	int idepth;
2661 	long pctr;
2662 	lwp_t *l;
2663 
2664 	l = curlwp;
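	/*
	 * Sample the LWP's context-switch counter around the ci_idepth
	 * read and retry if it changed, so that the interrupt depth we
	 * report comes from the CPU this LWP was actually running on.
	 */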
2665 	do {
2666 		pctr = lwp_pctr();
2667 		idepth = l->l_cpu->ci_idepth;
2668 	} while (__predict_false(pctr != lwp_pctr()));
2669 
2670 	return idepth >= 0;
2671 }
2672 
2673 #ifdef MODULAR
2674 void
2675 module_init_md(void)
2676 {
2677 }
2678 #endif
2679 
2680 int
2681 mm_md_physacc(paddr_t pa, vm_prot_t prot)
2682 {
2683 
2684 	return pmap_pa_exists(pa) ? 0 : EFAULT;
2685 }
2686 
2687 int
2688 mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
2689 {
2690 	/* XXX: Don't know where PROMs are on Ultras.  Think it's at f000000 */
2691 	const vaddr_t prom_vstart = 0xf000000, prom_vend = 0xf0100000;
2692 	const vaddr_t msgbufpv = (vaddr_t)msgbufp, v = (vaddr_t)ptr;
2693 	const size_t msgbufsz = msgbufp->msg_bufs +
2694 	    offsetof(struct kern_msgbuf, msg_bufc);
2695 
2696 	*handled = (v >= msgbufpv && v < msgbufpv + msgbufsz) ||
2697 	    (v >= prom_vstart && v < prom_vend && (prot & VM_PROT_WRITE) == 0);
2698 	return 0;
2699 }
2700 
2701 int
2702 mm_md_readwrite(dev_t dev, struct uio *uio)
2703 {
2704 
2705 	return ENXIO;
2706 }
2707 
2708 #ifdef __arch64__
2709 void
2710 sparc64_elf_mcmodel_check(struct exec_package *epp, const char *model,
2711     size_t len)
2712 {
2713 	/* no model-specific handling for 32-bit processes */
2714 	if (epp->ep_flags & EXEC_32)
2715 		return;
2716 
2717 #ifdef __USE_TOPDOWN_VM
2718 	/*
2719 	 * We allow TOPDOWN_VM for all processes whose binary was compiled
2720 	 * with the medany or medmid code model.
2721 	 */
2722 	if (strncmp(model, "medany", len) == 0 ||
2723 	    strncmp(model, "medmid", len) == 0)
2724 		epp->ep_flags |= EXEC_TOPDOWN_VM;
2725 #endif
2726 }
2727 #endif
2728