1 /* $NetBSD: machdep.c,v 1.341 2024/03/05 14:15:35 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1992, 1993
35 * The Regents of the University of California. All rights reserved.
36 *
37 * This software was developed by the Computer Systems Engineering group
38 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
39 * contributed to Berkeley.
40 *
41 * All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Lawrence Berkeley Laboratory.
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. Neither the name of the University nor the names of its contributors
55 * may be used to endorse or promote products derived from this software
56 * without specific prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 *
70 * @(#)machdep.c 8.6 (Berkeley) 1/14/94
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.341 2024/03/05 14:15:35 thorpej Exp $");
75
76 #include "opt_compat_netbsd.h"
77 #include "opt_compat_sunos.h"
78 #include "opt_sparc_arch.h"
79 #include "opt_modular.h"
80 #include "opt_multiprocessor.h"
81
82 #include <sys/param.h>
83 #include <sys/signal.h>
84 #include <sys/signalvar.h>
85 #include <sys/proc.h>
86 #include <sys/vmem.h>
87 #include <sys/cpu.h>
88 #include <sys/buf.h>
89 #include <sys/device.h>
90 #include <sys/reboot.h>
91 #include <sys/systm.h>
92 #include <sys/kernel.h>
93 #include <sys/conf.h>
94 #include <sys/file.h>
95 #include <sys/kmem.h>
96 #include <sys/mbuf.h>
97 #include <sys/mount.h>
98 #include <sys/msgbuf.h>
99 #include <sys/syscallargs.h>
100 #include <sys/exec.h>
101 #include <sys/exec_aout.h>
102 #include <sys/ucontext.h>
103 #include <sys/module.h>
104 #include <sys/mutex.h>
105 #include <sys/ras.h>
106
107 #include <dev/mm.h>
108
109 #include <uvm/uvm.h> /* we use uvm.kernel_object */
110
111 #include <sys/sysctl.h>
112
113 #ifdef COMPAT_13
114 #include <compat/sys/signal.h>
115 #include <compat/sys/signalvar.h>
116 #endif
117
118 #define _SPARC_BUS_DMA_PRIVATE
119 #include <machine/autoconf.h>
120 #include <sys/bus.h>
121 #include <machine/frame.h>
122 #include <machine/cpu.h>
123 #include <machine/pcb.h>
124 #include <machine/pmap.h>
125 #include <machine/oldmon.h>
126 #include <machine/bsd_openprom.h>
127 #include <machine/bootinfo.h>
128 #include <machine/eeprom.h>
129
130 #include <sparc/sparc/asm.h>
131 #include <sparc/sparc/cache.h>
132 #include <sparc/sparc/vaddrs.h>
133 #include <sparc/sparc/cpuvar.h>
134
135 #include "fb.h"
136 #include "power.h"
137
138 #if NPOWER > 0
139 #include <sparc/dev/power.h>
140 #endif
141
/* Serializes FPU state ownership hand-off (see FPU_LOCK users below). */
kmutex_t fpu_mtx;

/*
 * dvmamap24 is used to manage DVMA memory for devices that have the upper
 * eight address bits wired to all-ones (e.g. `le' and `ie')
 */
vmem_t *dvmamap24;

/* Forward declarations for local crash/debug helpers. */
void	dumpsys(void);
void	stackdump(void);
152
/*
 * Machine-dependent startup code.
 *
 * Moves the kernel message buffer from its temporary mapping at
 * KERNBASE to its permanent home, prints the memory totals, clamps
 * VM tunables on MMUs with few segment mappings (sun4/sun4c), and
 * creates the DVMA arena for 24-bit DMA devices.
 */
void
cpu_startup(void)
{
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	struct pcb *pcb;
	vsize_t size;
	paddr_t pa;
	char pbuf[9];

#ifdef DEBUG
	/* Silence pmap tracing while we rework the mappings below. */
	pmapdebug = 0;
#endif

	/* XXX */
	pcb = lwp_getpcb(&lwp0);
	if (pcb && pcb->pcb_psr == 0)
		pcb->pcb_psr = getpsr();

	/*
	 * Re-map the message buffer from its temporary address
	 * at KERNBASE to MSGBUF_VA.
	 */
#if !defined(MSGBUFSIZE) || MSGBUFSIZE <= 8192
	/*
	 * We use the free page(s) in front of the kernel load address.
	 */
	size = 8192;

	/* Get physical address of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, size);
	pmap_update(pmap_kernel());

	/* Enter the new mapping */
	pmap_map(MSGBUF_VA, pa, pa + size, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * Re-initialize the message buffer.
	 */
	initmsgbuf((void *)MSGBUF_VA, size);
#else /* MSGBUFSIZE */
	{
	struct pglist mlist;
	struct vm_page *m;
	vaddr_t va0, va;

	/*
	 * We use the free page(s) in front of the kernel load address,
	 * and then allocate some more.
	 */
	size = round_page(MSGBUFSIZE);

	/* Get physical address of first 8192 chunk of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);

	/* Allocate additional physical pages */
	if (uvm_pglistalloc(size - 8192,
			    vm_first_phys, vm_first_phys+vm_num_phys,
			    0, 0, &mlist, 1, 0) != 0)
		panic("cpu_start: no memory for message buffer");

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, 8192);
	pmap_update(pmap_kernel());

	/* Allocate virtual memory space */
	va0 = va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("cpu_start: no virtual memory for message buffer");

	/* Map first 8192: the pages already sitting in front of the kernel */
	while (va < va0 + 8192) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	/* Map the rest of the pages */
	TAILQ_FOREACH(m, &mlist ,pageq.queue) {
		if (va >= va0 + size)
			panic("cpu_start: memory buffer size botch");
		pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	/*
	 * Re-initialize the message buffer.
	 */
	initmsgbuf((void *)va0, size);
	}
#endif /* MSGBUFSIZE */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	/*identifycpu();*/
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Tune buffer cache variables based on the capabilities of the MMU
	 * to cut down on VM space allocated for the buffer caches that
	 * would lead to MMU resource shortage.
	 */
	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Clip UBC windows */
		if (cpuinfo.mmu_nsegment <= 128) {
			/*
			 * ubc_nwins and ubc_winshift control the amount
			 * of VM used by the UBC. Normally, this VM is
			 * not wired in the kernel map, hence non-locked
			 * `PMEGs' (see pmap.c) are used for this space.
			 * We still limit possible fragmentation to prevent
			 * the occasional wired UBC mappings from tying up
			 * too many PMEGs.
			 *
			 * Set the upper limit to 9 segments (default
			 * winshift = 13).
			 */
			ubc_nwins = 512;

			/*
			 * buf_setvalimit() allocates a submap for buffer
			 * allocation. We use it to limit the number of locked
			 * `PMEGs' (see pmap.c) dedicated to the buffer cache.
			 *
			 * Set the upper limit to 12 segments (3MB), which
			 * corresponds approximately to the size of the
			 * traditional 5% rule (assuming a maximum 64MB of
			 * memory in small sun4c machines).
			 */
			buf_setvalimit(12 * 256*1024);
		}

		/* Clip max data & stack to avoid running into the MMU hole */
#if MAXDSIZ > 256*1024*1024
		maxdmap = 256*1024*1024;
#endif
#if MAXSSIZ > 256*1024*1024
		maxsmap = 256*1024*1024;
#endif
	}

	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/*
		 * Allocate DMA map for 24-bit devices (le, ie)
		 * [dvma_base - dvma_end] is for VME devices..
		 */
		dvmamap24 = vmem_create("dvmamap24",
					D24_DVMA_BASE,
					D24_DVMA_END - D24_DVMA_BASE,
					PAGE_SIZE,	/* quantum */
					NULL,		/* importfn */
					NULL,		/* releasefn */
					NULL,		/* source */
					0,		/* qcache_max */
					VM_SLEEP,
					IPL_VM);
		if (dvmamap24 == NULL)
			panic("unable to allocate DVMA map");
	}

#ifdef DEBUG
	/* Restore the pmap trace level saved on entry. */
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);

	pmap_redzone();

	mutex_init(&fpu_mtx, MUTEX_DEFAULT, IPL_SCHED);
}
337
/*
 * Set up registers on exec.
 *
 * Resets the LWP's trapframe to a clean state for the new image and
 * discards any FPU state left over from the previous image.
 *
 * XXX this entire mess must be fixed
 */
/* ARGSUSED */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe *tf = l->l_md.md_tf;
	struct fpstate *fs;
	int psr;

	/* Don't allow unaligned data references by default */
	l->l_proc->p_md.md_flags &= ~MDP_FIXALIGN;

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec())
	 *	%psr: (retain CWP and PSR_S bits)
	 *	%g1: p->p_psstrp (used by crt0)
	 *	%pc,%npc: entry point of program
	 */
	psr = tf->tf_psr & (PSR_S | PSR_CWP);
	if ((fs = l->l_md.md_fpstate) != NULL) {
		struct cpu_info *cpi;
		int s;
		/*
		 * We hold an FPU state. If we own *some* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it. In any case, get rid of our FPU state.
		 */
		FPU_LOCK(s);
		if ((cpi = l->l_md.md_fpu) != NULL) {
			/* Sanity: the owning CPU must agree we own its FPU. */
			if (cpi->fplwp != l)
				panic("FPU(%d): fplwp %p",
				    cpi->ci_cpuid, cpi->fplwp);
			if (l == cpuinfo.fplwp)
				savefpstate(fs);
#if defined(MULTIPROCESSOR)
			/* State lives on another CPU; have it saved there. */
			else
				XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid);
#endif
			cpi->fplwp = NULL;
		}
		l->l_md.md_fpu = NULL;
		FPU_UNLOCK(s);
		kmem_free(fs, sizeof(struct fpstate));
		l->l_md.md_fpstate = NULL;
	}
	memset((void *)tf, 0, sizeof *tf);
	tf->tf_psr = psr;
	tf->tf_global[1] = l->l_proc->p_psstrp;
	/* Instructions are 4-byte aligned; force-align the entry point. */
	tf->tf_pc = pack->ep_entry & ~3;
	tf->tf_npc = tf->tf_pc + 4;
	/* Leave room for the initial register window save area. */
	stack -= sizeof(struct rwindow);
	tf->tf_out[6] = stack;
}
396
#ifdef DEBUG
/*
 * Signal-delivery debug knobs: OR SDB_* values into sigdebug and set
 * sigpid to the pid of interest to get traces from sendsig_siginfo()
 * and cpu_setmcontext() below.
 */
int sigdebug = 0;
int sigpid = 0;
#define SDB_FOLLOW	0x01
#define SDB_KSTACK	0x02
#define SDB_FPSTATE	0x04
#endif
404
405 /*
406 * machine dependent system variables.
407 */
408 static int
sysctl_machdep_boot(SYSCTLFN_ARGS)409 sysctl_machdep_boot(SYSCTLFN_ARGS)
410 {
411 struct sysctlnode node = *rnode;
412 struct btinfo_kernelfile *bi_file;
413 const char *cp;
414
415
416 switch (node.sysctl_num) {
417 case CPU_BOOTED_KERNEL:
418 if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL)
419 cp = bi_file->name;
420 else
421 cp = prom_getbootfile();
422 if (cp != NULL && cp[0] == '\0')
423 cp = "netbsd";
424 break;
425 case CPU_BOOTED_DEVICE:
426 cp = prom_getbootpath();
427 break;
428 case CPU_BOOT_ARGS:
429 cp = prom_getbootargs();
430 break;
431 default:
432 return (EINVAL);
433 }
434
435 if (cp == NULL || cp[0] == '\0')
436 return (ENOENT);
437
438 node.sysctl_data = __UNCONST(cp);
439 node.sysctl_size = strlen(cp) + 1;
440 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
441 }
442
443 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
444 {
445
446 sysctl_createv(clog, 0, NULL, NULL,
447 CTLFLAG_PERMANENT,
448 CTLTYPE_NODE, "machdep", NULL,
449 NULL, 0, NULL, 0,
450 CTL_MACHDEP, CTL_EOL);
451
452 sysctl_createv(clog, 0, NULL, NULL,
453 CTLFLAG_PERMANENT,
454 CTLTYPE_STRING, "booted_kernel", NULL,
455 sysctl_machdep_boot, 0, NULL, 0,
456 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
457 sysctl_createv(clog, 0, NULL, NULL,
458 CTLFLAG_PERMANENT,
459 CTLTYPE_STRING, "booted_device", NULL,
460 sysctl_machdep_boot, 0, NULL, 0,
461 CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
462 sysctl_createv(clog, 0, NULL, NULL,
463 CTLFLAG_PERMANENT,
464 CTLTYPE_STRING, "boot_args", NULL,
465 sysctl_machdep_boot, 0, NULL, 0,
466 CTL_MACHDEP, CPU_BOOT_ARGS, CTL_EOL);
467 sysctl_createv(clog, 0, NULL, NULL,
468 CTLFLAG_PERMANENT,
469 CTLTYPE_INT, "cpu_arch", NULL,
470 NULL, 0, &cpu_arch, 0,
471 CTL_MACHDEP, CPU_ARCH, CTL_EOL);
472 }
473
/*
 * Send an interrupt to process.
 *
 * Layout of the frame pushed on the user stack by sendsig_siginfo().
 */
struct sigframe {
	siginfo_t sf_si;	/* signal information for the handler */
	ucontext_t sf_uc;	/* saved context, consumed by sigreturn */
};
481
/*
 * Deliver signal `ksi' to the current LWP: build a struct sigframe on
 * the user stack (or the alternate signal stack) and redirect the
 * trapframe to the registered handler via the signal trampoline.
 * On any copyout failure the process is killed with SIGILL.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	struct trapframe *tf;
	ucontext_t uc;
	struct sigframe *fp;
	u_int onstack, oldsp, newsp;
	u_int catcher;
	int sig, error;
	size_t ucsz;

	sig = ksi->ksi_signo;

	tf = l->l_md.md_tf;
	oldsp = tf->tf_out[6];

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	if (onstack)
		fp = (struct sigframe *)
		    ((char *)l->l_sigstk.ss_sp +
		    l->l_sigstk.ss_size);
	else
		fp = (struct sigframe *)oldsp;

	/* 8-byte align the frame address. */
	fp = (struct sigframe *)((int)(fp - 1) & ~7);

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: %s[%d] sig %d newusp %p si %p uc %p\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_si, &fp->sf_uc);
#endif

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	uc.uc_flags = _UC_SIGMASK |
	    ((l->l_sigstk.ss_flags & SS_ONSTACK)
		? _UC_SETSTACK : _UC_CLRSTACK);
	uc.uc_sigmask = *mask;
	uc.uc_link = l->l_ctxlink;
	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));

	/*
	 * Now copy the stack contents out to user space.
	 * We need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 * Since we're calling the handler directly, allocate a full size
	 * C stack frame.
	 */
	sendsig_reset(l, sig);
	/* Drop p_lock across the user-space copies; they may fault. */
	mutex_exit(p->p_lock);
	newsp = (int)fp - sizeof(struct frame);
	cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);
	/* Copy only up to (not including) the ucontext padding. */
	ucsz = (int)&uc.__uc_pad - (int)&uc;
	error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof ksi->ksi_info) ||
	    copyout(&uc, &fp->sf_uc, ucsz) ||
	    ustore_int((u_int *)&((struct rwindow *)newsp)->rw_in[6], oldsp));
	mutex_enter(p->p_lock);

	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	switch (ps->sa_sigdesc[sig].sd_vers) {
	default:
		/* Unsupported trampoline version; kill the process. */
		sigexit(l, SIGILL);
	case __SIGTRAMP_SIGINFO_VERSION:
		/*
		 * Arrange to continue execution at the user's handler.
		 * It needs a new stack pointer, a return address and
		 * three arguments: (signo, siginfo *, ucontext *).
		 */
		catcher = (u_int)SIGACTION(p, sig).sa_handler;
		tf->tf_pc = catcher;
		tf->tf_npc = catcher + 4;
		tf->tf_out[0] = sig;
		tf->tf_out[1] = (int)&fp->sf_si;
		tf->tf_out[2] = (int)&fp->sf_uc;
		tf->tf_out[6] = newsp;
		/* %o7 + 8 == first trampoline instruction after `ret'. */
		tf->tf_out[7] = (int)ps->sa_sigdesc[sig].sd_tramp - 8;
		break;
	}

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: about to return to catcher\n");
#endif
}
596
597 void
cpu_getmcontext(struct lwp * l,mcontext_t * mcp,unsigned int * flags)598 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
599 {
600 struct trapframe *tf = (struct trapframe *)l->l_md.md_tf;
601 __greg_t *r = mcp->__gregs;
602 __greg_t ras_pc;
603 #ifdef FPU_CONTEXT
604 __fpregset_t *f = &mcp->__fpregs;
605 struct fpstate *fps = l->l_md.md_fpstate;
606 #endif
607
608 /*
609 * Put the stack in a consistent state before we whack away
610 * at it. Note that write_user_windows may just dump the
611 * registers into the pcb; we need them in the process's memory.
612 */
613 write_user_windows();
614 if (rwindow_save(l)) {
615 mutex_enter(l->l_proc->p_lock);
616 sigexit(l, SIGILL);
617 }
618
619 /*
620 * Get the general purpose registers
621 */
622 r[_REG_PSR] = tf->tf_psr;
623 r[_REG_PC] = tf->tf_pc;
624 r[_REG_nPC] = tf->tf_npc;
625 r[_REG_Y] = tf->tf_y;
626 r[_REG_G1] = tf->tf_global[1];
627 r[_REG_G2] = tf->tf_global[2];
628 r[_REG_G3] = tf->tf_global[3];
629 r[_REG_G4] = tf->tf_global[4];
630 r[_REG_G5] = tf->tf_global[5];
631 r[_REG_G6] = tf->tf_global[6];
632 r[_REG_G7] = tf->tf_global[7];
633 r[_REG_O0] = tf->tf_out[0];
634 r[_REG_O1] = tf->tf_out[1];
635 r[_REG_O2] = tf->tf_out[2];
636 r[_REG_O3] = tf->tf_out[3];
637 r[_REG_O4] = tf->tf_out[4];
638 r[_REG_O5] = tf->tf_out[5];
639 r[_REG_O6] = tf->tf_out[6];
640 r[_REG_O7] = tf->tf_out[7];
641
642 if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
643 (void *) r[_REG_PC])) != -1) {
644 r[_REG_PC] = ras_pc;
645 r[_REG_nPC] = ras_pc + 4;
646 }
647
648 *flags |= (_UC_CPU|_UC_TLSBASE);
649
650 #ifdef FPU_CONTEXT
651 /*
652 * Get the floating point registers
653 */
654 memcpy(f->__fpu_regs, fps->fs_regs, sizeof(fps->fs_regs));
655 f->__fp_nqsize = sizeof(struct fp_qentry);
656 f->__fp_nqel = fps->fs_qsize;
657 f->__fp_fsr = fps->fs_fsr;
658 if (f->__fp_q != NULL) {
659 size_t sz = f->__fp_nqel * f->__fp_nqsize;
660 if (sz > sizeof(fps->fs_queue)) {
661 #ifdef DIAGNOSTIC
662 printf("getcontext: fp_queue too large\n");
663 #endif
664 return;
665 }
666 if (copyout(fps->fs_queue, f->__fp_q, sz) != 0) {
667 #ifdef DIAGNOSTIC
668 printf("getcontext: copy of fp_queue failed %d\n",
669 error);
670 #endif
671 return;
672 }
673 }
674 f->fp_busy = 0; /* XXX: How do we determine that? */
675 *flags |= _UC_FPU;
676 #endif
677
678 return;
679 }
680
681 int
cpu_mcontext_validate(struct lwp * l,const mcontext_t * mc)682 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mc)
683 {
684 const __greg_t *gr = mc->__gregs;
685
686 /*
687 * Only the icc bits in the psr are used, so it need not be
688 * verified. pc and npc must be multiples of 4. This is all
689 * that is required; if it holds, just do it.
690 */
691 if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 ||
692 gr[_REG_PC] == 0 || gr[_REG_nPC] == 0)
693 return EINVAL;
694
695 return 0;
696 }
697
/*
 * Set to mcontext specified.
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 * This is almost like sigreturn() and it shows.
 *
 * Returns 0 on success or an errno; the _UC_* bits in `flags' select
 * which parts of *mcp are installed.
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe *tf;
	const __greg_t *r = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;
#ifdef FPU_CONTEXT
	__fpregset_t *f = &mcp->__fpregs;
	struct fpstate *fps = l->l_md.md_fpstate;
#endif

	/* Flush in-register user windows to memory before reloading state. */
	write_user_windows();
	if (rwindow_save(l)) {
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
	}

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("__setmcontext: %s[%d], __mcontext %p\n",
		    l->l_proc->p_comm, l->l_proc->p_pid, mcp);
#endif

	if (flags & _UC_CPU) {
		/* Validate */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* Restore register context. */
		tf = (struct trapframe *)l->l_md.md_tf;

		/* take only psr ICC field */
		tf->tf_psr = (tf->tf_psr & ~PSR_ICC) |
		    (r[_REG_PSR] & PSR_ICC);
		tf->tf_pc = r[_REG_PC];
		tf->tf_npc = r[_REG_nPC];
		tf->tf_y = r[_REG_Y];

		/* Restore everything */
		tf->tf_global[1] = r[_REG_G1];
		tf->tf_global[2] = r[_REG_G2];
		tf->tf_global[3] = r[_REG_G3];
		tf->tf_global[4] = r[_REG_G4];
		tf->tf_global[5] = r[_REG_G5];
		tf->tf_global[6] = r[_REG_G6];
		/* done in lwp_setprivate */
		/* tf->tf_global[7] = r[_REG_G7]; */

		tf->tf_out[0] = r[_REG_O0];
		tf->tf_out[1] = r[_REG_O1];
		tf->tf_out[2] = r[_REG_O2];
		tf->tf_out[3] = r[_REG_O3];
		tf->tf_out[4] = r[_REG_O4];
		tf->tf_out[5] = r[_REG_O5];
		tf->tf_out[6] = r[_REG_O6];
		tf->tf_out[7] = r[_REG_O7];

		/* %g7 carries the TLS base; install it via lwp_setprivate. */
		if (flags & _UC_TLSBASE)
			lwp_setprivate(l, (void *)(uintptr_t)r[_REG_G7]);
	}

#ifdef FPU_CONTEXT
	if (flags & _UC_FPU) {
		/*
		 * Set the floating point registers
		 */
		int error;
		size_t sz = f->__fp_nqel * f->__fp_nqsize;
		if (sz > sizeof(fps->fs_queue)) {
#ifdef DIAGNOSTIC
			printf("setmcontext: fp_queue too large\n");
#endif
			return (EINVAL);
		}
		memcpy(fps->fs_regs, f->__fpu_regs, sizeof(fps->fs_regs));
		fps->fs_qsize = f->__fp_nqel;
		fps->fs_fsr = f->__fp_fsr;
		if (f->__fp_q != NULL) {
			if ((error = copyin(f->__fp_q, fps->fs_queue, sz)) != 0) {
#ifdef DIAGNOSTIC
				printf("setmcontext: fp_queue copy failed\n");
#endif
				return (error);
			}
		}
	}
#endif

	/* Update the signal-stack state under the proc lock. */
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}
806
/* -1 until cpu_reboot() has started the filesystem sync; 0 afterwards. */
int waittime = -1;
808
/*
 * Halt, power down or reboot the machine according to `howto',
 * syncing filesystems and taking a crash dump as requested, then
 * hand control to the PROM.  `user_boot_string' optionally names
 * the kernel/flags to pass to the PROM on reboot.
 */
void
cpu_reboot(int howto, char *user_boot_string)
{
	int i;
	char opts[4];
	static char str[128];

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

#if NFB > 0
	fb_unblank();
#endif
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {

		/* XXX protect against curlwp->p_stats.foo refs in sync() */
		if (curlwp == NULL)
			curlwp = &lwp0;
		waittime = 0;
		vfs_shutdown();
	}

	/* Disable interrupts. But still allow IPI on MP systems */
	if (sparc_ncpus > 1)
		(void)splsched();
	else
		(void)splhigh();

#if defined(MULTIPROCESSOR)
	/* Direct system interrupts to this CPU, since dump uses polled I/O */
	if (CPU_ISSUN4M)
		*((u_int *)ICR_ITR) = cpuinfo.mid - 8;
#endif

	/* If rebooting and a dump is requested, do it. */
#if 0
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
#else
	if (howto & RB_DUMP)
#endif
		dumpsys();

 haltsys:

	/* Run any shutdown hooks. */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	/* If powerdown was requested, do it. */
	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
		prom_interpret("power-off");
#if NPOWER > 0
		/* Fall back on `power' device if the PROM can't do it */
		powerdown();
#endif
		/* Only reached if neither method powered us off. */
		printf("WARNING: powerdown not supported\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
	}

	if (howto & RB_HALT) {
#if defined(MULTIPROCESSOR)
		mp_halt_cpus();
		printf("cpu%d halted\n\n", cpu_number());
#else
		printf("halted\n\n");
#endif
		prom_halt();
	}

	printf("rebooting\n\n");

	/* Build the "-sd" style option string, opts[0] reserved for '-'. */
	i = 1;
	if (howto & RB_SINGLE)
		opts[i++] = 's';
	if (howto & RB_KDB)
		opts[i++] = 'd';
	opts[i] = '\0';
	opts[0] = (i > 1) ? '-' : '\0';

	if (user_boot_string && *user_boot_string) {
		i = strlen(user_boot_string);
		/* Too long to append options; boot with the string as-is. */
		if (i > sizeof(str) - sizeof(opts) - 1)
			prom_boot(user_boot_string);	/* XXX */
		memcpy(str, user_boot_string, i);
		if (opts[0] != '\0')
			str[i] = ' ';
	}
	strcat(str, opts);
	prom_boot(str);
	/*NOTREACHED*/
}
907
uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;		/* dump start block, set by cpu_dumpconf() */
911
912 void
cpu_dumpconf(void)913 cpu_dumpconf(void)
914 {
915 int nblks, dumpblks;
916
917 if (dumpdev == NODEV)
918 return;
919 nblks = bdev_size(dumpdev);
920
921 dumpblks = ctod(physmem) + pmap_dumpsize();
922 if (dumpblks > (nblks - ctod(1)))
923 /*
924 * dump size is too big for the partition.
925 * Note, we safeguard a click at the front for a
926 * possible disk label.
927 */
928 return;
929
930 /* Put the dump at the end of the partition */
931 dumplo = nblks - dumpblks;
932
933 /*
934 * savecore(8) expects dumpsize to be the number of pages
935 * of actual core dumped (i.e. excluding the MMU stuff).
936 */
937 dumpsize = physmem;
938 }
939
#define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
static vaddr_t dumpspace;		/* VA window for mapping dump chunks */
struct pcb dumppcb;			/* snapshot of curpcb, see dumpsys() */
943
944 void *
reserve_dumppages(void * p)945 reserve_dumppages(void *p)
946 {
947
948 dumpspace = (vaddr_t)p;
949 return ((char *)p + BYTES_PER_DUMP);
950 }
951
/*
 * Write a crash dump.
 *
 * Snapshots the current pcb, then writes the MMU state followed by all
 * physical memory (except the page at PA 0) to the dump device in
 * BYTES_PER_DUMP chunks, reporting progress and the final status.
 */
void
dumpsys(void)
{
	const struct bdevsw *bdev;
	int psize;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, void *, size_t);
	int error = 0;
	struct memarr *mp;
	int nmem;

	/* copy registers to memory */
	snapshot(cpuinfo.curpcb);
	memcpy(&dumppcb, cpuinfo.curpcb, sizeof dumppcb);
	stackdump();

	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	psize = bdev_size(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}
	blkno = dumplo;
	dump = bdev->d_dump;

	/* The MMU state goes out first so savecore can interpret the rest. */
	error = pmap_dumpmmu(dump, blkno);
	blkno += pmap_dumpsize();

	/* Walk every physical memory range. */
	for (mp = pmemarr, nmem = npmemarr; --nmem >= 0 && error == 0; mp++) {
		unsigned i = 0, n;
		int maddr = mp->addr;

		if (maddr == 0) {
			/* Skip first page at physical address 0 */
			maddr += PAGE_SIZE;
			i += PAGE_SIZE;
			blkno += btodb(PAGE_SIZE);
		}

		for (; i < mp->len; i += n) {
			n = mp->len - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			/* print out how many MBs we have dumped */
			if (i && (i % (1024*1024)) == 0)
				printf_nolog("%d ", i / (1024*1024));

			/* Map the chunk, write it, then unmap again. */
			(void) pmap_map(dumpspace, maddr, maddr + n,
			    VM_PROT_READ);
			error = (*dump)(dumpdev, blkno,
			    (void *)dumpspace, (int)n);
			pmap_kremove(dumpspace, n);
			pmap_update(pmap_kernel());
			if (error)
				break;
			maddr += n;
			blkno += btodb(n);
		}
	}

	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
}
1063
1064 /*
1065 * get the fp and dump the stack as best we can. don't leave the
1066 * current stack page
1067 */
1068 void
stackdump(void)1069 stackdump(void)
1070 {
1071 struct frame *fp = getfp(), *sfp;
1072
1073 sfp = fp;
1074 printf("Frame pointer is at %p\n", fp);
1075 printf("Call traceback:\n");
1076 while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
1077 printf(" pc = 0x%x args = (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp = %p\n",
1078 fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
1079 fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
1080 fp = fp->fr_fp;
1081 }
1082 }
1083
/*
 * a.out executables are not handled by this port's MD hook; always
 * return ENOEXEC so the generic exec code tries the other formats.
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{

	return ENOEXEC;
}
1090
1091 #if defined(SUN4)
/*
 * Old-monitor `w t' command: print UVM event counters and a stack
 * trace starting at stack pointer `va', stopping at the end of the
 * page the trace started on.
 */
void
oldmon_w_trace(u_long va)
{
	struct cpu_info * const ci = curcpu();
	u_long stop;
	struct frame *fp;

	printf("curlwp = %p, pid %d\n", curlwp, curproc->p_pid);

	printf("uvm: cpu%u: swtch %"PRIu64", trap %"PRIu64", sys %"PRIu64", "
	    "intr %"PRIu64", soft %"PRIu64", faults %"PRIu64"\n",
	    cpu_index(ci), ci->ci_data.cpu_nswtch, ci->ci_data.cpu_ntrap,
	    ci->ci_data.cpu_nsyscall, ci->ci_data.cpu_nintr,
	    ci->ci_data.cpu_nsoft, ci->ci_data.cpu_nfault);
	/* Flush live register windows so the stack frames are in memory. */
	write_user_windows();

#define round_up(x) (( (x) + (PAGE_SIZE-1) ) & (~(PAGE_SIZE-1)) )

	printf("\nstack trace with sp = 0x%lx\n", va);
	/* Stop once a frame pointer leaves the starting page. */
	stop = round_up(va);
	printf("stop at 0x%lx\n", stop);
	fp = (struct frame *) va;
	while (round_up((u_long) fp) == stop) {
		printf(" 0x%x(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp %p\n", fp->fr_pc,
		    fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2], fp->fr_arg[3],
		    fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
		fp = fp->fr_fp;
		if (fp == NULL)
			break;
	}
	printf("end of stack trace\n");
}
1124
1125 void
oldmon_w_cmd(u_long va,char * ar)1126 oldmon_w_cmd(u_long va, char *ar)
1127 {
1128 switch (*ar) {
1129 case '\0':
1130 switch (va) {
1131 case 0:
1132 panic("g0 panic");
1133 case 4:
1134 printf("w: case 4\n");
1135 break;
1136 default:
1137 printf("w: unknown case %ld\n", va);
1138 break;
1139 }
1140 break;
1141 case 't':
1142 oldmon_w_trace(va);
1143 break;
1144 default:
1145 printf("w: arg not allowed\n");
1146 }
1147 }
1148
/*
 * Read one byte from control space at `addr', protected against a
 * fault (via the assembly helper xldcontrolb()).  Only meaningful on
 * sun4/sun4c; on sun4m/sun4d this just warns and returns 0.
 */
int
ldcontrolb(void *addr)
{
	struct pcb *xpcb;
	u_long saveonfault;
	int res;
	int s;

	if (CPU_ISSUN4M || CPU_ISSUN4D) {
		printf("warning: ldcontrolb called on sun4m/sun4d\n");
		return 0;
	}

	/* Block interrupts while we borrow the current lwp's pcb. */
	s = splhigh();
	xpcb = lwp_getpcb(curlwp);

	/*
	 * Save pcb_onfault across the helper call — xldcontrolb()
	 * presumably uses it for fault recovery — and restore it after.
	 */
	saveonfault = (u_long)xpcb->pcb_onfault;
	res = xldcontrolb(addr, xpcb);
	xpcb->pcb_onfault = (void *)saveonfault;

	splx(s);
	return (res);
}
1172 #endif /* SUN4 */
1173
1174 void
wzero(void * vb,u_int l)1175 wzero(void *vb, u_int l)
1176 {
1177 u_char *b = vb;
1178 u_char *be = b + l;
1179 u_short *sp;
1180
1181 if (l == 0)
1182 return;
1183
1184 /* front, */
1185 if ((u_long)b & 1)
1186 *b++ = 0;
1187
1188 /* back, */
1189 if (b != be && ((u_long)be & 1) != 0) {
1190 be--;
1191 *be = 0;
1192 }
1193
1194 /* and middle. */
1195 sp = (u_short *)b;
1196 while (sp != (u_short *)be)
1197 *sp++ = 0;
1198 }
1199
/*
 * Copy `l' bytes from `vb1' to `vb2' using 16-bit loads on the
 * (aligned) source, splitting each load into byte stores when the
 * destination is odd-aligned.
 */
void
wcopy(const void *vb1, void *vb2, u_int l)
{
	const u_char *b1e, *b1 = vb1;
	u_char *b2 = vb2;
	const u_short *sp;
	int bstore = 0;

	if (l == 0)
		return;

	/* front: move one byte so the source becomes 16-bit aligned. */
	if ((u_long)b1 & 1) {
		*b2++ = *b1++;
		l--;
	}

	/* middle: 16-bit loads from the now-aligned source. */
	sp = (const u_short *)b1;
	b1e = b1 + l;
	if (l & 1)
		/* exclude a trailing odd byte from the word loop */
		b1e--;
	bstore = (u_long)b2 & 1;	/* destination odd-aligned? */

	while (sp < (const u_short *)b1e) {
		if (bstore) {
			/* store high byte first: big-endian byte order */
			b2[1] = *sp & 0xff;
			b2[0] = *sp >> 8;
		} else
			*((short *)b2) = *sp;
		sp++;
		b2 += 2;
	}

	/* and back: the trailing odd byte, if any. */
	if (l & 1)
		*b2 = *b1e;
}
1238
1239 #ifdef MODULAR
/*
 * Machine-dependent module initialization hook (MODULAR kernels).
 * Nothing to do on this platform.
 */
void
module_init_md(void)
{
}
1244 #endif
1245
1246 static size_t
_bus_dmamap_mapsize(int const nsegments)1247 _bus_dmamap_mapsize(int const nsegments)
1248 {
1249 KASSERT(nsegments > 0);
1250 return sizeof(struct sparc_bus_dmamap) +
1251 (sizeof(bus_dma_segment_t) * (nsegments - 1));
1252 }
1253
1254 /*
1255 * Common function for DMA map creation. May be called by bus-specific
1256 * DMA map creation functions.
1257 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags,
    bus_dmamap_t *dmamp)
{
	struct sparc_bus_dmamap *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	/* Record the caller's limits; the map starts out empty. */
	map = (struct sparc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_align = PAGE_SIZE;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
1296
1297 /*
1298 * Common function for DMA map destruction. May be called by bus-specific
1299 * DMA map destruction functions.
1300 */
1301 void
_bus_dmamap_destroy(bus_dma_tag_t t,bus_dmamap_t map)1302 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
1303 {
1304
1305 kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
1306 }
1307
1308 /*
1309 * Like _bus_dmamap_load(), but for mbufs.
1310 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m, int flags)
{

	/* Not implemented on this platform; any call is a fatal error. */
	panic("_bus_dmamap_load_mbuf: not implemented");
}
1318
1319 /*
1320 * Like _bus_dmamap_load(), but for uios.
1321 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	/* Not implemented on this platform; any call is a fatal error. */
	panic("_bus_dmamap_load_uio: not implemented");
}
1329
1330 /*
1331 * Like _bus_dmamap_load(), but for raw memory allocated with
1332 * bus_dmamem_alloc().
1333 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size,
    int flags)
{

	/*
	 * Not implemented generically; sun4_dmamap_load_raw() below is
	 * the version actually installed in mainbus_dma_tag.
	 */
	panic("_bus_dmamap_load_raw: not implemented");
}
1342
1343 /*
1344 * Common function for DMA map synchronization. May be called
1345 * by bus-specific DMA map synchronization functions.
1346 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t offset, bus_size_t len, int ops)
{
	/*
	 * Intentionally empty: no sync work is done here.  (Presumably
	 * DMA memory is mapped non-cached or handled at map time on
	 * these machines — not verifiable from this file.)
	 */
}
1352
1353 /*
1354 * Common function for DMA-safe memory allocation. May be called
1355 * by bus-specific DMA memory allocation functions.
1356 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary,
    bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	vaddr_t low, high;
	struct pglist *mlist;
	int error;

	/* Always round the size. */
	size = round_page(size);
	/* Constrain the allocation to pmap-managed physical memory. */
	low = vm_first_phys;
	high = vm_first_phys + vm_num_phys - PAGE_SIZE;

	/* List header that will carry the allocated pages. */
	if ((mlist = kmem_alloc(sizeof(*mlist),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, 0, 0,
	    mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error) {
		kmem_free(mlist, sizeof(*mlist));
		return (error);
	}

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
	 * ARE IN OUR CUSTODY.
	 */
	segs[0]._ds_mlist = mlist;

	/*
	 * We now have physical pages, but no DVMA addresses yet. These
	 * will be allocated in bus_dmamap_load*() routines. Hence we
	 * save any alignment and boundary requirements in this DMA
	 * segment.
	 */
	segs[0].ds_addr = 0;
	segs[0].ds_len = 0;
	segs[0]._ds_va = 0;
	*rsegs = 1;
	return (0);
}
1407
1408 /*
1409 * Common function for freeing DMA-safe memory. May be called by
1410 * bus-specific DMA memory free functions.
1411 */
1412 void
_bus_dmamem_free(bus_dma_tag_t t,bus_dma_segment_t * segs,int nsegs)1413 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1414 {
1415 struct pglist *mlist = segs[0]._ds_mlist;
1416
1417 if (nsegs != 1)
1418 panic("bus_dmamem_free: nsegs = %d", nsegs);
1419
1420 /*
1421 * Return the list of pages back to the VM system.
1422 */
1423 uvm_pglistfree(mlist);
1424 kmem_free(mlist, sizeof(*mlist));
1425 }
1426
1427 /*
1428 * Common function for unmapping DMA-safe memory. May be called by
1429 * bus-specific DMA memory unmapping functions.
1430 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/* kva must be page-aligned, as handed out by the map routine. */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	/* Remove the wired mappings, then release the VA range. */
	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
1445
1446 /*
1447 * Common function for mmap(2)'ing DMA-safe memory. May be called by
1448 * bus-specific DMA mmap(2)'ing functions.
1449 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented on this platform; any call is a fatal error. */
	panic("_bus_dmamem_mmap: not implemented");
}
1457
1458 /*
1459 * Utility to allocate an aligned kernel virtual address range
1460 */
vaddr_t
_bus_dma_valloc_skewed(size_t size, u_long boundary, u_long align, u_long skew)
{
	size_t oversize;
	vaddr_t va, sva;

	/*
	 * Find a region of kernel virtual addresses that is aligned
	 * to the given address modulo the requested alignment, i.e.
	 *
	 *	(va - skew) == 0 mod align
	 *
	 * The following conditions apply to the arguments:
	 *
	 *	- `size' must be a multiple of the VM page size
	 *	- `align' must be a power of two
	 *	   and greater than or equal to the VM page size
	 *	- `skew' must be smaller than `align'
	 *	- `size' must be smaller than `boundary'
	 */

#ifdef DIAGNOSTIC
	if ((size & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid size %lx", size);
	if ((align & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid alignment %lx", align);
	if (align < skew)
		panic("_bus_dma_valloc_skewed: align %lx < skew %lx",
			align, skew);
#endif

	/* XXX - Implement this! */
	if (boundary) {
		printf("_bus_dma_valloc_skewed: "
			"boundary check not implemented");
		return (0);
	}

	/*
	 * First, find a region large enough to contain any aligned chunk.
	 * Over-allocating by (align - PAGE_SIZE) guarantees a suitably
	 * skewed sub-range exists somewhere inside.
	 */
	oversize = size + align - PAGE_SIZE;
	sva = vm_map_min(kernel_map);
	if (uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOWAIT)))
		return (0);

	/*
	 * Compute start of aligned region: advance to the first address
	 * >= sva satisfying (va - skew) == 0 mod align.
	 */
	va = sva;
	va += (skew + align - va) & (align - 1);

	/*
	 * Return excess virtual addresses (the slack before and after
	 * the chosen window).
	 */
	if (va != sva)
		(void)uvm_unmap(kernel_map, sva, va);
	if (va + size != sva + oversize)
		(void)uvm_unmap(kernel_map, va + size, sva + oversize);

	return (va);
}
1525
1526 /* sun4/sun4c DMA map functions */
1527 int sun4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
1528 bus_size_t, struct proc *, int);
1529 int sun4_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
1530 bus_dma_segment_t *, int, bus_size_t, int);
1531 void sun4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
1532 int sun4_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
1533 int, size_t, void **, int);
1534
1535 /*
1536 * sun4/sun4c: load DMA map with a linear buffer.
1537 */
int
sun4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t buflen,
    struct proc *p, int flags)
{
	bus_size_t sgsize;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	vaddr_t dva;
	pmap_t pmap;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	cache_flush(buf, buflen);

	if ((map->_dm_flags & BUS_DMA_24BIT) == 0) {
		/*
		 * Fast path: DMA directly on the buffer's own address,
		 * no DVMA allocation needed.
		 *
		 * XXX Need to implement "don't DMA across this boundary".
		 */
		if (map->_dm_boundary != 0) {
			bus_addr_t baddr;

			/* Calculate first boundary line after `buf' */
			baddr = ((bus_addr_t)va + map->_dm_boundary) &
			    -map->_dm_boundary;

			/*
			 * If the requested segment crosses the boundary,
			 * we can't grant a direct map. For now, steal some
			 * space from the `24BIT' map instead.
			 *
			 * (XXX - no overflow detection here)
			 */
			if (buflen > (baddr - (bus_addr_t)va))
				goto no_fit;
		}
		map->dm_mapsize = buflen;
		map->dm_nsegs = 1;
		map->dm_segs[0].ds_addr = (bus_addr_t)va;
		map->dm_segs[0].ds_len = buflen;
		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
		return (0);
	}

no_fit:
	/* DVMA space needed covers the buffer plus its in-page offset. */
	sgsize = round_page(buflen + (va & (pagesz - 1)));

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	if (vmem_xalloc(dvmamap24, sgsize,
			0,			/* alignment */
			0,			/* phase */
			map->_dm_boundary,	/* nocross */
			VMEM_ADDR_MIN,		/* minaddr */
			VMEM_ADDR_MAX,		/* maxaddr */
			vmflags,
			&dva) != 0) {
		return (ENOMEM);
	}

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Translate through the owning process, or the kernel pmap. */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {
		paddr_t pa;

		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

		/*
		 * Compute the segment size, and adjust counts.
		 * The first chunk may be partial if va starts mid-page;
		 * dva always advances a whole page.
		 */
		sgsize = pagesz - (va & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

#ifdef notyet
#if defined(SUN4)
		if (have_iocache)
			pa |= PG_IOC;
#endif
#endif
		/* Enter a non-cached wired mapping for the DVMA page. */
		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);

		dva += pagesz;
		va += sgsize;
		buflen -= sgsize;
	}
	pmap_update(pmap_kernel());

	map->dm_nsegs = 1;
	return (0);
}
1651
1652 /*
1653 * Like _bus_dmamap_load(), but for raw memory allocated with
1654 * bus_dmamem_alloc().
1655 */
int
sun4_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size,
    int flags)
{
	struct vm_page *m;
	paddr_t pa;
	vaddr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	/* Start from "no valid mappings" in case we bail out. */
	map->dm_nsegs = 0;
	sgsize = (size + pagesz - 1) & -pagesz;	/* round up to a page */

	/* Allocate DVMA addresses */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		/* 24-bit devices draw from the dedicated DVMA arena. */
		const vm_flag_t vmflags = VM_BESTFIT |
		    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

		error = vmem_xalloc(dvmamap24, sgsize,
				    0,			/* alignment */
				    0,			/* phase */
				    map->_dm_boundary,	/* nocross */
				    VMEM_ADDR_MIN,	/* minaddr */
				    VMEM_ADDR_MAX,	/* maxaddr */
				    vmflags,
				    &dva);
		if (error)
			return (error);
	} else {
		/* Any properly aligned virtual address will do */
		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
					     pagesz, 0);
		if (dva == 0)
			return (ENOMEM);
	}

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/*
	 * Map physical pages into IOMMU: walk the page list attached
	 * by _bus_dmamem_alloc(), one non-cached mapping per page.
	 */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
		if (sgsize == 0)
			panic("sun4_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
#ifdef notyet
#if defined(SUN4)
		if (have_iocache)
			pa |= PG_IOC;
#endif
#endif
		pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);

		dva += pagesz;
		sgsize -= pagesz;
	}
	pmap_update(pmap_kernel());

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}
1724
1725 /*
1726 * sun4/sun4c function for unloading a DMA map.
1727 */
void
sun4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	int flags = map->_dm_flags;
	vaddr_t dva;
	bus_size_t len;
	int i;

	/* Restore the per-transfer segment-size limit set at creation. */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;

	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
		/* Nothing to release: we DMA'd on the buffer's address. */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
		return;
	}

	/* Tear down the DVMA mappings and return the address space. */
	for (i = 0; i < nsegs; i++) {
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		pmap_kremove(dva, len);

		/* Release to whichever allocator the load routine used. */
		if ((flags & BUS_DMA_24BIT) != 0) {
			vmem_xfree(dvmamap24, dva, len);
		} else {
			uvm_unmap(kernel_map, dva, dva + len);
		}
	}
	pmap_update(pmap_kernel());

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
1766
1767 /*
1768 * Common function for mapping DMA-safe memory. May be called by
1769 * bus-specific DMA memory map functions.
1770 */
int
sun4_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	struct pglist *mlist;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	if (nsegs != 1)
		panic("sun4_dmamem_map: nsegs = %d", nsegs);

	size = round_page(size);

	/* Grab a VA-only range; we wire the pages in ourselves below. */
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (void *)va;

	/*
	 * Map each page of the allocation (from _bus_dmamem_alloc()'s
	 * page list) non-cached into the new range.
	 */
	mlist = segs[0]._ds_mlist;
	TAILQ_FOREACH(m, mlist, pageq.queue) {
		paddr_t pa;

		if (size == 0)
			panic("sun4_dmamem_map: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
1811
1812
/*
 * DMA tag for devices on mainbus: the generic _bus_* methods above,
 * with the sun4-specific load/unload/mem-map routines plugged in.
 */
struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sun4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	sun4_dmamap_load_raw,
	sun4_dmamap_unload,
	_bus_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	sun4_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
1830
1831
1832 /*
1833 * Base bus space handlers.
1834 */
1835 static int sparc_bus_map(bus_space_tag_t, bus_addr_t,
1836 bus_size_t, int, vaddr_t,
1837 bus_space_handle_t *);
1838 static int sparc_bus_unmap(bus_space_tag_t, bus_space_handle_t,
1839 bus_size_t);
1840 static int sparc_bus_subregion(bus_space_tag_t, bus_space_handle_t,
1841 bus_size_t, bus_size_t,
1842 bus_space_handle_t *);
1843 static paddr_t sparc_bus_mmap(bus_space_tag_t, bus_addr_t, off_t,
1844 int, int);
1845 static void *sparc_mainbus_intr_establish(bus_space_tag_t, int, int,
1846 int (*)(void *),
1847 void *,
1848 void (*)(void));
1849 static void sparc_bus_barrier(bus_space_tag_t, bus_space_handle_t,
1850 bus_size_t, bus_size_t, int);
1851
1852 int
bus_space_map(bus_space_tag_t t,bus_addr_t a,bus_size_t s,int f,bus_space_handle_t * hp)1853 bus_space_map(
1854 bus_space_tag_t t,
1855 bus_addr_t a,
1856 bus_size_t s,
1857 int f,
1858 bus_space_handle_t *hp)
1859 {
1860 return (*t->sparc_bus_map)(t, a, s, f, (vaddr_t)0, hp);
1861 }
1862
1863 int
bus_space_map2(bus_space_tag_t t,bus_addr_t a,bus_size_t s,int f,vaddr_t v,bus_space_handle_t * hp)1864 bus_space_map2(
1865 bus_space_tag_t t,
1866 bus_addr_t a,
1867 bus_size_t s,
1868 int f,
1869 vaddr_t v,
1870 bus_space_handle_t *hp)
1871 {
1872 return (*t->sparc_bus_map)(t, a, s, f, v, hp);
1873 }
1874
1875 void
bus_space_unmap(bus_space_tag_t t,bus_space_handle_t h,bus_size_t s)1876 bus_space_unmap(
1877 bus_space_tag_t t,
1878 bus_space_handle_t h,
1879 bus_size_t s)
1880 {
1881 (*t->sparc_bus_unmap)(t, h, s);
1882 }
1883
1884 int
bus_space_subregion(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,bus_size_t s,bus_space_handle_t * hp)1885 bus_space_subregion(
1886 bus_space_tag_t t,
1887 bus_space_handle_t h,
1888 bus_size_t o,
1889 bus_size_t s,
1890 bus_space_handle_t *hp)
1891 {
1892 return (*t->sparc_bus_subregion)(t, h, o, s, hp);
1893 }
1894
1895 paddr_t
bus_space_mmap(bus_space_tag_t t,bus_addr_t a,off_t o,int p,int f)1896 bus_space_mmap(
1897 bus_space_tag_t t,
1898 bus_addr_t a,
1899 off_t o,
1900 int p,
1901 int f)
1902 {
1903 return (*t->sparc_bus_mmap)(t, a, o, p, f);
1904 }
1905
1906 void *
bus_intr_establish(bus_space_tag_t t,int p,int l,int (* h)(void *),void * a)1907 bus_intr_establish(
1908 bus_space_tag_t t,
1909 int p,
1910 int l,
1911 int (*h)(void *),
1912 void *a)
1913 {
1914 return (*t->sparc_intr_establish)(t, p, l, h, a, NULL);
1915 }
1916
1917 void *
bus_intr_establish2(bus_space_tag_t t,int p,int l,int (* h)(void *),void * a,void (* v)(void))1918 bus_intr_establish2(
1919 bus_space_tag_t t,
1920 int p,
1921 int l,
1922 int (*h)(void *),
1923 void *a,
1924 void (*v)(void))
1925 {
1926 return (*t->sparc_intr_establish)(t, p, l, h, a, v);
1927 }
1928
1929 void
bus_space_barrier(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,bus_size_t s,int f)1930 bus_space_barrier(
1931 bus_space_tag_t t,
1932 bus_space_handle_t h,
1933 bus_size_t o,
1934 bus_size_t s,
1935 int f)
1936 {
1937 (*t->sparc_bus_barrier)(t, h, o, s, f);
1938 }
1939
1940 void
bus_space_write_multi_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)1941 bus_space_write_multi_stream_2(
1942 bus_space_tag_t t,
1943 bus_space_handle_t h,
1944 bus_size_t o,
1945 const uint16_t *a,
1946 bus_size_t c)
1947 {
1948 while (c-- > 0)
1949 bus_space_write_2_real(t, h, o, *a++);
1950 }
1951
1952 void
bus_space_write_multi_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)1953 bus_space_write_multi_stream_4(
1954 bus_space_tag_t t,
1955 bus_space_handle_t h,
1956 bus_size_t o,
1957 const uint32_t *a,
1958 bus_size_t c)
1959 {
1960 while (c-- > 0)
1961 bus_space_write_4_real(t, h, o, *a++);
1962 }
1963
1964 void
bus_space_write_multi_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)1965 bus_space_write_multi_stream_8(
1966 bus_space_tag_t t,
1967 bus_space_handle_t h,
1968 bus_size_t o,
1969 const uint64_t *a,
1970 bus_size_t c)
1971 {
1972 while (c-- > 0)
1973 bus_space_write_8_real(t, h, o, *a++);
1974 }
1975
1976
1977 /*
1978 * void bus_space_set_multi_N(bus_space_tag_t tag,
1979 * bus_space_handle_t bsh, bus_size_t offset, u_intN_t val,
1980 * bus_size_t count);
1981 *
1982 * Write the 1, 2, 4, or 8 byte value `val' to bus space described
1983 * by tag/handle/offset `count' times.
1984 */
1985 void
bus_space_set_multi_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t v,bus_size_t c)1986 bus_space_set_multi_1(
1987 bus_space_tag_t t,
1988 bus_space_handle_t h,
1989 bus_size_t o,
1990 const uint8_t v,
1991 bus_size_t c)
1992 {
1993 while (c-- > 0)
1994 bus_space_write_1(t, h, o, v);
1995 }
1996
1997 void
bus_space_set_multi_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t v,bus_size_t c)1998 bus_space_set_multi_2(
1999 bus_space_tag_t t,
2000 bus_space_handle_t h,
2001 bus_size_t o,
2002 const uint16_t v,
2003 bus_size_t c)
2004 {
2005 while (c-- > 0)
2006 bus_space_write_2(t, h, o, v);
2007 }
2008
2009 void
bus_space_set_multi_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t v,bus_size_t c)2010 bus_space_set_multi_4(
2011 bus_space_tag_t t,
2012 bus_space_handle_t h,
2013 bus_size_t o,
2014 const uint32_t v,
2015 bus_size_t c)
2016 {
2017 while (c-- > 0)
2018 bus_space_write_4(t, h, o, v);
2019 }
2020
2021 void
bus_space_set_multi_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t v,bus_size_t c)2022 bus_space_set_multi_8(
2023 bus_space_tag_t t,
2024 bus_space_handle_t h,
2025 bus_size_t o,
2026 const uint64_t v,
2027 bus_size_t c)
2028 {
2029 while (c-- > 0)
2030 bus_space_write_8(t, h, o, v);
2031 }
2032
2033
2034 /*
2035 * void bus_space_read_region_N(bus_space_tag_t tag,
2036 * bus_space_handle_t bsh, bus_size_t off,
2037 * u_intN_t *addr, bus_size_t count);
2038 *
2039 */
2040 void
bus_space_read_region_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)2041 bus_space_read_region_1(
2042 bus_space_tag_t t,
2043 bus_space_handle_t h,
2044 bus_size_t o,
2045 uint8_t *a,
2046 bus_size_t c)
2047 {
2048 for (; c; a++, c--, o++)
2049 *a = bus_space_read_1(t, h, o);
2050 }
2051
2052 void
bus_space_read_region_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)2053 bus_space_read_region_2(
2054 bus_space_tag_t t,
2055 bus_space_handle_t h,
2056 bus_size_t o,
2057 uint16_t *a,
2058 bus_size_t c)
2059 {
2060 for (; c; a++, c--, o+=2)
2061 *a = bus_space_read_2(t, h, o);
2062 }
2063
2064 void
bus_space_read_region_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)2065 bus_space_read_region_4(
2066 bus_space_tag_t t,
2067 bus_space_handle_t h,
2068 bus_size_t o,
2069 uint32_t *a,
2070 bus_size_t c)
2071 {
2072 for (; c; a++, c--, o+=4)
2073 *a = bus_space_read_4(t, h, o);
2074 }
2075
2076 void
bus_space_read_region_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)2077 bus_space_read_region_8(
2078 bus_space_tag_t t,
2079 bus_space_handle_t h,
2080 bus_size_t o,
2081 uint64_t *a,
2082 bus_size_t c)
2083 {
2084 for (; c; a++, c--, o+=8)
2085 *a = bus_space_read_8(t, h, o);
2086 }
2087
2088 /*
2089 * void bus_space_write_region_N(bus_space_tag_t tag,
2090 * bus_space_handle_t bsh, bus_size_t off,
2091 * u_intN_t *addr, bus_size_t count);
2092 *
2093 */
2094 void
bus_space_write_region_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)2095 bus_space_write_region_1(
2096 bus_space_tag_t t,
2097 bus_space_handle_t h,
2098 bus_size_t o,
2099 const uint8_t *a,
2100 bus_size_t c)
2101 {
2102 for (; c; a++, c--, o++)
2103 bus_space_write_1(t, h, o, *a);
2104 }
2105
2106 void
bus_space_write_region_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)2107 bus_space_write_region_2(
2108 bus_space_tag_t t,
2109 bus_space_handle_t h,
2110 bus_size_t o,
2111 const uint16_t *a,
2112 bus_size_t c)
2113 {
2114 for (; c; a++, c--, o+=2)
2115 bus_space_write_2(t, h, o, *a);
2116 }
2117
2118 void
bus_space_write_region_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)2119 bus_space_write_region_4(
2120 bus_space_tag_t t,
2121 bus_space_handle_t h,
2122 bus_size_t o,
2123 const uint32_t *a,
2124 bus_size_t c)
2125 {
2126 for (; c; a++, c--, o+=4)
2127 bus_space_write_4(t, h, o, *a);
2128 }
2129
2130 void
bus_space_write_region_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)2131 bus_space_write_region_8(
2132 bus_space_tag_t t,
2133 bus_space_handle_t h,
2134 bus_size_t o,
2135 const uint64_t *a,
2136 bus_size_t c)
2137 {
2138 for (; c; a++, c--, o+=8)
2139 bus_space_write_8(t, h, o, *a);
2140 }
2141
2142
2143 /*
2144 * void bus_space_set_region_N(bus_space_tag_t tag,
2145 * bus_space_handle_t bsh, bus_size_t off,
2146 * u_intN_t *addr, bus_size_t count);
2147 *
2148 */
2149 void
bus_space_set_region_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t v,bus_size_t c)2150 bus_space_set_region_1(
2151 bus_space_tag_t t,
2152 bus_space_handle_t h,
2153 bus_size_t o,
2154 const uint8_t v,
2155 bus_size_t c)
2156 {
2157 for (; c; c--, o++)
2158 bus_space_write_1(t, h, o, v);
2159 }
2160
2161 void
bus_space_set_region_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t v,bus_size_t c)2162 bus_space_set_region_2(
2163 bus_space_tag_t t,
2164 bus_space_handle_t h,
2165 bus_size_t o,
2166 const uint16_t v,
2167 bus_size_t c)
2168 {
2169 for (; c; c--, o+=2)
2170 bus_space_write_2(t, h, o, v);
2171 }
2172
2173 void
bus_space_set_region_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t v,bus_size_t c)2174 bus_space_set_region_4(
2175 bus_space_tag_t t,
2176 bus_space_handle_t h,
2177 bus_size_t o,
2178 const uint32_t v,
2179 bus_size_t c)
2180 {
2181 for (; c; c--, o+=4)
2182 bus_space_write_4(t, h, o, v);
2183 }
2184
2185 void
bus_space_set_region_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t v,bus_size_t c)2186 bus_space_set_region_8(
2187 bus_space_tag_t t,
2188 bus_space_handle_t h,
2189 bus_size_t o,
2190 const uint64_t v,
2191 bus_size_t c)
2192 {
2193 for (; c; c--, o+=8)
2194 bus_space_write_8(t, h, o, v);
2195 }
2196
2197
2198 /*
2199 * void bus_space_copy_region_N(bus_space_tag_t tag,
2200 * bus_space_handle_t bsh1, bus_size_t off1,
2201 * bus_space_handle_t bsh2, bus_size_t off2,
2202 * bus_size_t count);
2203 *
2204 * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
2205 * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
2206 */
void
bus_space_copy_region_1(
	bus_space_tag_t t,
	bus_space_handle_t h1,
	bus_size_t o1,
	bus_space_handle_t h2,
	bus_size_t o2,
	bus_size_t c)
{
	/*
	 * NOTE(review): this reads from (h2, o2) and writes to (h1, o1),
	 * i.e. it copies bsh2 -> bsh1 — the reverse of the direction the
	 * comment above describes.  Confirm against bus_space(9) before
	 * relying on the documented direction.
	 */
	for (; c; c--, o1++, o2++)
		bus_space_write_1(t, h1, o1, bus_space_read_1(t, h2, o2));
}
2219
void
bus_space_copy_region_2(
	bus_space_tag_t t,
	bus_space_handle_t h1,
	bus_size_t o1,
	bus_space_handle_t h2,
	bus_size_t o2,
	bus_size_t c)
{
	/*
	 * NOTE(review): copies bsh2 -> bsh1 (reads h2/o2, writes h1/o1),
	 * the reverse of the direction documented in the comment above;
	 * confirm against bus_space(9).
	 */
	for (; c; c--, o1+=2, o2+=2)
		bus_space_write_2(t, h1, o1, bus_space_read_2(t, h2, o2));
}
2232
void
bus_space_copy_region_4(
	bus_space_tag_t t,
	bus_space_handle_t h1,
	bus_size_t o1,
	bus_space_handle_t h2,
	bus_size_t o2,
	bus_size_t c)
{
	/*
	 * NOTE(review): copies bsh2 -> bsh1 (reads h2/o2, writes h1/o1),
	 * the reverse of the direction documented in the comment above;
	 * confirm against bus_space(9).
	 */
	for (; c; c--, o1+=4, o2+=4)
		bus_space_write_4(t, h1, o1, bus_space_read_4(t, h2, o2));
}
2245
void
bus_space_copy_region_8(
	bus_space_tag_t t,
	bus_space_handle_t h1,
	bus_size_t o1,
	bus_space_handle_t h2,
	bus_size_t o2,
	bus_size_t c)
{
	/*
	 * NOTE(review): copies bsh2 -> bsh1 (reads h2/o2, writes h1/o1),
	 * the reverse of the direction documented in the comment above;
	 * confirm against bus_space(9).
	 */
	for (; c; c--, o1+=8, o2+=8)
		bus_space_write_8(t, h1, o1, bus_space_read_8(t, h2, o2));
}
2258
2259 /*
2260 * void bus_space_read_region_stream_N(bus_space_tag_t tag,
2261 * bus_space_handle_t bsh, bus_size_t off,
2262 * u_intN_t *addr, bus_size_t count);
2263 *
2264 */
2265 void
bus_space_read_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)2266 bus_space_read_region_stream_1(
2267 bus_space_tag_t t,
2268 bus_space_handle_t h,
2269 bus_size_t o,
2270 uint8_t *a,
2271 bus_size_t c)
2272 {
2273 for (; c; a++, c--, o++)
2274 *a = bus_space_read_stream_1(t, h, o);
2275 }
2276 void
bus_space_read_region_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)2277 bus_space_read_region_stream_2(
2278 bus_space_tag_t t,
2279 bus_space_handle_t h,
2280 bus_size_t o,
2281 uint16_t *a,
2282 bus_size_t c)
2283 {
2284 for (; c; a++, c--, o+=2)
2285 *a = bus_space_read_stream_2(t, h, o);
2286 }
2287 void
bus_space_read_region_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)2288 bus_space_read_region_stream_4(
2289 bus_space_tag_t t,
2290 bus_space_handle_t h,
2291 bus_size_t o,
2292 uint32_t *a,
2293 bus_size_t c)
2294 {
2295 for (; c; a++, c--, o+=4)
2296 *a = bus_space_read_stream_4(t, h, o);
2297 }
2298 void
bus_space_read_region_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)2299 bus_space_read_region_stream_8(
2300 bus_space_tag_t t,
2301 bus_space_handle_t h,
2302 bus_size_t o,
2303 uint64_t *a,
2304 bus_size_t c)
2305 {
2306 for (; c; a++, c--, o+=8)
2307 *a = bus_space_read_stream_8(t, h, o);
2308 }
2309
2310 /*
2311 * void bus_space_write_region_stream_N(bus_space_tag_t tag,
2312 * bus_space_handle_t bsh, bus_size_t off,
2313 * u_intN_t *addr, bus_size_t count);
2314 *
2315 */
2316 void
bus_space_write_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)2317 bus_space_write_region_stream_1(
2318 bus_space_tag_t t,
2319 bus_space_handle_t h,
2320 bus_size_t o,
2321 const uint8_t *a,
2322 bus_size_t c)
2323 {
2324 for (; c; a++, c--, o++)
2325 bus_space_write_stream_1(t, h, o, *a);
2326 }
2327
2328 void
bus_space_write_region_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)2329 bus_space_write_region_stream_2(
2330 bus_space_tag_t t,
2331 bus_space_handle_t h,
2332 bus_size_t o,
2333 const uint16_t *a,
2334 bus_size_t c)
2335 {
2336 for (; c; a++, c--, o+=2)
2337 bus_space_write_stream_2(t, h, o, *a);
2338 }
2339
2340 void
bus_space_write_region_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)2341 bus_space_write_region_stream_4(
2342 bus_space_tag_t t,
2343 bus_space_handle_t h,
2344 bus_size_t o,
2345 const uint32_t *a,
2346 bus_size_t c)
2347 {
2348 for (; c; a++, c--, o+=4)
2349 bus_space_write_stream_4(t, h, o, *a);
2350 }
2351
2352 void
bus_space_write_region_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)2353 bus_space_write_region_stream_8(
2354 bus_space_tag_t t,
2355 bus_space_handle_t h,
2356 bus_size_t o,
2357 const uint64_t *a,
2358 bus_size_t c)
2359 {
2360 for (; c; a++, c--, o+=8)
2361 bus_space_write_stream_8(t, h, o, *a);
2362 }
2363
2364
/*
 * void bus_space_set_region_stream_N(bus_space_tag_t tag,
 *	bus_space_handle_t bsh, bus_size_t off,
 *	u_intN_t val, bus_size_t count);
 *
 * Write `count' copies of the value `val' to consecutive bus space
 * locations starting at tag/bsh/off.
 */
2371 void
bus_space_set_region_stream_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t v,bus_size_t c)2372 bus_space_set_region_stream_1(
2373 bus_space_tag_t t,
2374 bus_space_handle_t h,
2375 bus_size_t o,
2376 const uint8_t v,
2377 bus_size_t c)
2378 {
2379 for (; c; c--, o++)
2380 bus_space_write_stream_1(t, h, o, v);
2381 }
2382
2383 void
bus_space_set_region_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t v,bus_size_t c)2384 bus_space_set_region_stream_2(
2385 bus_space_tag_t t,
2386 bus_space_handle_t h,
2387 bus_size_t o,
2388 const uint16_t v,
2389 bus_size_t c)
2390 {
2391 for (; c; c--, o+=2)
2392 bus_space_write_stream_2(t, h, o, v);
2393 }
2394
2395 void
bus_space_set_region_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t v,bus_size_t c)2396 bus_space_set_region_stream_4(
2397 bus_space_tag_t t,
2398 bus_space_handle_t h,
2399 bus_size_t o,
2400 const uint32_t v,
2401 bus_size_t c)
2402 {
2403 for (; c; c--, o+=4)
2404 bus_space_write_stream_4(t, h, o, v);
2405 }
2406
2407 void
bus_space_set_region_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t v,bus_size_t c)2408 bus_space_set_region_stream_8(
2409 bus_space_tag_t t,
2410 bus_space_handle_t h,
2411 bus_size_t o,
2412 const uint64_t v,
2413 bus_size_t c)
2414 {
2415 for (; c; c--, o+=8)
2416 bus_space_write_stream_8(t, h, o, v);
2417 }
2418
/*
 * void bus_space_copy_region_stream_N(bus_space_tag_t tag,
 *	bus_space_handle_t bsh1, bus_size_t off1,
 *	bus_space_handle_t bsh2, bus_size_t off2,
 *	bus_size_t count);
 *
 * Copy `count' 1, 2, 4, or 8 byte values between the bus space regions
 * tag/bsh1/off1 and tag/bsh2/off2.
 *
 * NOTE(review): the implementations below read from bsh2/off2 and write
 * to bsh1/off1, the reverse of the direction documented in bus_space(9);
 * confirm against existing callers before relying on either direction.
 */
2428
2429 void
bus_space_copy_region_stream_1(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2430 bus_space_copy_region_stream_1(
2431 bus_space_tag_t t,
2432 bus_space_handle_t h1,
2433 bus_size_t o1,
2434 bus_space_handle_t h2,
2435 bus_size_t o2,
2436 bus_size_t c)
2437 {
2438 for (; c; c--, o1++, o2++)
2439 bus_space_write_stream_1(t, h1, o1, bus_space_read_stream_1(t, h2, o2));
2440 }
2441
2442 void
bus_space_copy_region_stream_2(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2443 bus_space_copy_region_stream_2(
2444 bus_space_tag_t t,
2445 bus_space_handle_t h1,
2446 bus_size_t o1,
2447 bus_space_handle_t h2,
2448 bus_size_t o2,
2449 bus_size_t c)
2450 {
2451 for (; c; c--, o1+=2, o2+=2)
2452 bus_space_write_stream_2(t, h1, o1, bus_space_read_stream_2(t, h2, o2));
2453 }
2454
2455 void
bus_space_copy_region_stream_4(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2456 bus_space_copy_region_stream_4(
2457 bus_space_tag_t t,
2458 bus_space_handle_t h1,
2459 bus_size_t o1,
2460 bus_space_handle_t h2,
2461 bus_size_t o2,
2462 bus_size_t c)
2463 {
2464 for (; c; c--, o1+=4, o2+=4)
2465 bus_space_write_stream_4(t, h1, o1, bus_space_read_stream_4(t, h2, o2));
2466 }
2467
2468 void
bus_space_copy_region_stream_8(bus_space_tag_t t,bus_space_handle_t h1,bus_size_t o1,bus_space_handle_t h2,bus_size_t o2,bus_size_t c)2469 bus_space_copy_region_stream_8(
2470 bus_space_tag_t t,
2471 bus_space_handle_t h1,
2472 bus_size_t o1,
2473 bus_space_handle_t h2,
2474 bus_size_t o2,
2475 bus_size_t c)
2476 {
2477 for (; c; c--, o1+=8, o2+=8)
2478 bus_space_write_stream_8(t, h1, o1, bus_space_read_8(t, h2, o2));
2479 }
2480
2481 void
bus_space_write_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t v)2482 bus_space_write_1(
2483 bus_space_tag_t t,
2484 bus_space_handle_t h,
2485 bus_size_t o,
2486 uint8_t v)
2487 {
2488 (*t->sparc_write_1)(t, h, o, v);
2489 }
2490
2491 void
bus_space_write_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t v)2492 bus_space_write_2(
2493 bus_space_tag_t t,
2494 bus_space_handle_t h,
2495 bus_size_t o,
2496 uint16_t v)
2497 {
2498 (*t->sparc_write_2)(t, h, o, v);
2499 }
2500
2501 void
bus_space_write_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t v)2502 bus_space_write_4(
2503 bus_space_tag_t t,
2504 bus_space_handle_t h,
2505 bus_size_t o,
2506 uint32_t v)
2507 {
2508 (*t->sparc_write_4)(t, h, o, v);
2509 }
2510
2511 void
bus_space_write_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t v)2512 bus_space_write_8(
2513 bus_space_tag_t t,
2514 bus_space_handle_t h,
2515 bus_size_t o,
2516 uint64_t v)
2517 {
2518 (*t->sparc_write_8)(t, h, o, v);
2519 }
2520
2521 #if __SLIM_SPARC_BUS_SPACE
2522
/*
 * __SLIM_SPARC_BUS_SPACE variant: bypass the tag's method table and
 * perform the 8-bit store directly via the _real accessor.
 */
void
bus_space_write_1(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	uint8_t v)
{
	/* Compiler-level barrier: keep this store ordered in the source. */
	__insn_barrier();
	bus_space_write_1_real(t, h, o, v);
}
2533
/*
 * __SLIM_SPARC_BUS_SPACE variant: bypass the tag's method table and
 * perform the 16-bit store directly via the _real accessor.
 */
void
bus_space_write_2(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	uint16_t v)
{
	/* Compiler-level barrier: keep this store ordered in the source. */
	__insn_barrier();
	bus_space_write_2_real(t, h, o, v);
}
2544
/*
 * __SLIM_SPARC_BUS_SPACE variant: bypass the tag's method table and
 * perform the 32-bit store directly via the _real accessor.
 */
void
bus_space_write_4(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	uint32_t v)
{
	/* Compiler-level barrier: keep this store ordered in the source. */
	__insn_barrier();
	bus_space_write_4_real(t, h, o, v);
}
2555
/*
 * __SLIM_SPARC_BUS_SPACE variant: bypass the tag's method table and
 * perform the 64-bit store directly via the _real accessor.
 */
void
bus_space_write_8(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	uint64_t v)
{
	/* Compiler-level barrier: keep this store ordered in the source. */
	__insn_barrier();
	bus_space_write_8_real(t, h, o, v);
}
2566
2567 #endif /* __SLIM_SPARC_BUS_SPACE */
2568
2569 uint8_t
bus_space_read_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)2570 bus_space_read_1(
2571 bus_space_tag_t t,
2572 bus_space_handle_t h,
2573 bus_size_t o)
2574 {
2575 return (*t->sparc_read_1)(t, h, o);
2576 }
2577
2578 uint16_t
bus_space_read_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)2579 bus_space_read_2(
2580 bus_space_tag_t t,
2581 bus_space_handle_t h,
2582 bus_size_t o)
2583 {
2584 return (*t->sparc_read_2)(t, h, o);
2585 }
2586
2587 uint32_t
bus_space_read_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)2588 bus_space_read_4(
2589 bus_space_tag_t t,
2590 bus_space_handle_t h,
2591 bus_size_t o)
2592 {
2593 return (*t->sparc_read_4)(t, h, o);
2594 }
2595
2596 uint64_t
bus_space_read_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)2597 bus_space_read_8(
2598 bus_space_tag_t t,
2599 bus_space_handle_t h,
2600 bus_size_t o)
2601 {
2602 return (*t->sparc_read_8)(t, h, o);
2603 }
2604
2605 #if __SLIM_SPARC_BUS_SPACE
/*
 * __SLIM_SPARC_BUS_SPACE variant: bypass the tag's method table and
 * perform the 8-bit load directly via the _real accessor.
 */
uint8_t
bus_space_read_1(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o)
{
	/* Compiler-level barrier: keep this load ordered in the source. */
	__insn_barrier();
	return bus_space_read_1_real(t, h, o);
}
2615
/*
 * __SLIM_SPARC_BUS_SPACE variant: bypass the tag's method table and
 * perform the 16-bit load directly via the _real accessor.
 */
uint16_t
bus_space_read_2(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o)
{
	/* Compiler-level barrier: keep this load ordered in the source. */
	__insn_barrier();
	return bus_space_read_2_real(t, h, o);
}
2625
/*
 * __SLIM_SPARC_BUS_SPACE variant: bypass the tag's method table and
 * perform the 32-bit load directly via the _real accessor.
 */
uint32_t
bus_space_read_4(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o)
{
	/* Compiler-level barrier: keep this load ordered in the source. */
	__insn_barrier();
	return bus_space_read_4_real(t, h, o);
}
2635
/*
 * __SLIM_SPARC_BUS_SPACE variant: bypass the tag's method table and
 * perform the 64-bit load directly via the _real accessor.
 */
uint64_t
bus_space_read_8(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o)
{
	/* Compiler-level barrier: keep this load ordered in the source. */
	__insn_barrier();
	return bus_space_read_8_real(t, h, o);
}
2645
2646 #endif /* __SLIM_SPARC_BUS_SPACE */
2647
2648 void
bus_space_read_multi_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t * a,bus_size_t c)2649 bus_space_read_multi_1(
2650 bus_space_tag_t t,
2651 bus_space_handle_t h,
2652 bus_size_t o,
2653 uint8_t *a,
2654 bus_size_t c)
2655 {
2656 while (c-- > 0)
2657 *a++ = bus_space_read_1(t, h, o);
2658 }
2659
2660 void
bus_space_read_multi_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)2661 bus_space_read_multi_2(
2662 bus_space_tag_t t,
2663 bus_space_handle_t h,
2664 bus_size_t o,
2665 uint16_t *a,
2666 bus_size_t c)
2667 {
2668 while (c-- > 0)
2669 *a++ = bus_space_read_2(t, h, o);
2670 }
2671
2672 void
bus_space_read_multi_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)2673 bus_space_read_multi_4(
2674 bus_space_tag_t t,
2675 bus_space_handle_t h,
2676 bus_size_t o,
2677 uint32_t *a,
2678 bus_size_t c)
2679 {
2680 while (c-- > 0)
2681 *a++ = bus_space_read_4(t, h, o);
2682 }
2683
2684 void
bus_space_read_multi_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)2685 bus_space_read_multi_8(
2686 bus_space_tag_t t,
2687 bus_space_handle_t h,
2688 bus_size_t o,
2689 uint64_t *a,
2690 bus_size_t c)
2691 {
2692 while (c-- > 0)
2693 *a++ = bus_space_read_8(t, h, o);
2694 }
2695
/*
 * void bus_space_read_multi_stream_N(bus_space_tag_t tag,
 *	bus_space_handle_t bsh, bus_size_t offset,
 *	u_intN_t *addr, bus_size_t count);
 *
 * Read `count' 2, 4, or 8 byte stream quantities from the single bus
 * space location described by tag/handle/offset and copy them into
 * the buffer provided.
 */
2704 void
bus_space_read_multi_stream_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t * a,bus_size_t c)2705 bus_space_read_multi_stream_2(
2706 bus_space_tag_t t,
2707 bus_space_handle_t h,
2708 bus_size_t o,
2709 uint16_t *a,
2710 bus_size_t c)
2711 {
2712 while (c-- > 0)
2713 *a++ = bus_space_read_2_real(t, h, o);
2714 }
2715
2716 void
bus_space_read_multi_stream_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t * a,bus_size_t c)2717 bus_space_read_multi_stream_4(
2718 bus_space_tag_t t,
2719 bus_space_handle_t h,
2720 bus_size_t o,
2721 uint32_t *a,
2722 bus_size_t c)
2723 {
2724 while (c-- > 0)
2725 *a++ = bus_space_read_4_real(t, h, o);
2726 }
2727
2728 void
bus_space_read_multi_stream_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t * a,bus_size_t c)2729 bus_space_read_multi_stream_8(
2730 bus_space_tag_t t,
2731 bus_space_handle_t h,
2732 bus_size_t o,
2733 uint64_t *a,
2734 bus_size_t c)
2735 {
2736 while (c-- > 0)
2737 *a++ = bus_space_read_8_real(t, h, o);
2738 }
2739
2740 /*
2741 * void bus_space_write_multi_N(bus_space_tag_t tag,
2742 * bus_space_handle_t bsh, bus_size_t offset,
2743 * const u_intN_t *addr, bus_size_t count);
2744 *
2745 * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
2746 * provided to bus space described by tag/handle/offset.
2747 */
2748 void
bus_space_write_multi_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint8_t * a,bus_size_t c)2749 bus_space_write_multi_1(
2750 bus_space_tag_t t,
2751 bus_space_handle_t h,
2752 bus_size_t o,
2753 const uint8_t *a,
2754 bus_size_t c)
2755 {
2756 while (c-- > 0)
2757 bus_space_write_1(t, h, o, *a++);
2758 }
2759
2760 void
bus_space_write_multi_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint16_t * a,bus_size_t c)2761 bus_space_write_multi_2(
2762 bus_space_tag_t t,
2763 bus_space_handle_t h,
2764 bus_size_t o,
2765 const uint16_t *a,
2766 bus_size_t c)
2767 {
2768 while (c-- > 0)
2769 bus_space_write_2(t, h, o, *a++);
2770 }
2771
2772 void
bus_space_write_multi_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint32_t * a,bus_size_t c)2773 bus_space_write_multi_4(
2774 bus_space_tag_t t,
2775 bus_space_handle_t h,
2776 bus_size_t o,
2777 const uint32_t *a,
2778 bus_size_t c)
2779 {
2780 while (c-- > 0)
2781 bus_space_write_4(t, h, o, *a++);
2782 }
2783
2784 void
bus_space_write_multi_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,const uint64_t * a,bus_size_t c)2785 bus_space_write_multi_8(
2786 bus_space_tag_t t,
2787 bus_space_handle_t h,
2788 bus_size_t o,
2789 const uint64_t *a,
2790 bus_size_t c)
2791 {
2792 while (c-- > 0)
2793 bus_space_write_8(t, h, o, *a++);
2794 }
2795
2796 /*
2797 * Allocate a new bus tag and have it inherit the methods of the
2798 * given parent.
2799 */
2800 bus_space_tag_t
bus_space_tag_alloc(bus_space_tag_t parent,void * cookie)2801 bus_space_tag_alloc(bus_space_tag_t parent, void *cookie)
2802 {
2803 struct sparc_bus_space_tag *sbt;
2804
2805 sbt = kmem_zalloc(sizeof(*sbt), KM_SLEEP);
2806
2807 if (parent) {
2808 memcpy(sbt, parent, sizeof(*sbt));
2809 sbt->parent = parent;
2810 sbt->ranges = NULL;
2811 sbt->nranges = 0;
2812 }
2813
2814 sbt->cookie = cookie;
2815 return (sbt);
2816 }
2817
2818 /*
2819 * Generic routine to translate an address using OpenPROM `ranges'.
2820 */
2821 int
bus_space_translate_address_generic(struct openprom_range * ranges,int nranges,bus_addr_t * bap)2822 bus_space_translate_address_generic(struct openprom_range *ranges, int nranges,
2823 bus_addr_t *bap)
2824 {
2825 int i, space = BUS_ADDR_IOSPACE(*bap);
2826
2827 for (i = 0; i < nranges; i++) {
2828 struct openprom_range *rp = &ranges[i];
2829
2830 if (rp->or_child_space != space)
2831 continue;
2832
2833 /* We've found the connection to the parent bus. */
2834 *bap = BUS_ADDR(rp->or_parent_space,
2835 rp->or_parent_base + BUS_ADDR_PADDR(*bap));
2836 return (0);
2837 }
2838
2839 return (EINVAL);
2840 }
2841
/*
 * Map a device into kernel virtual space.  When `va' is non-zero it
 * names the KVA to use; otherwise space is carved out of the fixed
 * IODEV_BASE..IODEV_END window.  The page offset of the bus address
 * is preserved in the returned handle.
 */
static int
sparc_bus_map_iodev(bus_space_tag_t t, bus_addr_t ba, bus_size_t size, int flags,
    vaddr_t va, bus_space_handle_t *hp)
{
	vaddr_t v;
	paddr_t pa;
	unsigned int pmtype;
	bus_space_tag_t pt;
	static vaddr_t iobase;	/* next free VA in the IODEV window */

	/*
	 * This base class bus map function knows about address range
	 * translation so bus drivers that need no other special
	 * handling can just keep this method in their tags.
	 *
	 * We expect to resolve range translations iteratively, but allow
	 * for recursion just in case.
	 */
	while ((pt = t->parent) != NULL) {
		if (t->ranges != NULL) {
			int error;

			if ((error = bus_space_translate_address_generic(
			    t->ranges, t->nranges, &ba)) != 0)
				return (error);
		}
		/* A parent with its own map method takes over entirely. */
		if (pt->sparc_bus_map != sparc_bus_map)
			return (bus_space_map2(pt, ba, size, flags, va, hp));
		t = pt;
	}

	/* Lazily initialize the allocation cursor on first use. */
	if (iobase == 0)
		iobase = IODEV_BASE;

	size = round_page(size);
	if (size == 0) {
		printf("sparc_bus_map: zero size\n");
		return (EINVAL);
	}

	if (va)
		v = trunc_page(va);
	else {
		/* Carve a fresh chunk out of the IODEV window. */
		v = iobase;
		iobase += size;
		if (iobase > IODEV_END)	/* unlikely */
			panic("sparc_bus_map: iobase=0x%lx", iobase);
	}

	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	pa = BUS_ADDR_PADDR(ba);

	/* note: preserve page offset */
	*hp = (bus_space_handle_t)(v | ((u_long)pa & PGOFSET));

	pa = trunc_page(pa);
	do {
		/* Enter the mapping page by page, non-cached. */
		pmap_kenter_pa(v, pa | pmtype | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		v += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);

	pmap_update(pmap_kernel());
	return (0);
}
2908
/*
 * BUS_SPACE_MAP_LARGE case: allocate KVA from kernel_map instead of
 * the fixed IODEV window, then enter the mapping via
 * sparc_bus_map_iodev().
 * NOTE(review): returns -1 rather than an errno value when the VA
 * allocation fails -- confirm callers only test for non-zero.
 */
static int
sparc_bus_map_large(bus_space_tag_t t, bus_addr_t ba,
    bus_size_t size, int flags, bus_space_handle_t *hp)
{
	vaddr_t v = 0;

	/* uvm_map() returns 0 on success and picks the VA for us. */
	if (uvm_map(kernel_map, &v, size, NULL, 0, PAGE_SIZE,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_NORMAL,
	    0)) == 0) {
		return sparc_bus_map_iodev(t, ba, size, flags, v, hp);
	}
	return -1;
}
2922
2923 int
sparc_bus_map(bus_space_tag_t t,bus_addr_t ba,bus_size_t size,int flags,vaddr_t va,bus_space_handle_t * hp)2924 sparc_bus_map(bus_space_tag_t t, bus_addr_t ba,
2925 bus_size_t size, int flags, vaddr_t va,
2926 bus_space_handle_t *hp)
2927 {
2928
2929 if (flags & BUS_SPACE_MAP_LARGE) {
2930 return sparc_bus_map_large(t, ba, size, flags, hp);
2931 } else
2932 return sparc_bus_map_iodev(t, ba, size, flags, va, hp);
2933
2934 }
2935
2936 int
sparc_bus_unmap(bus_space_tag_t t,bus_space_handle_t bh,bus_size_t size)2937 sparc_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
2938 {
2939 vaddr_t va = trunc_page((vaddr_t)bh);
2940
2941 /*
2942 * XXX
2943 * mappings with BUS_SPACE_MAP_LARGE need additional care here
2944 * we can just check if the VA is in the IODEV range
2945 */
2946
2947 pmap_kremove(va, round_page(size));
2948 pmap_update(pmap_kernel());
2949 return (0);
2950 }
2951
2952 int
sparc_bus_subregion(bus_space_tag_t tag,bus_space_handle_t handle,bus_size_t offset,bus_size_t size,bus_space_handle_t * nhandlep)2953 sparc_bus_subregion(bus_space_tag_t tag, bus_space_handle_t handle,
2954 bus_size_t offset, bus_size_t size,
2955 bus_space_handle_t *nhandlep)
2956 {
2957
2958 *nhandlep = handle + offset;
2959 return (0);
2960 }
2961
/*
 * Base-class mmap method: translate `ba' up through the tag hierarchy
 * (see also sparc_bus_map) and return a pmap cookie combining the
 * truncated physical page, the I/O-space encoding, and PMAP_NC.
 * Returns (paddr_t)-1 on translation failure.
 */
paddr_t
sparc_bus_mmap(bus_space_tag_t t, bus_addr_t ba, off_t off,
    int prot, int flags)
{
	u_int pmtype;
	paddr_t pa;
	bus_space_tag_t pt;

	/*
	 * Base class bus mmap function; see also sparc_bus_map
	 */
	while ((pt = t->parent) != NULL) {
		if (t->ranges != NULL) {
			int error;

			/* The specific translation error is discarded. */
			if ((error = bus_space_translate_address_generic(
			    t->ranges, t->nranges, &ba)) != 0)
				return (-1);
		}
		/* A parent with its own mmap method takes over entirely. */
		if (pt->sparc_bus_mmap != sparc_bus_mmap)
			return (bus_space_mmap(pt, ba, off, prot, flags));
		t = pt;
	}

	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	pa = trunc_page(BUS_ADDR_PADDR(ba) + off);

	return (paddr_t)(pa | pmtype | PMAP_NC);
}
2991
/*
 * Establish a temporary bus mapping for device probing.
 * Maps `size' bytes at `paddr' at the scratch address TMPMAP_VA,
 * probes (mapping + offset) with probeget() (which returns -1 if the
 * access faulted), optionally runs `callback' on success, and tears
 * the mapping down again.  Returns nonzero iff the probe (and the
 * callback, when given) succeeded.
 */
int
bus_space_probe(bus_space_tag_t tag, bus_addr_t paddr, bus_size_t size,
    size_t offset, int flags,
    int (*callback)(void *, void *), void *arg)
{
	bus_space_handle_t bh;
	void *tmp;
	int result;

	if (bus_space_map2(tag, paddr, size, flags, TMPMAP_VA, &bh) != 0)
		return (0);

	tmp = (void *)bh;
	result = (probeget((char *)tmp + offset, size) != -1);
	if (result && callback != NULL)
		result = (*callback)(tmp, arg);
	bus_space_unmap(tag, bh, size);
	return (result);
}
3014
3015
3016 void *
sparc_mainbus_intr_establish(bus_space_tag_t t,int pil,int level,int (* handler)(void *),void * arg,void (* fastvec)(void))3017 sparc_mainbus_intr_establish(bus_space_tag_t t, int pil, int level,
3018 int (*handler)(void *), void *arg,
3019 void (*fastvec)(void))
3020 {
3021 struct intrhand *ih;
3022
3023 ih = kmem_alloc(sizeof(struct intrhand), KM_SLEEP);
3024 ih->ih_fun = handler;
3025 ih->ih_arg = arg;
3026 intr_establish(pil, level, ih, fastvec, false);
3027 return (ih);
3028 }
3029
/*
 * Default barrier method: the base bus defines no barrier action.
 */
void
sparc_bus_barrier(bus_space_tag_t t, bus_space_handle_t h,
    bus_size_t offset, bus_size_t size, int flags)
{
}
3037
3038 static uint8_t
sparc_bus_space_read_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)3039 sparc_bus_space_read_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3040 {
3041
3042 return bus_space_read_1_real(t, h, o);
3043 }
3044
3045 static uint16_t
sparc_bus_space_read_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)3046 sparc_bus_space_read_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3047 {
3048
3049 return bus_space_read_2_real(t, h, o);
3050 }
3051
3052 static uint32_t
sparc_bus_space_read_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)3053 sparc_bus_space_read_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3054 {
3055
3056 return bus_space_read_4_real(t, h, o);
3057 }
3058
3059 static uint64_t
sparc_bus_space_read_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o)3060 sparc_bus_space_read_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
3061 {
3062
3063 return bus_space_read_8_real(t, h, o);
3064 }
3065
3066 static void
sparc_bus_space_write_1(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint8_t v)3067 sparc_bus_space_write_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3068 uint8_t v)
3069 {
3070
3071 bus_space_write_1_real(t, h, o, v);
3072 }
3073
3074 static void
sparc_bus_space_write_2(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint16_t v)3075 sparc_bus_space_write_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3076 uint16_t v)
3077 {
3078
3079 bus_space_write_2_real(t, h, o, v);
3080 }
3081
3082 static void
sparc_bus_space_write_4(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint32_t v)3083 sparc_bus_space_write_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3084 uint32_t v)
3085 {
3086
3087 bus_space_write_4_real(t, h, o, v);
3088 }
3089
3090 static void
sparc_bus_space_write_8(bus_space_tag_t t,bus_space_handle_t h,bus_size_t o,uint64_t v)3091 sparc_bus_space_write_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
3092 uint64_t v)
3093 {
3094
3095 bus_space_write_8_real(t, h, o, v);
3096 }
3097
/*
 * Bus space tag for the root "mainbus".  Other tags inherit these
 * methods via bus_space_tag_alloc(); the read/write slots point at
 * the direct (_real) accessor wrappers above.
 * (Positional initializer kept as-is: the struct's field names are
 * declared in the bus_space header, not here.)
 */
struct sparc_bus_space_tag mainbus_space_tag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	NULL,				/* ranges */
	0,				/* nranges */
	sparc_bus_map,			/* bus_space_map */
	sparc_bus_unmap,		/* bus_space_unmap */
	sparc_bus_subregion,		/* bus_space_subregion */
	sparc_bus_barrier,		/* bus_space_barrier */
	sparc_bus_mmap,			/* bus_space_mmap */
	sparc_mainbus_intr_establish,	/* bus_intr_establish */

	sparc_bus_space_read_1,		/* bus_space_read_1 */
	sparc_bus_space_read_2,		/* bus_space_read_2 */
	sparc_bus_space_read_4,		/* bus_space_read_4 */
	sparc_bus_space_read_8,		/* bus_space_read_8 */
	sparc_bus_space_write_1,	/* bus_space_write_1 */
	sparc_bus_space_write_2,	/* bus_space_write_2 */
	sparc_bus_space_write_4,	/* bus_space_write_4 */
	sparc_bus_space_write_8		/* bus_space_write_8 */
};
3119
3120 int
mm_md_physacc(paddr_t pa,vm_prot_t prot)3121 mm_md_physacc(paddr_t pa, vm_prot_t prot)
3122 {
3123
3124 return pmap_pa_exists(pa) ? 0 : EFAULT;
3125 }
3126
3127 int
mm_md_kernacc(void * ptr,vm_prot_t prot,bool * handled)3128 mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
3129 {
3130 const vaddr_t v = (vaddr_t)ptr;
3131
3132 *handled = (v >= MSGBUF_VA && v < MSGBUF_VA + PAGE_SIZE) ||
3133 (v >= prom_vstart && v < prom_vend && (prot & VM_PROT_WRITE) == 0);
3134 return 0;
3135 }
3136
3137 int
mm_md_readwrite(dev_t dev,struct uio * uio)3138 mm_md_readwrite(dev_t dev, struct uio *uio)
3139 {
3140
3141 switch (minor(dev)) {
3142 #if defined(SUN4)
3143 case DEV_EEPROM:
3144 if (cputyp == CPU_SUN4)
3145 return eeprom_uio(uio);
3146 #endif
3147 }
3148 return ENXIO;
3149 }
3150