1 /* $NetBSD: machdep.c,v 1.200 2024/05/17 21:37:07 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * Changed for the VAX port (and for readability) /IC
8 *
9 * This code is derived from software contributed to Berkeley by the Systems
10 * Programming Group of the University of Utah Computer Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: Utah Hdr: machdep.c 1.63 91/04/24
37 *
38 * @(#)machdep.c 7.16 (Berkeley) 6/3/91
39 */
40
41 /*
42 * Copyright (c) 2002, Hugh Graham.
43 * Copyright (c) 1994, 1998 Ludd, University of Lule}, Sweden.
44 * Copyright (c) 1993 Adam Glass
45 * Copyright (c) 1988 University of Utah.
46 *
47 * Changed for the VAX port (and for readability) /IC
48 *
49 * This code is derived from software contributed to Berkeley by the Systems
50 * Programming Group of the University of Utah Computer Science Department.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by the University of
63 * California, Berkeley and its contributors.
64 * 4. Neither the name of the University nor the names of its contributors
65 * may be used to endorse or promote products derived from this software
66 * without specific prior written permission.
67 *
68 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
69 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
70 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
71 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
72 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
73 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
74 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
75 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
76 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
77 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
78 * SUCH DAMAGE.
79 *
80 * from: Utah Hdr: machdep.c 1.63 91/04/24
81 *
82 * @(#)machdep.c 7.16 (Berkeley) 6/3/91
83 */
84
85 #include <sys/cdefs.h>
86 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.200 2024/05/17 21:37:07 thorpej Exp $");
87
88 #include "opt_ddb.h"
89 #include "opt_compat_netbsd.h"
90 #include "opt_compat_ultrix.h"
91 #include "opt_modular.h"
92 #include "opt_multiprocessor.h"
93 #include "opt_lockdebug.h"
94
95 #include <sys/param.h>
96 #include <sys/systm.h>
97 #include <sys/buf.h>
98 #include <sys/conf.h>
99 #include <sys/cpu.h>
100 #include <sys/device.h>
101 #include <sys/extent.h>
102 #include <sys/kernel.h>
103 #include <sys/ksyms.h>
104 #include <sys/mount.h>
105 #include <sys/msgbuf.h>
106 #include <sys/mbuf.h>
107 #include <sys/proc.h>
108 #include <sys/ptrace.h>
109 #include <sys/reboot.h>
110 #include <sys/kauth.h>
111 #include <sys/sysctl.h>
112 #include <sys/time.h>
113
114 #include <dev/cons.h>
115 #include <dev/mm.h>
116
117 #include <uvm/uvm_extern.h>
118
119 #include <machine/sid.h>
120 #include <machine/macros.h>
121 #include <machine/nexus.h>
122 #include <machine/reg.h>
123 #include <machine/scb.h>
124 #include <machine/leds.h>
125 #include <vax/vax/gencons.h>
126
127 #ifdef DDB
128 #include <machine/db_machdep.h>
129 #include <ddb/db_sym.h>
130 #include <ddb/db_extern.h>
131 #endif
132
133 #include "leds.h"
134 #include "smg.h"
135 #include "ksyms.h"
136
#define DEV_LEDS 13 /* minor device 13 is leds */

extern vaddr_t virtual_avail, virtual_end;
extern paddr_t avail_end;	/* end of physical memory; see mm_md_physacc() */

/*
 * We do these external declarations here, maybe they should be done
 * somewhere else...
 */
char machine[] = MACHINE;		/* from <machine/param.h> */
char machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
void *msgbufaddr;	/* kernel message buffer; initialized in cpu_startup() */
int *symtab_start;	/* symbol table bounds handed to ksyms in consinit() */
int *symtab_end;
int symtab_nsyms;
struct cpmbx *cpmbx;	/* Console program mailbox address */

/*
 * Extent map to manage I/O register space.  We allocate storage for
 * 32 regions in the map.  iomap_ex_malloc_safe will indicate that it's
 * safe to use malloc() to dynamically allocate region descriptors in
 * case we run out.
 */
static long iomap_ex_storage[EXTENT_FIXED_STORAGE_SIZE(32) / sizeof(long)];
static struct extent *iomap_ex;		/* created in consinit() */
static int iomap_ex_malloc_safe;	/* set at the end of cpu_startup() */

struct vm_map *phys_map = NULL;		/* submap for physio */

#ifdef DEBUG
int iospace_inited = 0;		/* guards early vax_map_physmem() calls */
#endif
169
/*
 * Machine-dependent startup: announce the system, run any CPU-model
 * specific configuration, set up the physio submap and report memory.
 * Called once, early in autoconfiguration.
 */
void
cpu_startup(void)
{
#if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
	vaddr_t minaddr, maxaddr;
#endif
	char pbuf[9];

	/*
	 * Initialize error message buffer.
	 */
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Good {morning,afternoon,evening,night}.
	 * Also call CPU init on systems that need that.
	 */
	printf("%s%s", copyright, version);
	printf("%s\n", cpu_getmodel());
	if (dep_call->cpu_conf)
		(*dep_call->cpu_conf)();

	format_bytes(pbuf, sizeof(pbuf), avail_end);
	printf("total memory = %s\n", pbuf);
	panicstr = NULL;	/* NOTE(review): presumably clears an early-boot marker */
	mtpr(AST_NO, PR_ASTLVL);
	spl0();			/* interrupts may now be taken */

#if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
	minaddr = 0;

	/*
	 * Allocate a submap for physio.  This map effectively limits the
	 * number of processes doing physio at any one time.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);
#endif

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* malloc() works from here on; extent maps may use it. */
	iomap_ex_malloc_safe = 1;
}
219
uint32_t dumpmag = 0x8fca0101;	/* magic number identifying a crash dump */
int dumpsize = 0;		/* dump size in pages; 0 = not configured */
long dumplo = 0;		/* dump offset on dumpdev, in disk blocks */
223
/*
 * Work out where on dumpdev a crash dump should be written (dumplo)
 * and clamp its size (dumpsize) to what fits on the device.
 */
void
cpu_dumpconf(void)
{
	int nblks;

	/*
	 * XXX include the final RAM page which is not included in physmem.
	 */
	if (dumpdev == NODEV)
		return;
	nblks = bdev_size(dumpdev);
	if (nblks > 0) {
		/* Clamp the dump to the space remaining past dumplo... */
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		/* ...or, if dumplo wasn't set, dump at the end of the device. */
		else if (dumplo == 0)
			dumplo = nblks - btodb(ctob(dumpsize));
	}
	/*
	 * Don't dump on the first PAGE_SIZE (why PAGE_SIZE?) in case the dump
	 * device includes a disk label.
	 */
	if (dumplo < btodb(PAGE_SIZE))
		dumplo = btodb(PAGE_SIZE);

	/*
	 * If we have nothing to dump (XXX implement crash dumps),
	 * make it clear for savecore that there is no dump.
	 */
	if (dumpsize <= 0)
		dumplo = 0;
}
255
256 static int
sysctl_machdep_booted_device(SYSCTLFN_ARGS)257 sysctl_machdep_booted_device(SYSCTLFN_ARGS)
258 {
259 struct sysctlnode node = *rnode;
260
261 if (booted_device == NULL)
262 return (EOPNOTSUPP);
263 node.sysctl_data = __UNCONST(device_xname(booted_device));
264 node.sysctl_size = strlen(device_xname(booted_device)) + 1;
265 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
266 }
267
/*
 * Create the machdep sysctl subtree: the printfataltraps knob, the
 * console device node and the booted-device name.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* Top-level machdep node. */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "printfataltraps", NULL,
	    NULL, 0, &cpu_printfataltraps, 0,
	    CTL_MACHDEP, CPU_PRINTFATALTRAPS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "console_device", NULL,
	    sysctl_consdev, 0, NULL, sizeof(dev_t),
	    CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "booted_device", NULL,
	    sysctl_machdep_booted_device, 0, NULL, 0,
	    CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	/*
	 * I don't think CPU_BOOTED_KERNEL is available to the kernel.
	 */
}
296
/*
 * Change the statistics clock rate: nothing to do on this port,
 * no separate statistics clock is reprogrammed here.
 */
void
setstatclockrate(int hzrate)
{
}
301
/*
 * Console initialization.  Creates the I/O space extent map (needed
 * before cninit() may map console registers), attaches the console,
 * and registers the symbol table (symtab_*) with ksyms if present.
 */
void
consinit(void)
{
	extern vaddr_t iospace;

	/*
	 * Init I/O memory extent map.  Must be done before cninit()
	 * is called; we may want to use iospace in the console routines.
	 *
	 * NOTE: We need to reserve the first vax-page of iospace
	 * for the console routines.
	 */
	KASSERT(iospace != 0);
	iomap_ex = extent_create("iomap", iospace + VAX_NBPG,
	    iospace + ((IOSPSZ * VAX_NBPG) - 1),
	    (void *) iomap_ex_storage, sizeof(iomap_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);
#ifdef DEBUG
	iospace_inited = 1;
#endif
	cninit();
#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (symtab_start != NULL && symtab_nsyms != 0 && symtab_end != NULL) {
		ksyms_addsyms_elf(symtab_nsyms, symtab_start, symtab_end);
	}
#endif
#ifdef DEBUG
	/* The pcb must fit below the red zone at the top of the kernel stack. */
	if (sizeof(struct pcb) > REDZONEADDR)
		panic("struct pcb inside red zone");
#endif
}
333
int waittime = -1;	/* -1 until cpu_reboot() has synced the disks once */
static volatile int showto; /* Must be volatile to survive MM on -> MM off */
336
/*
 * Halt or reboot the machine.  howto carries the RB_* flags from
 * reboot(2); b is an optional bootstring (currently unused, see the
 * "notyet" block below).  Syncs the disks once, then either spins
 * after a halt or hands control to the CPU-specific reboot routine /
 * console program.  Does not return.
 */
void
cpu_reboot(int howto, char *b)
{
	/* Sync filesystems unless told not to; do it at most once. */
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
	}
	splhigh();		/* extreme priority */
	if (howto & RB_HALT) {
		doshutdownhooks();
		pmf_system_shutdown(boothowto);
		if (dep_call->cpu_halt)
			(*dep_call->cpu_halt) ();
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		for (;;)
			;
	} else {
		/* showto is volatile so it survives turning MM off. */
		showto = howto;
#ifdef notyet
		/*
		 * If we are provided with a bootstring, parse it and send
		 * it to the boot program.
		 */
		if (b)
			while (*b) {
				showto |= (*b == 'a' ? RB_ASKBOOT : (*b == 'd' ?
				    RB_DEBUG : (*b == 's' ? RB_SINGLE : 0)));
				b++;
			}
#endif
		/*
		 * Now it's time to:
		 *  0. Save some registers that are needed in new world.
		 *  1. Change stack to somewhere that will survive MM off.
		 * (RPB page is good page to save things in).
		 *  2. Actually turn MM off.
		 *  3. Dump away memory to disk, if asked.
		 *  4. Reboot as asked.
		 * The RPB page is _always_ first page in memory, we can
		 * rely on that.
		 */
#ifdef notyet
		__asm(	"\tmovl %sp, (0x80000200)\n"
			"\tmovl 0x80000200, %sp\n"
			"\tmfpr $0x10, -(%sp)\n"	/* PR_PCBB */
			"\tmfpr $0x11, -(%sp)\n"	/* PR_SCBB */
			"\tmfpr $0xc, -(%sp)\n"	/* PR_SBR */
			"\tmfpr $0xd, -(%sp)\n"	/* PR_SLR */
			"\tmtpr $0, $0x38\n"	/* PR_MAPEN */
		);
#endif

		if (showto & RB_DUMP)
			dumpsys();
		if (dep_call->cpu_reboot)
			(*dep_call->cpu_reboot)(showto);

		/* cpus that don't handle reboots get the standard reboot. */
		while ((mfpr(PR_TXCS) & GC_RDY) == 0)
			;	/* wait for the console transmitter */

		mtpr(GC_CONS|GC_BTFL, PR_TXDB);
	}
	/* Pass the boot flags to the boot program, then stop. */
	__asm("movl %0, %%r5":: "g" (showto)); /* How to boot */
	__asm("movl %0, %%r11":: "r"(showto)); /* ??? */
	__asm("halt");
	panic("Halt sket sej");
}
405
406 void
dumpsys(void)407 dumpsys(void)
408 {
409 const struct bdevsw *bdev;
410
411 if (dumpdev == NODEV)
412 return;
413 bdev = bdevsw_lookup(dumpdev);
414 if (bdev == NULL)
415 return;
416 /*
417 * For dumps during autoconfiguration, if dump device has already
418 * configured...
419 */
420 if (dumpsize == 0)
421 cpu_dumpconf();
422 if (dumplo <= 0) {
423 printf("\ndump to dev %u,%u not possible\n",
424 major(dumpdev), minor(dumpdev));
425 return;
426 }
427 printf("\ndumping to dev %u,%u offset %ld\n",
428 major(dumpdev), minor(dumpdev), dumplo);
429 printf("dump ");
430 switch ((*bdev->d_dump) (dumpdev, 0, 0, 0)) {
431
432 case ENXIO:
433 printf("device bad\n");
434 break;
435
436 case EFAULT:
437 printf("device not ready\n");
438 break;
439
440 case EINVAL:
441 printf("area improper\n");
442 break;
443
444 case EIO:
445 printf("i/o error\n");
446 break;
447
448 default:
449 printf("succeeded\n");
450 break;
451 }
452 }
453
454 int
process_read_regs(struct lwp * l,struct reg * regs)455 process_read_regs(struct lwp *l, struct reg *regs)
456 {
457 struct trapframe * const tf = l->l_md.md_utf;
458
459 memcpy(®s->r0, &tf->tf_r0, 12 * sizeof(int));
460 regs->ap = tf->tf_ap;
461 regs->fp = tf->tf_fp;
462 regs->sp = tf->tf_sp;
463 regs->pc = tf->tf_pc;
464 regs->psl = tf->tf_psl;
465 return 0;
466 }
467
468 int
process_write_regs(struct lwp * l,const struct reg * regs)469 process_write_regs(struct lwp *l, const struct reg *regs)
470 {
471 struct trapframe * const tf = l->l_md.md_utf;
472
473 memcpy(&tf->tf_r0, ®s->r0, 12 * sizeof(int));
474 tf->tf_ap = regs->ap;
475 tf->tf_fp = regs->fp;
476 tf->tf_sp = regs->sp;
477 tf->tf_pc = regs->pc;
478 tf->tf_psl = (regs->psl|PSL_U|PSL_PREVU) &
479 ~(PSL_MBZ|PSL_IS|PSL_IPL1F|PSL_CM); /* Allow compat mode? */
480 return 0;
481 }
482
483 int
process_set_pc(struct lwp * l,void * addr)484 process_set_pc(struct lwp *l, void *addr)
485 {
486 l->l_md.md_utf->tf_pc = (uintptr_t) addr;
487
488 return (0);
489 }
490
491 int
process_sstep(struct lwp * l,int sstep)492 process_sstep(struct lwp *l, int sstep)
493 {
494 struct trapframe * const tf = l->l_md.md_utf;
495
496 if (sstep)
497 tf->tf_psl |= PSL_T;
498 else
499 tf->tf_psl &= ~PSL_T;
500
501 return (0);
502 }
503
504 #undef PHYSMEMDEBUG
505 /*
506 * Allocates a virtual range suitable for mapping in physical memory.
507 * This differs from the bus_space routines in that it allocates on
508 * physical page sizes instead of logical sizes. This implementation
509 * uses resource maps when allocating space, which is allocated from
510 * the IOMAP submap. The implementation is similar to the uba resource
511 * map handling. Size is given in pages.
512 * If the page requested is bigger than a logical page, space is
513 * allocated from the kernel map instead.
514 *
515 * It is known that the first page in the iospace area is unused; it may
516 * be use by console device drivers (before the map system is inited).
517 */
518 vaddr_t
vax_map_physmem(paddr_t phys,size_t size)519 vax_map_physmem(paddr_t phys, size_t size)
520 {
521 vaddr_t addr;
522 int error;
523 static int warned = 0;
524
525 #ifdef DEBUG
526 if (!iospace_inited)
527 panic("vax_map_physmem: called before rminit()?!?");
528 #endif
529 if (size >= LTOHPN) {
530 addr = uvm_km_alloc(kernel_map, size * VAX_NBPG, 0,
531 UVM_KMF_VAONLY);
532 if (addr == 0)
533 panic("vax_map_physmem: kernel map full");
534 } else {
535 error = extent_alloc(iomap_ex, size * VAX_NBPG, VAX_NBPG, 0,
536 EX_FAST | EX_NOWAIT |
537 (iomap_ex_malloc_safe ? EX_MALLOCOK : 0), &addr);
538 if (error) {
539 if (warned++ == 0) /* Warn only once */
540 printf("vax_map_physmem: iomap too small");
541 return 0;
542 }
543 }
544 ioaccess(addr, phys, size);
545 #ifdef PHYSMEMDEBUG
546 printf("vax_map_physmem: alloc'ed %d pages for paddr %lx, at %lx\n",
547 size, phys, addr);
548 #endif
549 return addr | (phys & VAX_PGOFSET);
550 }
551
552 /*
553 * Unmaps the previous mapped (addr, size) pair.
554 */
555 void
vax_unmap_physmem(vaddr_t addr,size_t size)556 vax_unmap_physmem(vaddr_t addr, size_t size)
557 {
558 #ifdef PHYSMEMDEBUG
559 printf("vax_unmap_physmem: unmapping %zu pages at addr %lx\n",
560 size, addr);
561 #endif
562 addr &= ~VAX_PGOFSET;
563 iounaccess(addr, size);
564 if (size >= LTOHPN)
565 uvm_km_free(kernel_map, addr, size * VAX_NBPG, UVM_KMF_VAONLY);
566 else if (extent_free(iomap_ex, addr, size * VAX_NBPG,
567 EX_NOWAIT |
568 (iomap_ex_malloc_safe ? EX_MALLOCOK : 0)))
569 printf("vax_unmap_physmem: addr 0x%lx size %zu vpg: "
570 "can't free region\n", addr, size);
571 }
572
/*
 * The four soft-interrupt IPLs packed into one constant, five bits per
 * SOFTINT_* level; softint_init_md() extracts them by shifting.
 */
#define SOFTINT_IPLS ((IPL_SOFTCLOCK << (SOFTINT_CLOCK * 5)) \
    | (IPL_SOFTBIO << (SOFTINT_BIO * 5)) \
    | (IPL_SOFTNET << (SOFTINT_NET * 5)) \
    | (IPL_SOFTSERIAL << (SOFTINT_SERIAL * 5)))

/*
 * Register the softint lwp for the given level on its CPU and return
 * (via *machdep) the IPL that level runs at.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	const int ipl = (SOFTINT_IPLS >> (5 * level)) & 0x1F;
	l->l_cpu->ci_softlwps[level] = l;

	*machdep = ipl;
}
586
#include <dev/bi/bivar.h>
/*
 * This should be somewhere else.
 */
void
bi_intr_establish(void *icookie, int vec, void (*func)(void *), void *arg,
	struct evcnt *ev)
{
	/*
	 * BI interrupts just get an SCB vector; SCB_ISTACK selects the
	 * interrupt stack.  icookie is unused here.
	 */
	scb_vecalloc(vec, func, arg, SCB_ISTACK, ev);
}
597
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
/*
 * Called from locore.  C-callable wrappers so the assembly stubs can
 * take and release the big kernel lock.
 */
void krnlock(void);
void krnunlock(void);

/* Acquire one hold on the big kernel lock. */
void
krnlock(void)
{
	KERNEL_LOCK(1, NULL);
}

/* Release one hold on the big kernel lock. */
void
krnunlock(void)
{
	KERNEL_UNLOCK_ONE(NULL);
}
#endif
617
/*
 * Fill in the CPU part of an mcontext from lwp l's trapframe and mark
 * it valid by setting _UC_CPU in *flags.  Used for signal delivery
 * and getcontext(2).
 */
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	const struct trapframe * const tf = l->l_md.md_utf;
	__greg_t *gr = mcp->__gregs;

	gr[_REG_R0] = tf->tf_r0;
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_AP] = tf->tf_ap;
	gr[_REG_FP] = tf->tf_fp;
	gr[_REG_SP] = tf->tf_sp;
	gr[_REG_PC] = tf->tf_pc;
	gr[_REG_PSL] = tf->tf_psl;
	*flags |= _UC_CPU;
}
643
644 int
cpu_mcontext_validate(struct lwp * l,const mcontext_t * mcp)645 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
646 {
647 const __greg_t *gr = mcp->__gregs;
648
649 if ((gr[_REG_PSL] & (PSL_IPL | PSL_IS)) ||
650 ((gr[_REG_PSL] & (PSL_U | PSL_PREVU)) != (PSL_U | PSL_PREVU)) ||
651 (gr[_REG_PSL] & PSL_CM))
652 return EINVAL;
653
654 return 0;
655 }
656
/*
 * Install a (validated) mcontext into lwp l's trapframe.  Honors
 * _UC_TLSBASE (pop the TLS pointer off the user stack) and the
 * _UC_SETSTACK/_UC_CLRSTACK signal-stack flags.  Returns 0 or an
 * error from validation/copyin.
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe * const tf = l->l_md.md_utf;
	const __greg_t *gr = mcp->__gregs;
	int error;

	if ((flags & _UC_CPU) == 0)
		return 0;

	/* Refuse PSL values that would escape user mode. */
	error = cpu_mcontext_validate(l, mcp);
	if (error)
		return error;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_ap = gr[_REG_AP];
	tf->tf_fp = gr[_REG_FP];
	tf->tf_sp = gr[_REG_SP];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_psl = gr[_REG_PSL];

	if (flags & _UC_TLSBASE) {
		void *tlsbase;

		/* The new TLS base was pushed on the user stack; pop it. */
		error = copyin((void *)tf->tf_sp, &tlsbase, sizeof(tlsbase));
		if (error) {
			return error;
		}
		lwp_setprivate(l, tlsbase);
		tf->tf_sp += sizeof(tlsbase);
	}

	mutex_enter(l->l_proc->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(l->l_proc->p_lock);

	return 0;
}
709
710 /*
711 * Generic routines for machines with "console program mailbox".
712 */
713 void
generic_halt(void)714 generic_halt(void)
715 {
716 if (cpmbx == NULL) /* Too late to complain here, but avoid panic */
717 __asm("halt");
718
719 if (cpmbx->user_halt != UHALT_DEFAULT) {
720 if (cpmbx->mbox_halt != 0)
721 cpmbx->mbox_halt = 0; /* let console override */
722 } else if (cpmbx->mbox_halt != MHALT_HALT)
723 cpmbx->mbox_halt = MHALT_HALT; /* the os decides */
724
725 __asm("halt");
726 }
727
728 void
generic_reboot(int arg)729 generic_reboot(int arg)
730 {
731 if (cpmbx == NULL) /* Too late to complain here, but avoid panic */
732 __asm("halt");
733
734 if (cpmbx->user_halt != UHALT_DEFAULT) {
735 if (cpmbx->mbox_halt != 0)
736 cpmbx->mbox_halt = 0;
737 } else if (cpmbx->mbox_halt != MHALT_REBOOT)
738 cpmbx->mbox_halt = MHALT_REBOOT;
739
740 __asm("halt");
741 }
742
743 bool
mm_md_direct_mapped_phys(paddr_t paddr,vaddr_t * vaddr)744 mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
745 {
746
747 *vaddr = paddr + KERNBASE;
748 return true;
749 }
750
751 int
mm_md_physacc(paddr_t pa,vm_prot_t prot)752 mm_md_physacc(paddr_t pa, vm_prot_t prot)
753 {
754
755 return (pa < avail_end) ? 0 : EFAULT;
756 }
757
/*
 * /dev/mem support: machine-dependent minor devices.  Only the leds
 * minor (DEV_LEDS) is handled, and only when leds are configured in;
 * everything else is ENXIO.
 */
int
mm_md_readwrite(dev_t dev, struct uio *uio)
{

	switch (minor(dev)) {
#if NLEDS
	case DEV_LEDS:
		return leds_uio(uio);
#endif
	default:
		return ENXIO;
	}
}
771
772 /*
773 * Set max virtual size a process may allocate.
774 * This could be tuned based on amount of physical memory.
775 */
776 void
machdep_init(void)777 machdep_init(void)
778 {
779 proc0.p_rlimit[RLIMIT_AS].rlim_cur = MAXDSIZ;
780 proc0.p_rlimit[RLIMIT_AS].rlim_max = MAXDSIZ;
781 }
782