/* $NetBSD: sh3_machdep.c,v 1.113 2023/12/20 15:34:45 thorpej Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sh3_machdep.c,v 1.113 2023/12/20 15:34:45 thorpej Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_memsize.h"
#include "opt_kstack_debug.h"
#include "opt_ptrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/ras.h>
#include <sys/syscallargs.h>
#include <sys/ucontext.h>
#include <sys/cpu.h>
#include <sys/bus.h>

#ifdef KGDB
#include <sys/kgdb.h>
#ifndef KGDB_DEVNAME
#define KGDB_DEVNAME "nodev"
#endif
const char kgdb_devname[] = KGDB_DEVNAME;
#endif /* KGDB */

#include <uvm/uvm.h>

#include <sh3/cache.h>
#include <sh3/clock.h>
#include <sh3/exception.h>
#include <sh3/locore.h>
#include <sh3/mmu.h>
#include <sh3/pcb.h>
#include <sh3/intr.h>
#include <sh3/ubcreg.h>

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;
int cpu_arch;
int cpu_product;

struct vm_map *phys_map;

struct pcb *curpcb;

#if !defined(IOM_RAM_BEGIN)
#error "define IOM_RAM_BEGIN"
#elif (IOM_RAM_BEGIN & SH3_P1SEG_BASE) != 0
#error "IOM_RAM_BEGIN must be a physical address, not a P1 address."
#endif

#define VBR (uint8_t *)SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN)
vaddr_t ram_start = SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN);
/* exception handler holder (sh3/sh3/exception_vector.S) */
extern char sh_vector_generic[], sh_vector_generic_end[];
extern char sh_vector_interrupt[], sh_vector_interrupt_end[];
#ifdef SH3
extern char sh3_vector_tlbmiss[], sh3_vector_tlbmiss_end[];
#endif
#ifdef SH4
extern char sh4_vector_tlbmiss[], sh4_vector_tlbmiss_end[];
#endif
/*
 * These variables are needed by /sbin/savecore
 */
uint32_t dumpmag = 0x8fca0101;	/* magic number */
int dumpsize;			/* pages */
long dumplo;			/* blocks */


void
sh_cpu_init(int arch, int product)
{
	/* CPU type */
	cpu_arch = arch;
	cpu_product = product;

#if defined(SH3) && defined(SH4)
	/* Set register addresses */
	sh_devreg_init();
#endif
	/* Cache access ops. */
	sh_cache_init();

	/* MMU access ops. */
	sh_mmu_init();

	/* Hardclock, RTC initialize. */
	machine_clock_init();

	/* ICU initialize. */
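	/* An interrupt depth of -1 means we are not servicing an interrupt. */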
	curcpu()->ci_idepth = -1;
	intc_init();

	/* Copy the exception handlers to their vector offsets from VBR. */
	memcpy(VBR + 0x100, sh_vector_generic,
	    sh_vector_generic_end - sh_vector_generic);
#ifdef SH3
	if (CPU_IS_SH3)
		memcpy(VBR + 0x400, sh3_vector_tlbmiss,
		    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		memcpy(VBR + 0x400, sh4_vector_tlbmiss,
		    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
#endif
	memcpy(VBR + 0x600, sh_vector_interrupt,
	    sh_vector_interrupt_end - sh_vector_interrupt);

	if (!SH_HAS_UNIFIED_CACHE)
		sh_icache_sync_all();

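	/* Point VBR at the table so exceptions use the handlers just copied. */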
	__asm volatile("ldc %0, vbr" :: "r"(VBR));

	/* kernel stack setup */
	__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;

	uvm_md_init();
	/* setup UBC channel A for single-stepping */
#if defined(PTRACE_HOOKS) || defined(DDB)
	_reg_write_2(SH_(BBRA), 0); /* disable channel A */
	_reg_write_2(SH_(BBRB), 0); /* disable channel B */

#ifdef SH3
	if (CPU_IS_SH3) {
		/* A: break after execution, ignore ASID */
		_reg_write_4(SH3_BRCR, (UBC_CTL_A_AFTER_INSN
		    | SH3_UBC_CTL_A_MASK_ASID));

		/* A: compare all address bits */
		_reg_write_4(SH3_BAMRA, 0x00000000);
	}
#endif /* SH3 */

#ifdef SH4
	if (CPU_IS_SH4) {
		/* A: break after execution */
		_reg_write_2(SH4_BRCR, UBC_CTL_A_AFTER_INSN);

		/* A: compare all address bits, ignore ASID */
		_reg_write_1(SH4_BAMRA, SH4_UBC_MASK_NONE | SH4_UBC_MASK_ASID);
	}
#endif /* SH4 */
#endif
}


/*
 * void sh_proc0_init(void):
 *	Setup proc0 u-area.
 */
void
sh_proc0_init(void)
{
	struct switchframe *sf;
	vaddr_t u;

	/* Steal process0 u-area */
	u = uvm_pageboot_alloc(USPACE);
	memset((void *)u, 0, USPACE);

	/* Setup uarea for lwp0 */
	uvm_lwp_setuarea(&lwp0, u);

	/*
	 * u-area map:
	 * |pcb| ....  | .................. |
	 * | PAGE_SIZE | USPACE - PAGE_SIZE |
	 *         frame bot          stack bot
	 * current frame ... r6_bank
	 * stack bottom  ... r7_bank
	 * current stack ... r15
	 */
	curpcb = lwp_getpcb(&lwp0);
	lwp0.l_md.md_pcb = curpcb;

	sf = &curpcb->pcb_sf;

#ifdef KSTACK_DEBUG
	memset((char *)(u + sizeof(struct pcb)), 0x5a,
	    PAGE_SIZE - sizeof(struct pcb));
	memset((char *)(u + PAGE_SIZE), 0xa5, USPACE - PAGE_SIZE);
	memset(sf, 0xb4, sizeof(struct switchframe));
#endif /* KSTACK_DEBUG */

	sf->sf_r6_bank = u + PAGE_SIZE;
	sf->sf_r7_bank = sf->sf_r15 = u + USPACE;
	__asm volatile("ldc %0, r6_bank" :: "r"(sf->sf_r6_bank));
	__asm volatile("ldc %0, r7_bank" :: "r"(sf->sf_r7_bank));

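	/* lwp0's trapframe sits just below the frame bottom (r6_bank). */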
	lwp0.l_md.md_regs = (struct trapframe *)sf->sf_r6_bank - 1;
}

void
sh_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];
	const char *model = cpu_getmodel();

	printf("%s%s", copyright, version);
	if (*model != '\0')
		printf("%s\n", model);
#ifdef DEBUG
	printf("general exception handler:\t%d bytes\n",
	    sh_vector_generic_end - sh_vector_generic);
	printf("TLB miss exception handler:\t%d bytes\n",
#if defined(SH3) && defined(SH4)
	    CPU_IS_SH3 ? sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#elif defined(SH3)
	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss
#elif defined(SH4)
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#endif
	    );
	printf("interrupt exception handler:\t%d bytes\n",
	    sh_vector_interrupt_end - sh_vector_interrupt);
#endif /* DEBUG */

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);
}

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf(void)
{
}

void
dumpsys(void)
{
}

/*
 * Get the base address of the signal frame either on the lwp's stack
 * or on the signal stack and set *onstack accordingly.  Caller then
 * just subtracts the size of appropriate struct sigframe_foo.
 */
void *
getframe(const struct lwp *l, int sig, int *onstack)
{
	const struct proc *p = l->l_proc;
	const stack_t *sigstk = &l->l_sigstk;

	/* Do we need to jump onto the signal stack? */
	*onstack = (sigstk->ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
	    && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	if (*onstack)
		return ((char *)sigstk->ss_sp + sigstk->ss_size);
	else
		return ((void *)l->l_md.md_regs->tf_r15);
}

void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	struct trapframe *tf = l->l_md.md_regs;
	int sig = ksi->ksi_signo, error;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct sigframe_siginfo *fp, frame;
	int onstack;

	fp = getframe(l, sig, &onstack);
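	/* Make room for one struct sigframe_siginfo below the frame base. */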
	--fp;

	memset(&frame, 0, sizeof(frame));
	frame.sf_si._info = ksi->ksi_info;
	frame.sf_uc.uc_link = l->l_ctxlink;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_flags = _UC_SIGMASK;
	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
	    ? _UC_SETSTACK : _UC_CLRSTACK;
	sendsig_reset(l, sig);
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	tf->tf_r4 = sig;		/* "signum" argument for handler */
	tf->tf_r5 = (int)&fp->sf_si;	/* "sip" argument for handler */
	tf->tf_r6 = (int)&fp->sf_uc;	/* "ucp" argument for handler */
	tf->tf_spc = (int)catcher;
	tf->tf_r15 = (int)fp;
	tf->tf_pr = (int)ps->sa_sigdesc[sig].sd_tramp;

	/* Remember if we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}

void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	const struct trapframe *tf = l->l_md.md_regs;
	__greg_t *gr = mcp->__gregs;
	__greg_t ras_pc;

	/* Save register context. */
	gr[_REG_GBR] = tf->tf_gbr;
	gr[_REG_PC] = tf->tf_spc;
	gr[_REG_SR] = tf->tf_ssr;
	gr[_REG_MACL] = tf->tf_macl;
	gr[_REG_MACH] = tf->tf_mach;
	gr[_REG_PR] = tf->tf_pr;
	gr[_REG_R14] = tf->tf_r14;
	gr[_REG_R13] = tf->tf_r13;
	gr[_REG_R12] = tf->tf_r12;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R0] = tf->tf_r0;
	gr[_REG_R15] = tf->tf_r15;

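	/* If the PC is inside a restartable atomic sequence, report its restart address. */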
	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
	    (void *) gr[_REG_PC])) != -1)
		gr[_REG_PC] = ras_pc;

	*flags |= (_UC_CPU|_UC_TLSBASE);

	/* FPU context is currently not handled by the kernel. */
	memset(&mcp->__fpregs, 0, sizeof (mcp->__fpregs));
}

int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
	struct trapframe *tf = l->l_md.md_regs;
	const __greg_t *gr = mcp->__gregs;

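	/* Reject attempts to change SR bits that user mode may not modify. */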
	if (((tf->tf_ssr ^ gr[_REG_SR]) & PSL_USERSTATIC) != 0)
		return EINVAL;

	return 0;
}

int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	const __greg_t *gr = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;

	/* Restore register context, if any. */
	if ((flags & _UC_CPU) != 0) {
		/* Check for security violations. */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* done in lwp_setprivate */
		/* tf->tf_gbr = gr[_REG_GBR]; */
		tf->tf_spc = gr[_REG_PC];
		tf->tf_ssr = gr[_REG_SR];
		tf->tf_macl = gr[_REG_MACL];
		tf->tf_mach = gr[_REG_MACH];
		tf->tf_pr = gr[_REG_PR];
		tf->tf_r14 = gr[_REG_R14];
		tf->tf_r13 = gr[_REG_R13];
		tf->tf_r12 = gr[_REG_R12];
		tf->tf_r11 = gr[_REG_R11];
		tf->tf_r10 = gr[_REG_R10];
		tf->tf_r9 = gr[_REG_R9];
		tf->tf_r8 = gr[_REG_R8];
		tf->tf_r7 = gr[_REG_R7];
		tf->tf_r6 = gr[_REG_R6];
		tf->tf_r5 = gr[_REG_R5];
		tf->tf_r4 = gr[_REG_R4];
		tf->tf_r3 = gr[_REG_R3];
		tf->tf_r2 = gr[_REG_R2];
		tf->tf_r1 = gr[_REG_R1];
		tf->tf_r0 = gr[_REG_R0];
		tf->tf_r15 = gr[_REG_R15];

		if (flags & _UC_TLSBASE)
			lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_GBR]);
	}

#if 0
	/* XXX: FPU context is currently not handled by the kernel. */
	if (flags & _UC_FPU) {
		/* TODO */;
	}
#endif

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}

/*
 * Clear registers on exec
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe *tf;

	l->l_md.md_flags &= ~(MDL_USEDFPU | MDL_SSTEP);

	tf = l->l_md.md_regs;

	tf->tf_ssr = PSL_USERSET;
	tf->tf_spc = pack->ep_entry;
	tf->tf_pr = 0;

	tf->tf_gbr = 0;
	tf->tf_macl = 0;
	tf->tf_mach = 0;

	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	tf->tf_r2 = 0;
	tf->tf_r3 = 0;
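	/*
	 * The new stack pointer addresses argc; argv follows at stack + 4,
	 * and envp begins after argv's NULL terminator at stack + 4 * argc + 8.
	 */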
	if (ufetch_int((void *)stack, (u_int *)&tf->tf_r4) != 0)	/* argc */
		tf->tf_r4 = -1;
	tf->tf_r5 = stack + 4;			/* argv */
	tf->tf_r6 = stack + 4 * tf->tf_r4 + 8;	/* envp */
	tf->tf_r7 = 0;
	tf->tf_r8 = 0;
	tf->tf_r9 = l->l_proc->p_psstrp;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_r15 = stack;
}

/*
 * Jump to reset vector.
 */
void
cpu_reset(void)
{

	_cpu_exception_suspend();
	_reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);

#ifndef __lint__
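	/* Jump to the CPU reset entry point at the start of the uncached P2 segment. */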
	goto *(void *)0xa0000000;
#endif
	/* NOTREACHED */
}

int
cpu_lwp_setprivate(lwp_t *l, void *addr)
{

	l->l_md.md_regs->tf_gbr = (int)addr;
	return 0;
}