/* $OpenBSD: trap.c,v 1.111 2024/01/11 19:16:26 miod Exp $ */
/* $NetBSD: trap.c,v 1.52 2000/05/24 16:48:33 thorpej Exp $ */

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999 Christopher G. Demetriou. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christopher G. Demetriou
 *	for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/user.h>
#include <sys/syscall.h>
#include <sys/syscall_mi.h>
#include <sys/buf.h>
#ifndef NO_IEEE
#include <sys/device.h>
#endif
#include <sys/ptrace.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/reg.h>
#ifdef DDB
#include <machine/db_machdep.h>
#endif
#include <alpha/alpha/db_instruction.h>

int handle_opdec(struct proc *p, u_int64_t *ucodep);

#ifndef NO_IEEE
struct device fpevent_use;
struct device fpevent_reuse;
#endif

void printtrap(const unsigned long, const unsigned long, const unsigned long,
    const unsigned long, struct trapframe *, int, int);

/*
 * Initialize the trap vectors for the current processor.
 */
void
trap_init()
{

	/*
	 * Point interrupt/exception vectors to our own.
	 */
	alpha_pal_wrent(XentInt, ALPHA_KENTRY_INT);
	alpha_pal_wrent(XentArith, ALPHA_KENTRY_ARITH);
	alpha_pal_wrent(XentMM, ALPHA_KENTRY_MM);
	alpha_pal_wrent(XentIF, ALPHA_KENTRY_IF);
	alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA);
	alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS);

	/*
	 * Clear pending machine checks and error reports, and enable
	 * system- and processor-correctable error reporting.
	 */
	alpha_pal_wrmces(alpha_pal_rdmces() &
	    ~(ALPHA_MCES_DSC|ALPHA_MCES_DPC));
}

void
printtrap(const unsigned long a0, const unsigned long a1,
    const unsigned long a2, const unsigned long entry, struct trapframe *framep,
    int isfatal, int user)
{
	char ubuf[64];
	const char *entryname;

	switch (entry) {
	case ALPHA_KENTRY_INT:
		entryname = "interrupt";
		break;
	case ALPHA_KENTRY_ARITH:
		entryname = "arithmetic trap";
		break;
	case ALPHA_KENTRY_MM:
		entryname = "memory management fault";
		break;
	case ALPHA_KENTRY_IF:
		entryname = "instruction fault";
		break;
	case ALPHA_KENTRY_UNA:
		entryname = "unaligned access fault";
		break;
	case ALPHA_KENTRY_SYS:
		entryname = "system call";
		break;
	default:
		snprintf(ubuf, sizeof ubuf, "type %lx", entry);
		entryname = (const char *) ubuf;
		break;
	}

	printf("\n");
	printf("%s %s trap:\n", isfatal? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf(" trap entry = 0x%lx (%s)\n", entry, entryname);
	printf(" a0 = 0x%lx\n", a0);
	printf(" a1 = 0x%lx\n", a1);
	printf(" a2 = 0x%lx\n", a2);
	printf(" pc = 0x%lx\n", framep->tf_regs[FRAME_PC]);
	printf(" ra = 0x%lx\n", framep->tf_regs[FRAME_RA]);
	printf(" curproc = %p\n", curproc);
	if (curproc != NULL)
		printf(" pid = %d, comm = %s\n",
		    curproc->p_p->ps_pid, curproc->p_p->ps_comm);
	printf("\n");
}

/*
 * Trap is called from locore to handle most types of processor traps.
 * System calls are broken out for efficiency and ASTs are broken out
 * to make the code a bit cleaner and more representative of the
 * Alpha architecture.
 */
void
trap(a0, a1, a2, entry, framep)
	const unsigned long a0, a1, a2, entry;
	struct trapframe *framep;
{
	struct proc *p;
	int i;
	u_int64_t ucode;
	int user;
#if defined(DDB)
	int call_debugger = 1;
#endif
	caddr_t v;
	int typ;
	union sigval sv;
	vm_prot_t access_type;
	unsigned long onfault;

	atomic_add_int(&uvmexp.traps, 1);
	p = curproc;
	ucode = 0;
	v = 0;
	typ = SI_NOINFO;
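
	/*
	 * The PALcode does not save the user stack pointer in the trap
	 * frame, so fetch it with rdusp and stash it where the rest of
	 * the kernel expects to find it.
	 */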
	framep->tf_regs[FRAME_SP] = alpha_pal_rdusp();
	user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0;
	if (user) {
		p->p_md.md_tf = framep;
		refreshcreds(p);
	}

	switch (entry) {
	case ALPHA_KENTRY_UNA:
		/*
		 * If user-land, deliver SIGBUS unconditionally.
		 */
		if (user) {
			i = SIGBUS;
			ucode = ILL_ILLADR;
			v = (caddr_t)a0;
			break;
		}

		/*
		 * Unaligned access from kernel mode is always an error,
		 * EVEN IF A COPY FAULT HANDLER IS SET!
		 *
		 * It's an error if a copy fault handler is set because
		 * the various routines which do user-initiated copies
		 * do so in a bcopy-like manner. In other words, the
		 * kernel never assumes that pointers provided by the
		 * user are properly aligned, and so if the kernel
		 * does cause an unaligned access it's a kernel bug.
		 */
		goto dopanic;

	case ALPHA_KENTRY_ARITH:
		/*
		 * Resolve trap shadows, interpret FP ops requiring infinities,
		 * NaNs, or denorms, and maintain FPCR corrections.
		 */
		if (user) {
#ifndef NO_IEEE
			i = alpha_fp_complete(a0, a1, p, &ucode);
			if (i == 0)
				goto out;
#else
			i = SIGFPE;
			ucode = FPE_FLTINV;
#endif
			v = (caddr_t)framep->tf_regs[FRAME_PC];
			break;
		}

		/* Always fatal in kernel. Should never happen. */
		goto dopanic;

	case ALPHA_KENTRY_IF:
		/*
		 * These are always fatal in kernel, and should never
		 * happen. (Debugger entry is handled in XentIF.)
		 */
		if (!user) {
#if defined(DDB)
			/*
			 * ...unless a debugger is configured. It will
			 * inform us if the trap was handled.
			 */
			if (alpha_debug(a0, a1, a2, entry, framep))
				goto out;

			/*
			 * Debugger did NOT handle the trap, don't
			 * call the debugger again!
			 */
			call_debugger = 0;
#endif
			goto dopanic;
		}
		i = 0;
		switch (a0) {
		case ALPHA_IF_CODE_GENTRAP:
			if (framep->tf_regs[FRAME_A0] == -2) { /* weird! */
				i = SIGFPE;
				ucode = a0;	/* exception summary */
				break;
			}
			/* FALLTHROUGH */
		case ALPHA_IF_CODE_BPT:
		case ALPHA_IF_CODE_BUGCHK:
#ifdef PTRACE
			if (p->p_md.md_flags & (MDP_STEP1|MDP_STEP2)) {
				KERNEL_LOCK();
				process_sstep(p, 0);
				KERNEL_UNLOCK();
				p->p_md.md_tf->tf_regs[FRAME_PC] -= 4;
			}
#endif
			ucode = a0;	/* trap type */
			i = SIGTRAP;
			break;

		case ALPHA_IF_CODE_OPDEC:
			KERNEL_LOCK();
			i = handle_opdec(p, &ucode);
			KERNEL_UNLOCK();
			if (i == 0)
				goto out;
			break;

		case ALPHA_IF_CODE_FEN:
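			/*
			 * The process issued a floating-point instruction
			 * while the FPU was disabled for it; give it the
			 * FPU and resume.
			 */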
			alpha_enable_fp(p, 0);
			goto out;

		default:
			printf("trap: unknown IF type 0x%lx\n", a0);
			goto dopanic;
		}
		v = (caddr_t)framep->tf_regs[FRAME_PC];
		break;

	case ALPHA_KENTRY_MM:
		if (user &&
		    !uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
		    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
		    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
			goto out;

		switch (a1) {
		case ALPHA_MMCSR_FOR:
		case ALPHA_MMCSR_FOE:
		case ALPHA_MMCSR_FOW:
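			/*
			 * Fault-on-read/execute/write: the pmap uses these
			 * software faults to maintain the referenced and
			 * modified bits. If pmap_emulate_reference() cannot
			 * resolve the fault itself, handle it as an
			 * ordinary fault below.
			 */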
			if (pmap_emulate_reference(p, a0, user, a1)) {
				access_type = PROT_EXEC;
				goto do_fault;
			}
			goto out;

		case ALPHA_MMCSR_INVALTRANS:
		case ALPHA_MMCSR_ACCESS:
		    {
			vaddr_t va;
			struct vmspace *vm = NULL;
			struct vm_map *map;
			int rv;
			extern struct vm_map *kernel_map;

			switch (a2) {
			case -1:	/* instruction fetch fault */
				access_type = PROT_EXEC;
				break;
			case 0:		/* load instruction */
				access_type = PROT_READ;
				break;
			case 1:		/* store instruction */
				access_type = PROT_READ | PROT_WRITE;
				break;
			}
		do_fault:
			/*
			 * It is only a kernel address space fault iff:
			 *	1. !user and
			 *	2. pcb_onfault not set or
			 *	3. pcb_onfault set but kernel space data fault
			 * The last can occur during an exec() copyin where the
			 * argument space is lazy-allocated.
			 */
			if (!user && (a0 >= VM_MIN_KERNEL_ADDRESS ||
			    p->p_addr->u_pcb.pcb_onfault == 0)) {
				vm = NULL;
				map = kernel_map;
			} else {
				vm = p->p_vmspace;
				map = &vm->vm_map;
			}

			va = trunc_page((vaddr_t)a0);
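			/*
			 * Save and clear any copyin/copyout recovery
			 * handler while the fault is resolved; it is
			 * restored as soon as uvm_fault() returns.
			 */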
			onfault = p->p_addr->u_pcb.pcb_onfault;
			p->p_addr->u_pcb.pcb_onfault = 0;

			KERNEL_LOCK();
			rv = uvm_fault(map, va, 0, access_type);
			KERNEL_UNLOCK();

			p->p_addr->u_pcb.pcb_onfault = onfault;

			/*
			 * If this was a stack access we keep track of the
			 * maximum accessed stack size. Also, if vm_fault
			 * gets a protection failure it is due to accessing
			 * the stack region outside the current limit and
			 * we need to reflect that as an access error.
			 */
			if (rv == 0) {
				if (map != kernel_map)
					uvm_grow(p, va);
				goto out;
			}

			if (!user) {
				/* Check for copyin/copyout fault */
				if (p->p_addr->u_pcb.pcb_onfault != 0) {
					framep->tf_regs[FRAME_PC] =
					    p->p_addr->u_pcb.pcb_onfault;
					goto out;
				}
				goto dopanic;
			}
			ucode = access_type;
			v = (caddr_t)a0;
			typ = SEGV_MAPERR;
			if (rv == ENOMEM) {
				printf("UVM: pid %u (%s), uid %d killed: "
				    "out of swap\n", p->p_p->ps_pid,
				    p->p_p->ps_comm,
				    p->p_ucred ? (int)p->p_ucred->cr_uid : -1);
				i = SIGKILL;
			} else {
				i = SIGSEGV;
			}
			break;
		    }

		default:
			printf("trap: unknown MMCSR value 0x%lx\n", a1);
			goto dopanic;
		}
		break;

	default:
		goto dopanic;
	}

#ifdef DEBUG
	printtrap(a0, a1, a2, entry, framep, 1, user);
#endif
	sv.sival_ptr = v;
	trapsignal(p, i, ucode, typ, sv);
out:
	if (user) {
		/* Do any deferred user pmap operations. */
		PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));

		userret(p);
	}
	return;

dopanic:
	printtrap(a0, a1, a2, entry, framep, 1, user);
	/* XXX dump registers */

#if defined(DDB)
	if (call_debugger && alpha_debug(a0, a1, a2, entry, framep)) {
		/*
		 * The debugger has handled the trap; just return.
		 */
		goto out;
	}
#endif

	panic("trap");
}

/*
 * Process a system call.
 *
 * System calls are strange beasts. They are passed the syscall number
 * in v0, and the arguments in the registers (as normal). They return
 * an error flag in a3 (if a3 != 0 on return, the syscall had an error),
 * and the return value (if any) in v0.
 *
 * The assembly stub takes care of moving the call number into a register
 * we can get to, and moves all of the argument registers into their places
 * in the trap frame. On return, it restores the callee-saved registers,
 * a3, and v0 from the frame before returning to the user process.
 */
void
syscall(u_int64_t code, struct trapframe *framep)
{
	const struct sysent *callp = sysent;
	struct proc *p;
	int error = ENOSYS;
	u_int64_t opc;
	u_long rval[2];
	u_long args[6];
	u_int nargs;

	atomic_add_int(&uvmexp.syscalls, 1);
	p = curproc;
	p->p_md.md_tf = framep;
	framep->tf_regs[FRAME_SP] = alpha_pal_rdusp();
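	/*
	 * The saved PC points at the instruction following the CALLSYS
	 * that brought us here; back up by one instruction so ERESTART
	 * can re-execute the system call.
	 */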
	opc = framep->tf_regs[FRAME_PC] - 4;

	if (code <= 0 || code >= SYS_MAXSYSCALL)
		goto bad;

	callp += code;

	nargs = callp->sy_narg;
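
	/*
	 * Copy the system call arguments out of the trap frame; each
	 * case deliberately falls through to pick up the remaining
	 * argument registers.
	 */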
	switch (nargs) {
	case 6:
		args[5] = framep->tf_regs[FRAME_A5];
	case 5:
		args[4] = framep->tf_regs[FRAME_A4];
	case 4:
		args[3] = framep->tf_regs[FRAME_A3];
	case 3:
		args[2] = framep->tf_regs[FRAME_A2];
	case 2:
		args[1] = framep->tf_regs[FRAME_A1];
	case 1:
		args[0] = framep->tf_regs[FRAME_A0];
	case 0:
		break;
	}

	rval[0] = 0;
	rval[1] = 0;

	error = mi_syscall(p, code, callp, args, rval);

	switch (error) {
	case 0:
		framep->tf_regs[FRAME_V0] = rval[0];
		framep->tf_regs[FRAME_A3] = 0;
		break;
	case ERESTART:
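		/* Roll the PC back so the syscall is re-executed. */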
		framep->tf_regs[FRAME_PC] = opc;
		break;
	case EJUSTRETURN:
		break;
	default:
	bad:
		framep->tf_regs[FRAME_V0] = error;
		framep->tf_regs[FRAME_A3] = 1;
		break;
	}

	/* Do any deferred user pmap operations. */
	PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));

	mi_syscall_return(p, code, error, rval);
}

/*
 * Process the tail end of a fork() for the child.
 */
void
child_return(arg)
	void *arg;
{
	struct proc *p = arg;
	struct trapframe *framep = p->p_md.md_tf;

	/*
	 * Return values in the frame set by cpu_fork().
	 */
	framep->tf_regs[FRAME_V0] = 0;
	framep->tf_regs[FRAME_A3] = 0;

	KERNEL_UNLOCK();

	/* Do any deferred user pmap operations. */
	PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));

	mi_child_return(p);
}

/*
 * Set the floating-point enable for the current process, and return
 * the FPU context to the named process. If check == 0, it is an
 * error for the named process to already be fpcurproc.
 */
void
alpha_enable_fp(struct proc *p, int check)
{
	struct cpu_info *ci = curcpu();
#if defined(MULTIPROCESSOR)
	int s;
#endif

	if (check && ci->ci_fpcurproc == p) {
		alpha_pal_wrfen(1);
		return;
	}
	if (ci->ci_fpcurproc == p)
		panic("trap: fp disabled for fpcurproc == %p", p);

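	/*
	 * If another process's FP state is still live in this CPU's
	 * registers, save it before loading ours.
	 */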
	if (ci->ci_fpcurproc != NULL)
		fpusave_cpu(ci, 1);

	KDASSERT(ci->ci_fpcurproc == NULL);

#if defined(MULTIPROCESSOR)
	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p, 1);
#else
	KDASSERT(p->p_addr->u_pcb.pcb_fpcpu == NULL);
#endif

#if defined(MULTIPROCESSOR)
	/* Need to block IPIs */
	s = splipi();
#endif
	p->p_addr->u_pcb.pcb_fpcpu = ci;
	ci->ci_fpcurproc = p;
	atomic_add_int(&uvmexp.fpswtch, 1);

	p->p_md.md_flags |= MDP_FPUSED;
	alpha_pal_wrfen(1);
	restorefpstate(&p->p_addr->u_pcb.pcb_fp);
	alpha_pal_wrfen(0);

#if defined(MULTIPROCESSOR)
	alpha_pal_swpipl(s);
#endif
}

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 */
void
ast(framep)
	struct trapframe *framep;
{
	struct proc *p = curproc;

	p->p_md.md_tf = framep;
	p->p_md.md_astpending = 0;

#ifdef DIAGNOSTIC
	if ((framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) == 0)
		panic("ast and not user");
#endif

	refreshcreds(p);
	atomic_add_int(&uvmexp.softs, 1);
	mi_ast(p, curcpu()->ci_want_resched);

	/* Do any deferred user pmap operations. */
	PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));

	userret(p);
}

static const int reg_to_framereg[32] = {
	FRAME_V0,	FRAME_T0,	FRAME_T1,	FRAME_T2,
	FRAME_T3,	FRAME_T4,	FRAME_T5,	FRAME_T6,
	FRAME_T7,	FRAME_S0,	FRAME_S1,	FRAME_S2,
	FRAME_S3,	FRAME_S4,	FRAME_S5,	FRAME_S6,
	FRAME_A0,	FRAME_A1,	FRAME_A2,	FRAME_A3,
	FRAME_A4,	FRAME_A5,	FRAME_T8,	FRAME_T9,
	FRAME_T10,	FRAME_T11,	FRAME_RA,	FRAME_T12,
	FRAME_AT,	FRAME_GP,	FRAME_SP,	-1,
};

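/*
 * Map an architectural register number to a pointer to its slot in the
 * trap frame; register 31 (the zero register) has no slot and maps to NULL.
 */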
#define	irp(p, reg)						\
	((reg_to_framereg[(reg)] == -1) ? NULL :		\
	    &(p)->p_md.md_tf->tf_regs[reg_to_framereg[(reg)]])

/*
 * Reserved/unimplemented instruction (opDec fault) handler
 *
 * Argument is the process that caused it. No useful information
 * is passed to the trap handler other than the fault type. The
 * address of the instruction that caused the fault is 4 less than
 * the PC stored in the trap frame.
 *
 * If the instruction is emulated successfully, this function returns 0.
 * Otherwise, this function returns the signal to deliver to the process,
 * and fills in *ucodep with the code to be delivered.
 */
int
handle_opdec(p, ucodep)
	struct proc *p;
	u_int64_t *ucodep;
{
	alpha_instruction inst;
	register_t *regptr, memaddr;
	u_int64_t inst_pc;
	int sig;

	/*
	 * Read USP into frame in case it's going to be used or modified.
	 * This keeps us from having to check for it in lots of places
	 * later.
	 */
	p->p_md.md_tf->tf_regs[FRAME_SP] = alpha_pal_rdusp();

	inst_pc = memaddr = p->p_md.md_tf->tf_regs[FRAME_PC] - 4;
	if (copyinsn(p, (u_int32_t *)inst_pc, (u_int32_t *)&inst) != 0) {
		/*
		 * really, this should never happen, but in case it
		 * does we handle it.
		 */
		printf("WARNING: handle_opdec() couldn't fetch instruction\n");
		goto sigsegv;
	}

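	/*
	 * The loads and stores emulated below (ldbu/ldwu/stb/stw and
	 * sextb/sextw) belong to the byte/word extension (BWX); CPUs
	 * that predate BWX decode them as reserved opcodes, so we
	 * emulate them here.
	 */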
	switch (inst.generic_format.opcode) {
	case op_ldbu:
	case op_ldwu:
	case op_stw:
	case op_stb:
		regptr = irp(p, inst.mem_format.rb);
		if (regptr != NULL)
			memaddr = *regptr;
		else
			memaddr = 0;
		memaddr += inst.mem_format.displacement;

		regptr = irp(p, inst.mem_format.ra);

		if (inst.mem_format.opcode == op_ldwu ||
		    inst.mem_format.opcode == op_stw) {
			if (memaddr & 0x01) {	/* misaligned address */
				sig = SIGBUS;
				goto sigbus;
			}
		}

		if (inst.mem_format.opcode == op_ldbu) {
			u_int8_t b;

			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
			if (copyin((caddr_t)memaddr, &b, sizeof (b)) != 0)
				goto sigsegv;
			if (regptr != NULL)
				*regptr = b;
		} else if (inst.mem_format.opcode == op_ldwu) {
			u_int16_t w;

			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
			if (copyin((caddr_t)memaddr, &w, sizeof (w)) != 0)
				goto sigsegv;
			if (regptr != NULL)
				*regptr = w;
		} else if (inst.mem_format.opcode == op_stw) {
			u_int16_t w;

			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
			w = (regptr != NULL) ? *regptr : 0;
			if (copyout(&w, (caddr_t)memaddr, sizeof (w)) != 0)
				goto sigsegv;
		} else if (inst.mem_format.opcode == op_stb) {
			u_int8_t b;

			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
			b = (regptr != NULL) ? *regptr : 0;
			if (copyout(&b, (caddr_t)memaddr, sizeof (b)) != 0)
				goto sigsegv;
		}
		break;

	case op_intmisc:
		if (inst.operate_generic_format.function == op_sextb &&
		    inst.operate_generic_format.ra == 31) {
			int8_t b;

			if (inst.operate_generic_format.is_lit) {
				b = inst.operate_lit_format.literal;
			} else {
				if (inst.operate_reg_format.sbz != 0)
					goto sigill;
				regptr = irp(p, inst.operate_reg_format.rb);
				b = (regptr != NULL) ? *regptr : 0;
			}

			regptr = irp(p, inst.operate_generic_format.rc);
			if (regptr != NULL)
				*regptr = b;
			break;
		}
		if (inst.operate_generic_format.function == op_sextw &&
		    inst.operate_generic_format.ra == 31) {
			int16_t w;

			if (inst.operate_generic_format.is_lit) {
				w = inst.operate_lit_format.literal;
			} else {
				if (inst.operate_reg_format.sbz != 0)
					goto sigill;
				regptr = irp(p, inst.operate_reg_format.rb);
				w = (regptr != NULL) ? *regptr : 0;
			}

			regptr = irp(p, inst.operate_generic_format.rc);
			if (regptr != NULL)
				*regptr = w;
			break;
		}
		goto sigill;

#ifndef NO_IEEE
	/* case op_fix_float: */
	/* case op_vax_float: */
	case op_ieee_float:
	/* case op_any_float: */
		/*
		 * EV4 processors do not implement dynamic rounding
		 * instructions at all.
		 */
		if (cpu_implver <= ALPHA_IMPLVER_EV4) {
			sig = alpha_fp_complete_at(inst_pc, p, ucodep);
			if (sig)
				return sig;
			break;
		}
		goto sigill;
#endif

	default:
		goto sigill;
	}

	/*
	 * Write back USP. Note that in the error cases below,
	 * nothing will have been successfully modified so we don't
	 * have to write it out.
	 */
	alpha_pal_wrusp(p->p_md.md_tf->tf_regs[FRAME_SP]);

	return (0);

sigill:
	*ucodep = ALPHA_IF_CODE_OPDEC;		/* trap type */
	return (SIGILL);

sigsegv:
	sig = SIGSEGV;
	p->p_md.md_tf->tf_regs[FRAME_PC] = inst_pc;	/* re-run instr. */
sigbus:
	*ucodep = memaddr;			/* faulting address */
	return (sig);
}