1 /* $NetBSD: trap.c,v 1.140 2023/11/21 22:19:12 thorpej Exp $ */
2 
3 /*-
4  * Copyright (c) 2000, 2001, 2021 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, by Charles M. Hannum, and by Ross Harvey.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1999 Christopher G. Demetriou.  All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. All advertising materials mentioning features or use of this software
45  *    must display the following acknowledgement:
46  *      This product includes software developed by Christopher G. Demetriou
47  *	for the NetBSD Project.
48  * 4. The name of the author may not be used to endorse or promote products
49  *    derived from this software without specific prior written permission
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61  */
62 
63 /*
64  * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
65  * All rights reserved.
66  *
67  * Author: Chris G. Demetriou
68  *
69  * Permission to use, copy, modify and distribute this software and
70  * its documentation is hereby granted, provided that both the copyright
71  * notice and this permission notice appear in all copies of the
72  * software, derivative works or modified versions, and any portions
73  * thereof, and that both notices appear in supporting documentation.
74  *
75  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
76  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
77  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78  *
79  * Carnegie Mellon requests users of this software to return to
80  *
81  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
82  *  School of Computer Science
83  *  Carnegie Mellon University
84  *  Pittsburgh PA 15213-3890
85  *
86  * any improvements or extensions that they make and grant Carnegie the
87  * rights to redistribute these changes.
88  */
89 
90 #define	__UFETCHSTORE_PRIVATE	/* see handle_opdec() */
91 
92 #include "opt_fix_unaligned_vax_fp.h"
93 #include "opt_ddb.h"
94 #include "opt_multiprocessor.h"
95 
96 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
97 
98 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.140 2023/11/21 22:19:12 thorpej Exp $");
99 
100 #include <sys/param.h>
101 #include <sys/systm.h>
102 #include <sys/proc.h>
103 #include <sys/syscall.h>
104 #include <sys/buf.h>
105 #include <sys/kauth.h>
106 #include <sys/kmem.h>
107 #include <sys/cpu.h>
108 #include <sys/atomic.h>
109 #include <sys/bitops.h>
110 
111 #include <uvm/uvm_extern.h>
112 
113 #include <machine/reg.h>
114 #include <machine/alpha.h>
115 #include <machine/fpu.h>
116 #include <machine/rpb.h>
117 #ifdef DDB
118 #include <machine/db_machdep.h>
119 #endif
120 #include <machine/alpha_instruction.h>
121 #include <machine/userret.h>
122 
123 static int unaligned_fixup(u_long, u_long, u_long, struct lwp *);
124 static int handle_opdec(struct lwp *l, u_long *ucodep);
125 static int alpha_ucode_to_ksiginfo(u_long ucode);
126 
127 /*
128  * Initialize the trap vectors for the current processor.
129  */
130 void
131 trap_init(void)
132 {
133 
134 	/*
135 	 * Point interrupt/exception vectors to our own.
136 	 */
137 	alpha_pal_wrent(XentInt, ALPHA_KENTRY_INT);
138 	alpha_pal_wrent(XentArith, ALPHA_KENTRY_ARITH);
139 	alpha_pal_wrent(XentMM, ALPHA_KENTRY_MM);
140 	alpha_pal_wrent(XentIF, ALPHA_KENTRY_IF);
141 	alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA);
142 	alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS);
143 
144 	/*
145 	 * Clear pending machine checks and error reports, and enable
146 	 * system- and processor-correctable error reporting.
147 	 */
148 	alpha_pal_wrmces(alpha_pal_rdmces() &
149 	    ~(ALPHA_MCES_DSC|ALPHA_MCES_DPC));
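	/*
	 * Per the architecture manual, the pending machine-check and
	 * correctable-error bits in the MCES register are write-one-to-clear,
	 * so writing back the value just read clears anything pending, while
	 * clearing the DSC/DPC disable bits re-enables reporting of system-
	 * and processor-correctable errors.
	 */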
150 }
151 
152 static void
153 onfault_restore(struct trapframe *framep, vaddr_t onfault, int error)
154 {
155 	framep->tf_regs[FRAME_PC] = onfault;
156 	framep->tf_regs[FRAME_V0] = error;
157 }
158 
159 static vaddr_t
160 onfault_handler(const struct pcb *pcb, const struct trapframe *tf)
161 {
162 	struct onfault_table {
163 		vaddr_t start;
164 		vaddr_t end;
165 		vaddr_t handler;
166 	};
167 	extern const struct onfault_table onfault_table[];
168 	const struct onfault_table *p;
169 	vaddr_t pc;
170 
171 	if (pcb->pcb_onfault != 0) {
172 		return pcb->pcb_onfault;
173 	}
174 
175 	pc = tf->tf_regs[FRAME_PC];
176 	for (p = onfault_table; p->start; p++) {
177 		if (p->start <= pc && pc < p->end) {
178 			return p->handler;
179 		}
180 	}
181 	return 0;
182 }
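/*
 * Summary of the fault-recovery lookup above: if the current PCB has
 * pcb_onfault set (the usual copyin()/copyout() recovery hook), that
 * handler wins; otherwise the faulting PC is matched against the PC
 * ranges in onfault_table, which covers code (presumably the assembly
 * copy/fetch routines) that cannot set pcb_onfault itself.
 */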
183 
184 static void
185 printtrap(const u_long a0, const u_long a1, const u_long a2,
186     const u_long entry, struct trapframe *framep, int isfatal, int user)
187 {
188 	char ubuf[64];
189 	const char *entryname;
190 	u_long cpu_id = cpu_number();
191 
192 	switch (entry) {
193 	case ALPHA_KENTRY_INT:
194 		entryname = "interrupt";
195 		break;
196 	case ALPHA_KENTRY_ARITH:
197 		entryname = "arithmetic trap";
198 		break;
199 	case ALPHA_KENTRY_MM:
200 		entryname = "memory management fault";
201 		break;
202 	case ALPHA_KENTRY_IF:
203 		entryname = "instruction fault";
204 		break;
205 	case ALPHA_KENTRY_UNA:
206 		entryname = "unaligned access fault";
207 		break;
208 	case ALPHA_KENTRY_SYS:
209 		entryname = "system call";
210 		break;
211 	default:
212 		snprintf(ubuf, sizeof(ubuf), "type %lx", entry);
213 		entryname = (const char *) ubuf;
214 		break;
215 	}
216 
217 	printf("\n");
218 	printf("CPU %lu: %s %s trap:\n", cpu_id, isfatal ? "fatal" : "handled",
219 	    user ? "user" : "kernel");
220 	printf("\n");
221 	printf("CPU %lu    trap entry = 0x%lx (%s)\n", cpu_id, entry,
222 	    entryname);
223 	printf("CPU %lu    a0         = 0x%lx\n", cpu_id, a0);
224 	printf("CPU %lu    a1         = 0x%lx\n", cpu_id, a1);
225 	printf("CPU %lu    a2         = 0x%lx\n", cpu_id, a2);
226 	printf("CPU %lu    pc         = 0x%lx\n", cpu_id,
227 	    framep->tf_regs[FRAME_PC]);
228 	printf("CPU %lu    ra         = 0x%lx\n", cpu_id,
229 	    framep->tf_regs[FRAME_RA]);
230 	printf("CPU %lu    pv         = 0x%lx\n", cpu_id,
231 	    framep->tf_regs[FRAME_T12]);
232 	printf("CPU %lu    curlwp     = %p\n", cpu_id, curlwp);
233 	printf("CPU %lu        pid = %d, comm = %s\n", cpu_id,
234 	    curproc->p_pid, curproc->p_comm);
235 	printf("\n");
236 }
237 
238 /*
239  * Trap is called from locore to handle most types of processor traps.
240  * System calls are broken out for efficiency and ASTs are broken out
241  * to make the code a bit cleaner and more representative of the
242  * Alpha architecture.
243  */
244 /*ARGSUSED*/
245 void
246 trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
247     struct trapframe *framep)
248 {
249 	struct lwp *l;
250 	struct proc *p;
251 	struct pcb *pcb;
252 	vaddr_t onfault;
253 	ksiginfo_t ksi;
254 	vm_prot_t ftype;
255 	uint64_t ucode;
256 	int i, user;
257 #if defined(DDB)
258 	int call_debugger = 1;
259 #endif
260 
261 	curcpu()->ci_data.cpu_ntrap++;
262 
263 	l = curlwp;
264 
265 	user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0;
266 	if (user) {
267 		l->l_md.md_tf = framep;
268 		p = l->l_proc;
269 		(void)memset(&ksi, 0, sizeof(ksi));
270 	} else {
271 		p = NULL;
272 	}
273 
274 	switch (entry) {
275 	case ALPHA_KENTRY_UNA:
276 		/*
277 		 * If user-land, do whatever fixups, printing, and
278 		 * signalling is appropriate (based on system-wide
279 		 * and per-process unaligned-access-handling flags).
280 		 */
281 		if (user) {
282 			i = unaligned_fixup(a0, a1, a2, l);
283 			if (i == 0)
284 				goto out;
285 
286 			KSI_INIT_TRAP(&ksi);
287 			ksi.ksi_signo = i;
288 			ksi.ksi_code = BUS_ADRALN;
289 			ksi.ksi_addr = (void *)a0;		/* VA */
290 			ksi.ksi_trap = BUS_ADRALN;      /* XXX appropriate? */
291 			break;
292 		}
293 
294 		/*
295 		 * Unaligned access from kernel mode is always an error,
296 		 * EVEN IF A COPY FAULT HANDLER IS SET!
297 		 *
298 		 * It's an error if a copy fault handler is set because
299 		 * the various routines which do user-initiated copies
300 		 * do so in a memcpy-like manner.  In other words, the
301 		 * kernel never assumes that pointers provided by the
302 		 * user are properly aligned, and so if the kernel
303 		 * does cause an unaligned access it's a kernel bug.
304 		 */
305 		goto dopanic;
306 
307 	case ALPHA_KENTRY_ARITH:
308 		/*
309 		 * Resolve trap shadows, interpret FP ops requiring infinities,
310 		 * NaNs, or denorms, and maintain FPCR corrections.
311 		 */
312 		if (user) {
313 			i = alpha_fp_complete(a0, a1, l, &ucode);
314 			if (i == 0)
315 				goto out;
316 			KSI_INIT_TRAP(&ksi);
317 			ksi.ksi_signo = i;
318 			if (i == SIGSEGV)
319 				ksi.ksi_code = SEGV_MAPERR; /* just pick one */
320 			else {
321 				ksi.ksi_code = alpha_ucode_to_ksiginfo(ucode);
322 				ksi.ksi_addr =
323 					(void *)l->l_md.md_tf->tf_regs[FRAME_PC];
324 				ksi.ksi_trap = (int)ucode;
325 			}
326 			break;
327 		}
328 
329 		/* Always fatal in kernel.  Should never happen. */
330 		goto dopanic;
331 
332 	case ALPHA_KENTRY_IF:
333 		/*
334 		 * These are always fatal in kernel, and should never
335 		 * happen.  (Debugger entry is handled in XentIF.)
336 		 */
337 		if (user == 0) {
338 #if defined(DDB)
339 			/*
340 			 * ...unless a debugger is configured.  It will
341 			 * inform us if the trap was handled.
342 			 */
343 			if (alpha_debug(a0, a1, a2, entry, framep))
344 				goto out;
345 
346 			/*
347 			 * Debugger did NOT handle the trap, don't
348 			 * call the debugger again!
349 			 */
350 			call_debugger = 0;
351 #endif
352 			goto dopanic;
353 		}
354 		i = 0;
355 		switch (a0) {
356 		case ALPHA_IF_CODE_GENTRAP:
357 			if (framep->tf_regs[FRAME_A0] == -2) { /* weird! */
358 				KSI_INIT_TRAP(&ksi);
359 				ksi.ksi_signo = SIGFPE;
360 				ksi.ksi_code = FPE_INTDIV;
361 				ksi.ksi_addr =
362 					(void *)l->l_md.md_tf->tf_regs[FRAME_PC];
363 				ksi.ksi_trap =  a0;	/* exception summary */
364 				break;
365 			}
366 			/* FALLTHROUGH */
367 		case ALPHA_IF_CODE_BPT:
368 		case ALPHA_IF_CODE_BUGCHK:
369 			KSI_INIT_TRAP(&ksi);
370 			ksi.ksi_signo = SIGTRAP;
371 			ksi.ksi_code = TRAP_BRKPT;
372 			ksi.ksi_addr = (void *)l->l_md.md_tf->tf_regs[FRAME_PC];
373 			ksi.ksi_trap = a0;		/* trap type */
374 			break;
375 
376 		case ALPHA_IF_CODE_OPDEC:
377 			i = handle_opdec(l, &ucode);
378 			KSI_INIT_TRAP(&ksi);
379 			if (i == 0)
380 				goto out;
381 			else if (i == SIGSEGV)
382 				ksi.ksi_code = SEGV_MAPERR;
383 			else if (i == SIGILL)
384 				ksi.ksi_code = ILL_ILLOPC;
385 			else if (i == SIGFPE)
386 				ksi.ksi_code = alpha_ucode_to_ksiginfo(ucode);
387 			ksi.ksi_signo = i;
388 			ksi.ksi_addr =
389 				(void *)l->l_md.md_tf->tf_regs[FRAME_PC];
390 			ksi.ksi_trap = (int)ucode;
391 			break;
392 
393 		case ALPHA_IF_CODE_FEN:
394 			fpu_load();
395 			goto out;
396 
397 		default:
398 			printf("trap: unknown IF type 0x%lx\n", a0);
399 			goto dopanic;
400 		}
401 		break;
402 
403 	case ALPHA_KENTRY_MM:
404 		pcb = lwp_getpcb(l);
405 		onfault = onfault_handler(pcb, framep);
406 
407 		switch (a1) {
408 		case ALPHA_MMCSR_FOR:
409 		case ALPHA_MMCSR_FOE:
410 		case ALPHA_MMCSR_FOW:
411 			if (pmap_emulate_reference(l, a0, user, a1)) {
412 				ftype = VM_PROT_EXECUTE;
413 				goto do_fault;
414 			}
415 			goto out;
416 
417 		case ALPHA_MMCSR_INVALTRANS:
418 		case ALPHA_MMCSR_ACCESS:
419 	    	{
420 			vaddr_t save_onfault;
421 			vaddr_t va;
422 			struct vmspace *vm = NULL;
423 			struct vm_map *map;
424 			int rv;
425 
426 			switch (a2) {
427 			case -1:		/* instruction fetch fault */
428 				ftype = VM_PROT_EXECUTE;
429 				break;
430 			case 0:			/* load instruction */
431 				ftype = VM_PROT_READ;
432 				break;
433 			case 1:			/* store instruction */
434 				ftype = VM_PROT_WRITE;
435 				break;
436 			default:
437 #ifdef DIAGNOSTIC
438 				panic("trap: bad fault type");
439 #else
440 				ftype = VM_PROT_NONE;
441 				break;
442 #endif
443 			}
444 
445 			if (!user) {
446 				struct cpu_info *ci = curcpu();
447 
448 				if (l == NULL) {
449 					/*
450 					 * If there is no current process,
451 					 * it can be nothing but a fatal
452 					 * error (i.e. memory in this case
453 					 * must be wired).
454 					 */
455 					goto dopanic;
456 				}
457 
458 				/*
459 				 * If we're in interrupt context at this
460 				 * point, this is an error.
461 				 */
462 				if (ci->ci_intrdepth != 0)
463 					goto dopanic;
464 			}
465 
466 			/*
467 			 * It is only a kernel address space fault iff:
468 			 *	1. !user and
469 			 *	2. onfault not set or
470 			 *	3. onfault set but kernel space data fault
471 			 * The last can occur during an exec() copyin where the
472 			 * argument space is lazy-allocated.
473 			 */
474 do_fault:
475 			pcb = lwp_getpcb(l);
476 			if (user == 0 && (a0 >= VM_MIN_KERNEL_ADDRESS ||
477 					  onfault == 0))
478 				map = kernel_map;
479 			else {
480 				vm = l->l_proc->p_vmspace;
481 				map = &vm->vm_map;
482 			}
483 
484 			va = trunc_page((vaddr_t)a0);
485 			save_onfault = pcb->pcb_onfault;
486 			pcb->pcb_onfault = 0;
487 			rv = uvm_fault(map, va, ftype);
488 			pcb->pcb_onfault = save_onfault;
489 
490 			/*
491 			 * If this was a stack access we keep track of the
492 			 * maximum accessed stack size.  Also, if vm_fault
493 			 * gets a protection failure it is due to accessing
494 			 * the stack region outside the current limit and
495 			 * we need to reflect that as an access error.
496 			 */
497 			if (map != kernel_map &&
498 			    (void *)va >= vm->vm_maxsaddr &&
499 			    va < USRSTACK) {
500 				if (rv == 0)
501 					uvm_grow(l->l_proc, va);
502 				else if (rv == EACCES &&
503 					   ftype != VM_PROT_EXECUTE)
504 					rv = EFAULT;
505 			}
506 			if (rv == 0) {
507 				goto out;
508 			}
509 
510 			if (user == 0) {
511 				/* Check for copyin/copyout fault */
512 				if (onfault != 0) {
513 					onfault_restore(framep, onfault, rv);
514 					goto out;
515 				}
516 				goto dopanic;
517 			}
518 			KSI_INIT_TRAP(&ksi);
519 			ksi.ksi_addr = (void *)a0;
520 			ksi.ksi_trap = a1; /* MMCSR VALUE */
521 			switch (rv) {
522 			case ENOMEM:
523 				printf("UVM: pid %d (%s), uid %d killed: "
524 				    "out of swap\n", l->l_proc->p_pid,
525 				    l->l_proc->p_comm,
526 				    l->l_cred ?
527 				    kauth_cred_geteuid(l->l_cred) : -1);
528 				ksi.ksi_signo = SIGKILL;
529 				break;
530 			case EINVAL:
531 				ksi.ksi_signo = SIGBUS;
532 				ksi.ksi_code = BUS_ADRERR;
533 				break;
534 			case EACCES:
535 				ksi.ksi_signo = SIGSEGV;
536 				ksi.ksi_code = SEGV_ACCERR;
537 				break;
538 			default:
539 				ksi.ksi_signo = SIGSEGV;
540 				ksi.ksi_code = SEGV_MAPERR;
541 				break;
542 			}
543 			break;
544 		    }
545 
546 		default:
547 			printf("trap: unknown MMCSR value 0x%lx\n", a1);
548 			goto dopanic;
549 		}
550 		break;
551 
552 	default:
553 		goto dopanic;
554 	}
555 
556 #ifdef DEBUG
557 	printtrap(a0, a1, a2, entry, framep, 1, user);
558 #endif
559 	(*p->p_emul->e_trapsignal)(l, &ksi);
560 out:
561 	if (user)
562 		userret(l);
563 	return;
564 
565 dopanic:
566 	printtrap(a0, a1, a2, entry, framep, 1, user);
567 
568 	/* XXX dump registers */
569 
570 #if defined(DDB)
571 	if (call_debugger && alpha_debug(a0, a1, a2, entry, framep)) {
572 		/*
573 		 * The debugger has handled the trap; just return.
574 		 */
575 		goto out;
576 	}
577 #endif
578 
579 	panic("trap");
580 }
581 
582 /*
583  * Process an asynchronous software trap.
584  * This is relatively easy.
585  */
586 void
587 ast(struct trapframe *framep)
588 {
589 	struct lwp *l;
590 
591 	/*
592 	 * We may not have a current process to do AST processing
593 	 * on.  This happens on multiprocessor systems in which
594 	 * at least one CPU simply has no current process to run,
595 	 * but roundrobin() (called via hardclock()) kicks us to
596 	 * attempt to preempt the process running on our CPU.
597 	 */
598 	l = curlwp;
599 	if (l == NULL)
600 		return;
601 
602 	//curcpu()->ci_data.cpu_nast++;
603 	l->l_md.md_tf = framep;
604 
605 	if (l->l_pflag & LP_OWEUPC) {
606 		l->l_pflag &= ~LP_OWEUPC;
607 		ADDUPROF(l);
608 	}
609 
610 	userret(l);
611 }
612 
613 /*
614  * Unaligned access handler.  It's not clear that this can get much slower...
615  *
616  */
617 static const int reg_to_framereg[32] = {
618 	FRAME_V0,	FRAME_T0,	FRAME_T1,	FRAME_T2,
619 	FRAME_T3,	FRAME_T4,	FRAME_T5,	FRAME_T6,
620 	FRAME_T7,	FRAME_S0,	FRAME_S1,	FRAME_S2,
621 	FRAME_S3,	FRAME_S4,	FRAME_S5,	FRAME_S6,
622 	FRAME_A0,	FRAME_A1,	FRAME_A2,	FRAME_A3,
623 	FRAME_A4,	FRAME_A5,	FRAME_T8,	FRAME_T9,
624 	FRAME_T10,	FRAME_T11,	FRAME_RA,	FRAME_T12,
625 	FRAME_AT,	FRAME_GP,	FRAME_SP,	-1,
626 };
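/*
 * Note that entry 31 is -1: R31 is the architectural zero register and has
 * no slot in the trap frame, so irp() below returns NULL for it and the
 * emulation code discards writes to it (and reads it as zero).
 */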
627 
628 #define	irp(l, reg)							\
629 	((reg_to_framereg[(reg)] == -1) ? NULL :			\
630 	    &(l)->l_md.md_tf->tf_regs[reg_to_framereg[(reg)]])
631 
632 #define	frp(l, reg)							\
633 	(&pcb->pcb_fp.fpr_regs[(reg)])
634 
635 #define	unaligned_load(storage, ptrf, mod)				\
636 	if (copyin((void *)va, &(storage), sizeof (storage)) != 0)	\
637 		break;							\
638 	signo = 0;							\
639 	if ((regptr = ptrf(l, reg)) != NULL)				\
640 		*regptr = mod (storage);
641 
642 #define	unaligned_store(storage, ptrf, mod)				\
643 	if ((regptr = ptrf(l, reg)) != NULL)				\
644 		(storage) = mod (*regptr);				\
645 	else								\
646 		(storage) = 0;						\
647 	if (copyout(&(storage), (void *)va, sizeof (storage)) != 0)	\
648 		break;							\
649 	signo = 0;
650 
651 #define	unaligned_load_integer(storage)					\
652 	unaligned_load(storage, irp, )
653 
654 #define	unaligned_store_integer(storage)				\
655 	unaligned_store(storage, irp, )
656 
657 #define	unaligned_load_floating(storage, mod) do {			\
658 	struct pcb * const pcb = lwp_getpcb(l);				\
659 	fpu_save(l);							\
660 	unaligned_load(storage, frp, mod)				\
661 } while (/*CONSTCOND*/0)
662 
663 #define	unaligned_store_floating(storage, mod) do {			\
664 	struct pcb * const pcb = lwp_getpcb(l);				\
665 	fpu_save(l);							\
666 	unaligned_store(storage, frp, mod)				\
667 } while (/*CONSTCOND*/0)
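/*
 * How the helpers above work: the access is re-done through a naturally
 * aligned temporary of the right size ("storage") using copyin()/copyout(),
 * the optional "mod" conversion is applied, and the result is moved to or
 * from the saved register image.  "signo" is cleared only when the user
 * copy succeeds, so a fault during the fixup still produces SIGSEGV.  The
 * floating-point variants call fpu_save() first so that the FP register
 * image in the PCB is current before it is read or modified.
 */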
668 
669 static unsigned long
670 Sfloat_to_reg(u_int s)
671 {
672 	unsigned long sign, expn, frac;
673 	unsigned long result;
674 
675 	sign = (s & 0x80000000) >> 31;
676 	expn = (s & 0x7f800000) >> 23;
677 	frac = (s & 0x007fffff) >>  0;
678 
679 	/* map exponent part, as appropriate. */
680 	if (expn == 0xff)
681 		expn = 0x7ff;
682 	else if ((expn & 0x80) != 0)
683 		expn = (0x400 | (expn & ~0x80));
684 	else if ((expn & 0x80) == 0 && expn != 0)
685 		expn = (0x380 | (expn & ~0x80));
686 
687 	result = (sign << 63) | (expn << 52) | (frac << 29);
688 	return (result);
689 }
690 
691 static unsigned int
692 reg_to_Sfloat(u_long r)
693 {
694 	unsigned long sign, expn, frac;
695 	unsigned int result;
696 
697 	sign = (r & 0x8000000000000000) >> 63;
698 	expn = (r & 0x7ff0000000000000) >> 52;
699 	frac = (r & 0x000fffffe0000000) >> 29;
700 
701 	/* map exponent part, as appropriate. */
702 	expn = (expn & 0x7f) | ((expn & 0x400) != 0 ? 0x80 : 0x00);
703 
704 	result = (sign << 31) | (expn << 23) | (frac << 0);
705 	return (result);
706 }
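/*
 * Note on the conversions above: S_floating (IEEE single) values occupy
 * the FP registers in T_floating (IEEE double) layout, so Sfloat_to_reg()
 * widens the 8-bit exponent to 11 bits (the MSB is kept and followed by
 * three copies of its complement, then the low seven bits), maps the
 * all-ones Inf/NaN exponent to 0x7ff, and left-justifies the fraction;
 * reg_to_Sfloat() is the inverse.
 */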
707 
708 /*
709  * Conversion of T floating datums to and from register format
710  * requires no bit reordering whatsoever.
711  */
712 static unsigned long
713 Tfloat_reg_cvt(u_long input)
714 {
715 
716 	return (input);
717 }
718 
719 #ifdef FIX_UNALIGNED_VAX_FP
720 static unsigned long
721 Ffloat_to_reg(u_int f)
722 {
723 	unsigned long sign, expn, frlo, frhi;
724 	unsigned long result;
725 
726 	sign = (f & 0x00008000) >> 15;
727 	expn = (f & 0x00007f80) >>  7;
728 	frhi = (f & 0x0000007f) >>  0;
729 	frlo = (f & 0xffff0000) >> 16;
730 
731 	/* map exponent part, as appropriate. */
732 	if ((expn & 0x80) != 0)
733 		expn = (0x400 | (expn & ~0x80));
734 	else if ((expn & 0x80) == 0 && expn != 0)
735 		expn = (0x380 | (expn & ~0x80));
736 
737 	result = (sign << 63) | (expn << 52) | (frhi << 45) | (frlo << 29);
738 	return (result);
739 }
740 
741 static unsigned int
742 reg_to_Ffloat(u_long r)
743 {
744 	unsigned long sign, expn, frhi, frlo;
745 	unsigned int result;
746 
747 	sign = (r & 0x8000000000000000) >> 63;
748 	expn = (r & 0x7ff0000000000000) >> 52;
749 	frhi = (r & 0x000fe00000000000) >> 45;
750 	frlo = (r & 0x00001fffe0000000) >> 29;
751 
752 	/* map exponent part, as appropriate. */
753 	expn = (expn & 0x7f) | ((expn & 0x400) != 0 ? 0x80 : 0x00);
754 
755 	result = (sign << 15) | (expn << 7) | (frhi << 0) | (frlo << 16);
756 	return (result);
757 }
758 
759 /*
760  * Conversion of G floating datums to and from register format is
761  * symmetrical.  Just swap shorts in the quad...
762  */
763 static unsigned long
764 Gfloat_reg_cvt(u_long input)
765 {
766 	unsigned long a, b, c, d;
767 	unsigned long result;
768 
769 	a = (input & 0x000000000000ffff) >> 0;
770 	b = (input & 0x00000000ffff0000) >> 16;
771 	c = (input & 0x0000ffff00000000) >> 32;
772 	d = (input & 0xffff000000000000) >> 48;
773 
774 	result = (a << 48) | (b << 32) | (c << 16) | (d << 0);
775 	return (result);
776 }
777 #endif /* FIX_UNALIGNED_VAX_FP */
778 
779 struct unaligned_fixup_data {
780 	const char *type;	/* opcode name */
781 	int fixable;		/* fixable, 0 if fixup not supported */
782 	int size;		/* size, 0 if unknown */
783 };
784 
785 #define	UNKNOWN()	{ "0x%lx", 0, 0 }
786 #define	FIX_LD(n,s)	{ n, 1, s }
787 #define	FIX_ST(n,s)	{ n, 1, s }
788 #define	NOFIX_LD(n,s)	{ n, 0, s }
789 #define	NOFIX_ST(n,s)	{ n, 0, s }
790 
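/*
 * Attempt to fix up an unaligned user-mode access.  Returns 0 when the
 * access was successfully fixed up, SIGSEGV when it was not (unknown or
 * unfixable opcode, fixups disabled, or the user address could not be
 * accessed), and SIGBUS when alpha_unaligned_sigbus forces a signal
 * regardless of the outcome.
 */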
791 int
792 unaligned_fixup(u_long va, u_long opcode, u_long reg, struct lwp *l)
793 {
794 	static const struct unaligned_fixup_data tab_unknown[1] = {
795 		UNKNOWN(),
796 	};
797 	static const struct unaligned_fixup_data tab_0c[0x02] = {
798 		FIX_LD("ldwu", 2),	FIX_ST("stw", 2),
799 	};
800 	static const struct unaligned_fixup_data tab_20[0x10] = {
801 #ifdef FIX_UNALIGNED_VAX_FP
802 		FIX_LD("ldf", 4),	FIX_LD("ldg", 8),
803 #else
804 		NOFIX_LD("ldf", 4),	NOFIX_LD("ldg", 8),
805 #endif
806 		FIX_LD("lds", 4),	FIX_LD("ldt", 8),
807 #ifdef FIX_UNALIGNED_VAX_FP
808 		FIX_ST("stf", 4),	FIX_ST("stg", 8),
809 #else
810 		NOFIX_ST("stf", 4),	NOFIX_ST("stg", 8),
811 #endif
812 		FIX_ST("sts", 4),	FIX_ST("stt", 8),
813 		FIX_LD("ldl", 4),	FIX_LD("ldq", 8),
814 		NOFIX_LD("ldl_c", 4),	NOFIX_LD("ldq_c", 8),
815 		FIX_ST("stl", 4),	FIX_ST("stq", 8),
816 		NOFIX_ST("stl_c", 4),	NOFIX_ST("stq_c", 8),
817 	};
818 	const struct unaligned_fixup_data *selected_tab;
819 	int doprint, dofix, dosigbus, signo;
820 	unsigned long *regptr, longdata;
821 	int intdata;		/* signed to get extension when storing */
822 	uint16_t worddata;	/* unsigned to _avoid_ extension */
823 
824 	/*
825 	 * Read USP into frame in case it's the register to be modified.
826 	 * This keeps us from having to check for it in lots of places
827 	 * later.
828 	 */
829 	l->l_md.md_tf->tf_regs[FRAME_SP] = alpha_pal_rdusp();
830 
831 	/*
832 	 * Figure out what actions to take.
833 	 *
834 	 * XXX In the future, this should have a per-process component
835 	 * as well.
836 	 */
837 	doprint = alpha_unaligned_print;
838 	dofix = alpha_unaligned_fix;
839 	dosigbus = alpha_unaligned_sigbus;
840 
841 	/*
842 	 * Find out which opcode it is.  Arrange to have the opcode
843 	 * printed if it's an unknown opcode.
844 	 */
845 	if (opcode >= 0x0c && opcode <= 0x0d)
846 		selected_tab = &tab_0c[opcode - 0x0c];
847 	else if (opcode >= 0x20 && opcode <= 0x2f)
848 		selected_tab = &tab_20[opcode - 0x20];
849 	else
850 		selected_tab = tab_unknown;
851 
852 	/*
853 	 * If we're supposed to be noisy, squawk now.
854 	 */
855 	if (doprint) {
856 		uprintf(
857 		"pid %d (%s): unaligned access: "
858 		"va=0x%lx pc=0x%lx ra=0x%lx sp=0x%lx op=",
859 		    l->l_proc->p_pid, l->l_proc->p_comm, va,
860 		    l->l_md.md_tf->tf_regs[FRAME_PC] - 4,
861 		    l->l_md.md_tf->tf_regs[FRAME_RA],
862 		    l->l_md.md_tf->tf_regs[FRAME_SP]);
863 		uprintf(selected_tab->type, opcode);
864 		uprintf("\n");
865 	}
866 
867 	/*
868 	 * If we should try to fix it and know how, give it a shot.
869 	 *
870 	 * We never allow bad data to be unknowingly used by the user process.
871 	 * That is, if we can't access the address needed to fix up the trap,
872 	 * we cause a SIGSEGV rather than letting the user process go on
873 	 * without warning.
874 	 *
875 	 * If we're trying to do a fixup, we assume that things
876 	 * will be botched.  If everything works out OK,
877 	 * unaligned_{load,store}_* clears the signal flag.
878 	 */
879 	signo = SIGSEGV;
880 	if (dofix && selected_tab->fixable) {
881 		switch (opcode) {
882 		case op_ldwu:
883 			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
884 			unaligned_load_integer(worddata);
885 			break;
886 
887 		case op_stw:
888 			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
889 			unaligned_store_integer(worddata);
890 			break;
891 
892 #ifdef FIX_UNALIGNED_VAX_FP
893 		case op_ldf:
894 			unaligned_load_floating(intdata, Ffloat_to_reg);
895 			break;
896 
897 		case op_ldg:
898 			unaligned_load_floating(longdata, Gfloat_reg_cvt);
899 			break;
900 #endif
901 
902 		case op_lds:
903 			unaligned_load_floating(intdata, Sfloat_to_reg);
904 			break;
905 
906 		case op_ldt:
907 			unaligned_load_floating(longdata, Tfloat_reg_cvt);
908 			break;
909 
910 #ifdef FIX_UNALIGNED_VAX_FP
911 		case op_stf:
912 			unaligned_store_floating(intdata, reg_to_Ffloat);
913 			break;
914 
915 		case op_stg:
916 			unaligned_store_floating(longdata, Gfloat_reg_cvt);
917 			break;
918 #endif
919 
920 		case op_sts:
921 			unaligned_store_floating(intdata, reg_to_Sfloat);
922 			break;
923 
924 		case op_stt:
925 			unaligned_store_floating(longdata, Tfloat_reg_cvt);
926 			break;
927 
928 		case op_ldl:
929 			unaligned_load_integer(intdata);
930 			break;
931 
932 		case op_ldq:
933 			unaligned_load_integer(longdata);
934 			break;
935 
936 		case op_stl:
937 			unaligned_store_integer(intdata);
938 			break;
939 
940 		case op_stq:
941 			unaligned_store_integer(longdata);
942 			break;
943 
944 #ifdef DIAGNOSTIC
945 		default:
946 			panic("unaligned_fixup: can't get here");
947 #endif
948 		}
949 	}
950 
951 	/*
952 	 * Force SIGBUS if requested.
953 	 */
954 	if (dosigbus)
955 		signo = SIGBUS;
956 
957 	/*
958 	 * Write back USP.
959 	 */
960 	alpha_pal_wrusp(l->l_md.md_tf->tf_regs[FRAME_SP]);
961 
962 	return (signo);
963 }
964 
965 #define	EMUL_COUNT(ev)	atomic_inc_64(&(ev).ev_count)
966 
967 static struct evcnt emul_fix_ftoit =
968     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul fix", "ftoit");
969 static struct evcnt emul_fix_ftois =
970     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul fix", "ftois");
971 static struct evcnt emul_fix_itofs =
972     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul fix", "itofs");
973 #if 0
974 static struct evcnt emul_fix_itoff =
975     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul fix", "itoff");
976 #endif
977 static struct evcnt emul_fix_itoft =
978     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul fix", "itoft");
979 static struct evcnt emul_fix_sqrtt =
980     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul fix", "sqrtt");
981 static struct evcnt emul_fix_sqrts =
982     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul fix", "sqrts");
983 
984 EVCNT_ATTACH_STATIC(emul_fix_ftoit);
985 EVCNT_ATTACH_STATIC(emul_fix_ftois);
986 EVCNT_ATTACH_STATIC(emul_fix_itofs);
987 #if 0
988 EVCNT_ATTACH_STATIC(emul_fix_itoff);
989 #endif
990 EVCNT_ATTACH_STATIC(emul_fix_itoft);
991 EVCNT_ATTACH_STATIC(emul_fix_sqrtt);
992 EVCNT_ATTACH_STATIC(emul_fix_sqrts);
993 
994 static void
995 emul_fix(struct lwp *l, const alpha_instruction *inst)
996 {
997 	union {
998 		f_float f;
999 		s_float s;
1000 		t_float t;
1001 	} fmem;
1002 	register_t *regptr;
1003 
1004 	KASSERT(l == curlwp);
1005 
1006 	/*
1007 	 * FIX instructions don't cause any exceptions, including
1008 	 * MM exceptions.  However, they are equivalent in result
1009 	 * to e.g. STL,LDF.  We will just assume that we can access
1010 	 * our kernel stack, and thus no exception checks are
1011 	 * required.
1012 	 */
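	/*
	 * Concretely: the transfer goes through an on-stack temporary
	 * ("fmem") -- alpha_stt()/alpha_sts() spill the source FP register
	 * into it for FTOIT/FTOIS, and alpha_ldt()/alpha_lds() load it into
	 * the destination FP register for ITOFT/ITOFS -- with FEN enabled
	 * around the sequence and preemption disabled so the FP state
	 * cannot migrate underneath us.
	 */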
1013 
1014 	kpreempt_disable();
1015 	if ((l->l_md.md_flags & MDLWP_FPACTIVE) == 0) {
1016 		fpu_load();
1017 	}
1018 	alpha_pal_wrfen(1);
1019 
1020 	if (inst->float_format.opcode == op_intmisc) {
1021 		regptr = irp(l, inst->float_format.fc);
1022 		switch (inst->float_format.function) {
1023 		case op_ftoit:
1024 			EMUL_COUNT(emul_fix_ftoit);
1025 			alpha_stt(inst->float_format.fa, &fmem.t);
1026 			if (regptr != NULL) {
1027 				*regptr = fmem.t.i;
1028 			}
1029 			break;
1030 
1031 		case op_ftois:
1032 			EMUL_COUNT(emul_fix_ftois);
1033 			alpha_sts(inst->float_format.fa, &fmem.s);
1034 			if (regptr != NULL) {
1035 				*regptr = (int32_t)fmem.s.i;
1036 			}
1037 			break;
1038 
1039 		default:
1040 			panic("%s: bad intmisc function=0x%x\n", __func__,
1041 			    inst->float_format.function);
1042 		}
1043 	} else if (inst->float_format.opcode == op_fix_float) {
1044 		regptr = irp(l, inst->float_format.fa);
1045 		register_t regval = (regptr != NULL) ? *regptr : 0;
1046 
1047 		switch (inst->float_format.function) {
1048 		case op_itofs:
1049 			EMUL_COUNT(emul_fix_itofs);
1050 			fmem.s.i = (uint32_t)regval;
1051 			alpha_lds(inst->float_format.fc, &fmem.s);
1052 			break;
1053 
1054 		/*
1055 		 * The Book says about ITOFF:
1056 		 *
1057 		 *	ITOFF is equivalent to the following sequence,
1058 		 *	except that the word swapping that LDF normally
1059 		 *	performs is not performed by ITOFF.
1060 		 *
1061 		 *		STL
1062 		 *		LDF
1063 		 *
1064 		 * ...implying that we can't actually use LDF here ??? So
1065 		 * we'll skip it for now.
1066 		 */
1067 
1068 		case op_itoft:
1069 			EMUL_COUNT(emul_fix_itoft);
1070 			fmem.t.i = regval;
1071 			alpha_ldt(inst->float_format.fc, &fmem.t);
1072 			break;
1073 
1074 		default:
1075 			panic("%s: bad fix_float function=0x%x\n", __func__,
1076 			    inst->float_format.function);
1077 		}
1078 	} else {
1079 		panic("%s: bad opcode=0x%02x", __func__,
1080 		    inst->float_format.opcode);
1081 	}
1082 
1083 	alpha_pal_wrfen(0);
1084 	kpreempt_enable();
1085 }
1086 
1087 static struct evcnt emul_bwx_ldbu =
1088     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul bwx", "ldbu");
1089 static struct evcnt emul_bwx_ldwu =
1090     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul bwx", "ldwu");
1091 static struct evcnt emul_bwx_stb =
1092     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul bwx", "stb");
1093 static struct evcnt emul_bwx_stw =
1094     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul bwx", "stw");
1095 static struct evcnt emul_bwx_sextb =
1096     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul bwx", "sextb");
1097 static struct evcnt emul_bwx_sextw =
1098     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul bwx", "sextw");
1099 
1100 EVCNT_ATTACH_STATIC(emul_bwx_ldbu);
1101 EVCNT_ATTACH_STATIC(emul_bwx_ldwu);
1102 EVCNT_ATTACH_STATIC(emul_bwx_stb);
1103 EVCNT_ATTACH_STATIC(emul_bwx_stw);
1104 EVCNT_ATTACH_STATIC(emul_bwx_sextb);
1105 EVCNT_ATTACH_STATIC(emul_bwx_sextw);
1106 
1107 static struct evcnt emul_cix_ctpop =
1108     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul cix", "ctpop");
1109 static struct evcnt emul_cix_ctlz =
1110     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul cix", "ctlz");
1111 static struct evcnt emul_cix_cttz =
1112     EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "emul cix", "cttz");
1113 
1114 EVCNT_ATTACH_STATIC(emul_cix_ctpop);
1115 EVCNT_ATTACH_STATIC(emul_cix_ctlz);
1116 EVCNT_ATTACH_STATIC(emul_cix_cttz);
1117 
1118 /*
1119  * Reserved/unimplemented instruction (opDec fault) handler
1120  *
1121  * Argument is the process that caused it.  No useful information
1122  * is passed to the trap handler other than the fault type.  The
1123  * address of the instruction that caused the fault is 4 less than
1124  * the PC stored in the trap frame.
1125  *
1126  * If the instruction is emulated successfully, this function returns 0.
1127  * Otherwise, this function returns the signal to deliver to the process,
1128  * and fills in *ucodep with the code to be delivered.
1129  */
1130 int
1131 handle_opdec(struct lwp *l, u_long *ucodep)
1132 {
1133 	alpha_instruction inst;
1134 	register_t *regptr, memaddr;
1135 	uint64_t inst_pc;
1136 	int sig;
1137 
1138 	/*
1139 	 * Read USP into frame in case it's going to be used or modified.
1140 	 * This keeps us from having to check for it in lots of places
1141 	 * later.
1142 	 */
1143 	l->l_md.md_tf->tf_regs[FRAME_SP] = alpha_pal_rdusp();
1144 
1145 	inst_pc = memaddr = l->l_md.md_tf->tf_regs[FRAME_PC] - 4;
1146 	if (ufetch_int((void *)inst_pc, &inst.bits) != 0) {
1147 		/*
1148 		 * really, this should never happen, but in case it
1149 		 * does we handle it.
1150 		 */
1151 		printf("WARNING: handle_opdec() couldn't fetch instruction\n");
1152 		goto sigsegv;
1153 	}
1154 
1155 	switch (inst.generic_format.opcode) {
1156 	case op_ldbu:
1157 	case op_ldwu:
1158 	case op_stw:
1159 	case op_stb:
1160 		regptr = irp(l, inst.mem_format.rb);
1161 		if (regptr != NULL)
1162 			memaddr = *regptr;
1163 		else
1164 			memaddr = 0;
1165 		memaddr += inst.mem_format.displacement;
1166 
1167 		regptr = irp(l, inst.mem_format.ra);
1168 
1169 		if (inst.mem_format.opcode == op_ldwu ||
1170 		    inst.mem_format.opcode == op_stw) {
1171 			if (memaddr & 0x01) {
1172 				if (inst.mem_format.opcode == op_ldwu) {
1173 					EMUL_COUNT(emul_bwx_ldwu);
1174 				} else {
1175 					EMUL_COUNT(emul_bwx_stw);
1176 				}
1177 				sig = unaligned_fixup(memaddr,
1178 				    inst.mem_format.opcode,
1179 				    inst.mem_format.ra, l);
1180 				if (sig)
1181 					goto unaligned_fixup_sig;
1182 				break;
1183 			}
1184 		}
1185 
1186 		/*
1187 		 * We know the addresses are aligned, so it's safe to
1188 		 * use _u{fetch,store}_{8,16}().  Note, these are
1189 		 * __UFETCHSTORE_PRIVATE, but this is MD code, and
1190 		 * we know the details of the alpha implementation.
1191 		 */
1192 
1193 		if (inst.mem_format.opcode == op_ldbu) {
1194 			uint8_t b;
1195 
1196 			EMUL_COUNT(emul_bwx_ldbu);
1197 			if (_ufetch_8((void *)memaddr, &b) != 0)
1198 				goto sigsegv;
1199 			if (regptr != NULL)
1200 				*regptr = b;
1201 		} else if (inst.mem_format.opcode == op_ldwu) {
1202 			uint16_t w;
1203 
1204 			EMUL_COUNT(emul_bwx_ldwu);
1205 			if (_ufetch_16((void *)memaddr, &w) != 0)
1206 				goto sigsegv;
1207 			if (regptr != NULL)
1208 				*regptr = w;
1209 		} else if (inst.mem_format.opcode == op_stw) {
1210 			uint16_t w;
1211 
1212 			EMUL_COUNT(emul_bwx_stw);
1213 			w = (regptr != NULL) ? *regptr : 0;
1214 			if (_ustore_16((void *)memaddr, w) != 0)
1215 				goto sigsegv;
1216 		} else if (inst.mem_format.opcode == op_stb) {
1217 			uint8_t b;
1218 
1219 			EMUL_COUNT(emul_bwx_stb);
1220 			b = (regptr != NULL) ? *regptr : 0;
1221 			if (_ustore_8((void *)memaddr, b) != 0)
1222 				goto sigsegv;
1223 		}
1224 		break;
1225 
1226 	case op_intmisc:
1227 		if (inst.operate_generic_format.function == op_sextb &&
1228 		    inst.operate_generic_format.ra == 31) {
1229 			int8_t b;
1230 
1231 			EMUL_COUNT(emul_bwx_sextb);
1232 			if (inst.operate_generic_format.is_lit) {
1233 				b = inst.operate_lit_format.literal;
1234 			} else {
1235 				if (inst.operate_reg_format.sbz != 0)
1236 					goto sigill;
1237 				regptr = irp(l, inst.operate_reg_format.rb);
1238 				b = (regptr != NULL) ? *regptr : 0;
1239 			}
1240 
1241 			regptr = irp(l, inst.operate_generic_format.rc);
1242 			if (regptr != NULL)
1243 				*regptr = b;
1244 			break;
1245 		}
1246 		if (inst.operate_generic_format.function == op_sextw &&
1247 		    inst.operate_generic_format.ra == 31) {
1248 			int16_t w;
1249 
1250 			EMUL_COUNT(emul_bwx_sextw);
1251 			if (inst.operate_generic_format.is_lit) {
1252 				w = inst.operate_lit_format.literal;
1253 			} else {
1254 				if (inst.operate_reg_format.sbz != 0)
1255 					goto sigill;
1256 				regptr = irp(l, inst.operate_reg_format.rb);
1257 				w = (regptr != NULL) ? *regptr : 0;
1258 			}
1259 
1260 			regptr = irp(l, inst.operate_generic_format.rc);
1261 			if (regptr != NULL)
1262 				*regptr = w;
1263 			break;
1264 		}
1265 		if (inst.operate_reg_format.function == op_ctpop &&
1266 		    inst.operate_reg_format.zero == 0 &&
1267 		    inst.operate_reg_format.sbz == 0 &&
1268 		    inst.operate_reg_format.ra == 31) {
1269 			unsigned long val;
1270 			unsigned int res;
1271 
1272 			EMUL_COUNT(emul_cix_ctpop);
1273 			regptr = irp(l, inst.operate_reg_format.rb);
1274 			val = (regptr != NULL) ? *regptr : 0;
1275 			res = popcount64(val);
1276 			regptr = irp(l, inst.operate_reg_format.rc);
1277 			if (regptr != NULL) {
1278 				*regptr = res;
1279 			}
1280 			break;
1281 		}
1282 		if (inst.operate_reg_format.function == op_ctlz &&
1283 		    inst.operate_reg_format.zero == 0 &&
1284 		    inst.operate_reg_format.sbz == 0 &&
1285 		    inst.operate_reg_format.ra == 31) {
1286 			unsigned long val;
1287 			unsigned int res;
1288 
1289 			EMUL_COUNT(emul_cix_ctlz);
1290 			regptr = irp(l, inst.operate_reg_format.rb);
1291 			val = (regptr != NULL) ? *regptr : 0;
1292 			res = fls64(val);
1293 			res = (res == 0) ? 64 : 64 - res;
1294 			regptr = irp(l, inst.operate_reg_format.rc);
1295 			if (regptr != NULL) {
1296 				*regptr = res;
1297 			}
1298 			break;
1299 		}
1300 		if (inst.operate_reg_format.function == op_cttz &&
1301 		    inst.operate_reg_format.zero == 0 &&
1302 		    inst.operate_reg_format.sbz == 0 &&
1303 		    inst.operate_reg_format.ra == 31) {
1304 			unsigned long val;
1305 			unsigned int res;
1306 
1307 			EMUL_COUNT(emul_cix_cttz);
1308 			regptr = irp(l, inst.operate_reg_format.rb);
1309 			val = (regptr != NULL) ? *regptr : 0;
1310 			res = ffs64(val);
1311 			res = (res == 0) ? 64 : res - 1;
1312 			regptr = irp(l, inst.operate_reg_format.rc);
1313 			if (regptr != NULL) {
1314 				*regptr = res;
1315 			}
1316 			break;
1317 		}
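		/*
		 * Mapping check for the CTLZ/CTTZ emulations above: fls64()
		 * and ffs64() return a 1-based bit index (0 for a zero
		 * argument), so e.g. val = 0x8 gives ffs64() == 4 and a CTTZ
		 * result of 3, and val = 1 gives fls64() == 1 and a CTLZ
		 * result of 63; a zero operand yields 64 for both.
		 */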
1318 
1319 		/*
1320 		 * FTOIS and FTOIT are in Floating Operate format according
1321 		 * to The Book, which is nearly identical to the Reg Operate
1322 		 * format, but the function field of those overlaps the
1323 		 * "zero" and "sbz" fields and the FTOIS and FTOIT function
1324 		 * codes conviently has zero bits in those fields.
1325 		 */
1326 		if ((inst.float_format.function == op_ftoit ||
1327 		     inst.float_format.function == op_ftois) &&
1328 		    inst.float_format.fb == 31) {
1329 			/*
1330 			 * These FIX instructions can't cause any exceptions,
1331 			 * including MM exceptions.
1332 			 */
1333 			emul_fix(l, &inst);
1334 			break;
1335 		}
1336 
1337 		goto sigill;
1338 
1339 	case op_fix_float:
1340 		if ((inst.float_format.function == op_itofs ||
1341 		     /* ITOFF is a bit more complicated; skip it for now. */
1342 		     /* inst.float_format.function == op_itoff || */
1343 		     inst.float_format.function == op_itoft) &&
1344 		    inst.float_format.fb == 31) {
1345 			/*
1346 			 * These FIX instructions can't cause any exceptions,
1347 			 * including MM exceptions.
1348 			 */
1349 			emul_fix(l, &inst);
1350 			break;
1351 		}
1352 
1353 		/*
1354 		 * The SQRT function encodings are explained in a nice
1355 		 * chart in fp_complete.c -- go read it.
1356 		 *
1357 		 * We only handle the IEEE variants here; we do not have
1358 		 * a VAX softfloat library.
1359 		 */
1360 		if (inst.float_detail.opclass == 11 /* IEEE SQRT */ &&
1361 		    inst.float_detail.fa == 31      /* Fa must be $f31 */ &&
1362 		    (inst.float_detail.src == 0     /* SQRTS (S_float) */ ||
1363 		     inst.float_detail.src == 2     /* SQRTT (T_float) */)) {
1364 			if (inst.float_detail.src == 0) {
1365 				EMUL_COUNT(emul_fix_sqrts);
1366 			} else {
1367 				EMUL_COUNT(emul_fix_sqrtt);
1368 			}
1369 			sig = alpha_fp_complete_at(inst_pc, l, ucodep);
1370 			if (sig) {
1371 				if (sig == SIGSEGV) {
1372 					memaddr = inst_pc;
1373 					goto sigsegv;
1374 				}
1375 				return sig;
1376 			}
1377 			break;
1378 		}
1379 
1380 		goto sigill;
1381 
1382 	default:
1383 		goto sigill;
1384 	}
1385 
1386 	/*
1387 	 * Write back USP.  Note that in the error cases below,
1388 	 * nothing will have been successfully modified so we don't
1389 	 * have to write it out.
1390 	 */
1391 	alpha_pal_wrusp(l->l_md.md_tf->tf_regs[FRAME_SP]);
1392 
1393 	return (0);
1394 
1395 sigill:
1396 	*ucodep = ALPHA_IF_CODE_OPDEC;			/* trap type */
1397 	return (SIGILL);
1398 
1399 sigsegv:
1400 	sig = SIGSEGV;
1401 	l->l_md.md_tf->tf_regs[FRAME_PC] = inst_pc;	/* re-run instr. */
1402 unaligned_fixup_sig:
1403 	*ucodep = memaddr;				/* faulting address */
1404 	return (sig);
1405 }
1406 
1407 /* map alpha fp flags to ksiginfo fp codes */
1408 static int
1409 alpha_ucode_to_ksiginfo(u_long ucode)
1410 {
1411 	long i;
1412 
1413 	static const int alpha_ksiginfo_table[] = { FPE_FLTINV,
1414 					     FPE_FLTDIV,
1415 					     FPE_FLTOVF,
1416 					     FPE_FLTUND,
1417 					     FPE_FLTRES,
1418 					     FPE_INTOVF };
1419 
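	/*
	 * The ucode is a bit mask of exception conditions in the order of
	 * the table above (invalid operation, divide by zero, overflow,
	 * underflow, inexact result, integer overflow); when more than one
	 * bit is set, the lowest-numbered condition determines the code
	 * reported.
	 */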
1420 	for (i = 0; i < sizeof(alpha_ksiginfo_table) / sizeof(int); i++) {
1421 		if (ucode & (1 << i))
1422 			return (alpha_ksiginfo_table[i]);
1423 	}
1424 	/* punt if the flags weren't set */
1425 	return (0);
1426 }
1427 
1428 /*
1429  * Start a new LWP
1430  */
1431 void
1432 startlwp(void *arg)
1433 {
1434 	ucontext_t *uc = arg;
1435 	lwp_t *l = curlwp;
1436 	int error __diagused;
1437 
1438 	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
1439 	KASSERT(error == 0);
1440 
1441 	kmem_free(uc, sizeof(ucontext_t));
1442 	userret(l);
1443 }
1444