1 /* $NetBSD: trap.c,v 1.123 2023/10/05 19:41:04 ad Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matthew Fredette.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /* $OpenBSD: trap.c,v 1.30 2001/09/19 20:50:56 mickey Exp $ */
33
34 /*
35 * Copyright (c) 1998-2004 Michael Shalayeff
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
51 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
52 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
53 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
56 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
57 * THE POSSIBILITY OF SUCH DAMAGE.
58 */
59
60 #include <sys/cdefs.h>
61 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.123 2023/10/05 19:41:04 ad Exp $");
62
63 /* #define INTRDEBUG */
64 /* #define TRAPDEBUG */
65 /* #define USERTRACE */
66
67 #include "opt_kgdb.h"
68 #include "opt_ptrace.h"
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/syscall.h>
74 #include <sys/syscallvar.h>
75 #include <sys/mutex.h>
76 #include <sys/ktrace.h>
77 #include <sys/proc.h>
78 #include <sys/signalvar.h>
79 #include <sys/acct.h>
80 #include <sys/signal.h>
81 #include <sys/device.h>
82 #include <sys/kauth.h>
83 #include <sys/kmem.h>
84 #include <sys/userret.h>
85
86 #ifdef KGDB
87 #include <sys/kgdb.h>
88 #endif
89
90 #include <uvm/uvm.h>
91
92 #include <machine/iomod.h>
93 #include <machine/cpufunc.h>
94 #include <machine/reg.h>
95 #include <machine/autoconf.h>
96
97 #include <machine/db_machdep.h>
98
99 #include <hppa/hppa/machdep.h>
100
101 #include <ddb/db_output.h>
102 #include <ddb/db_interface.h>
103
104 #ifdef PTRACE
105 void ss_clear_breakpoints(struct lwp *l);
106 int ss_put_value(struct lwp *, vaddr_t, u_int);
107 int ss_get_value(struct lwp *, vaddr_t, u_int *);
108
109 /* single-step breakpoint */
110 #define SSBREAKPOINT (HPPA_BREAK_KERNEL | (HPPA_BREAK_SS << 13))
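/*
 * This is the break instruction that process_sstep() below plants at the
 * next user instruction; when the T_IBREAK|T_USER case in trap() sees this
 * exact opcode it reports TRAP_TRACE (single step) rather than TRAP_BRKPT.
 */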
111
112 #endif
113
114 #if defined(DEBUG) || defined(DIAGNOSTIC)
115 /*
116  * 0x6fc10000 is a stwm r1, d(sr0, sp), which is the last
117 * instruction in the function prologue that gcc -O0 uses.
118 * When we have this instruction we know the relationship
119 * between the stack pointer and the gcc -O0 frame pointer
120 * (in r3, loaded with the initial sp) for the body of a
121 * function.
122 *
123 * If the given instruction is a stwm r1, d(sr0, sp) where
124 * d > 0, we evaluate to d, else we evaluate to zero.
125 */
126 #define STWM_R1_D_SR0_SP(inst) \
127 (((inst) & 0xffffc001) == 0x6fc10000 ? (((inst) & 0x00003ff) >> 1) : 0)
128 #endif /* DEBUG || DIAGNOSTIC */
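
/*
 * Illustrative sketch only (not compiled in): given the current stack
 * pointer and the prologue store identified by STWM_R1_D_SR0_SP() above,
 * the gcc -O0 frame pointer for the function body is sp minus the
 * displacement, exactly as user_backtrace() recovers it below.  The
 * helper name recover_o0_fp is hypothetical and not part of this file.
 */
#if 0
static u_int
recover_o0_fp(u_int inst, u_int sp)
{
	u_int d = STWM_R1_D_SR0_SP(inst);

	return d ? sp - d : 0;	/* 0: not the expected prologue store */
}
#endif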
129
130 const char *trap_type[] = {
131 "invalid",
132 "HPMC",
133 "power failure",
134 "recovery counter",
135 "external interrupt",
136 "LPMC",
137 "ITLB miss fault",
138 "instruction protection",
139 "Illegal instruction",
140 "break instruction",
141 "privileged operation",
142 "privileged register",
143 "overflow",
144 "conditional",
145 "assist exception",
146 "DTLB miss",
147 "ITLB non-access miss",
148 "DTLB non-access miss",
149 "data protection/rights/alignment",
150 "data break",
151 "TLB dirty",
152 "page reference",
153 "assist emulation",
154 "higher-priv transfer",
155 "lower-priv transfer",
156 "taken branch",
157 "data access rights",
158 "data protection",
159 "unaligned data ref",
160 };
161 int trap_types = __arraycount(trap_type);
162
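/*
 * fpopmap maps the code found in the top 6 bits of a floating-point
 * exception register back to the major opcode of the offending FP
 * instruction, so the T_EXCEPTION handler below can reconstruct the
 * instruction word and hand it to hppa_fpu_emulate().
 */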
163 uint8_t fpopmap[] = {
164 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x0c, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00,
166 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
167 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
168 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
172 };
173
174 void pmap_hptdump(void);
175 void syscall(struct trapframe *, int *);
176
177 #if defined(DEBUG)
178 struct trapframe *sanity_frame;
179 struct lwp *sanity_lwp;
180 const char *sanity_string;
181 void frame_sanity_check(const char *, int, int, struct trapframe *,
182 struct lwp *);
183 #endif
184
185
186 #ifdef USERTRACE
187 /*
188 * USERTRACE is a crude facility that traces the PC of a single user process.
189 * This tracing is normally activated by the dispatching of a certain syscall
190 * with certain arguments - see the activation code in syscall().
191 */
192 static void user_backtrace(struct trapframe *, struct lwp *, int);
193 static void user_backtrace_raw(u_int, u_int);
194
195 u_int rctr_next_iioq;
196 #endif
197
198 static inline void
199 userret(struct lwp *l, struct trapframe *tf)
200 {
201 struct proc *p = l->l_proc;
202 int oticks = 0; /* XXX why zero? */
203
204 do {
205 l->l_md.md_astpending = 0;
206 //curcpu()->ci_data.cpu_nast++;
207 mi_userret(l);
208 } while (l->l_md.md_astpending);
209
210 /*
211 * If profiling, charge recent system time to the trapped pc.
212 */
213 if (p->p_stflag & PST_PROFIL) {
214 extern int psratio;
215
216 addupc_task(l, tf->tf_iioq_head,
217 (int)(p->p_sticks - oticks) * psratio);
218 }
219 }
220
221 /*
222 * This handles some messy kernel debugger details.
223 * It dispatches into either kgdb or DDB, and knows
224 * about some special things to do, like skipping over
225 * break instructions and how to really set up for
226 * a single-step.
227 */
228 #if defined(KGDB) || defined(DDB)
229 static int
230 trap_kdebug(int type, int code, struct trapframe *frame)
231 {
232 int handled;
233 u_int tf_iioq_head_old;
234 u_int tf_iioq_tail_old;
235
236 for (;;) {
237
238 /* This trap has not been handled. */
239 handled = 0;
240
241 /* Remember the instruction offset queue. */
242 tf_iioq_head_old = frame->tf_iioq_head;
243 tf_iioq_tail_old = frame->tf_iioq_tail;
244
245 #ifdef KGDB
246 /* Let KGDB handle it (if connected) */
247 if (!handled)
248 handled = kgdb_trap(type, frame);
249 #endif
250 #ifdef DDB
251 /* Let DDB handle it. */
252 if (!handled)
253 handled = kdb_trap(type, code, frame);
254 #endif
255
256 /* If this trap wasn't handled, return now. */
257 if (!handled)
258 return(0);
259
260 /*
261 * If the instruction offset queue head changed, but the offset
262 * queue tail didn't, assume that the user wants to jump to the
263 * head offset, and adjust the tail accordingly. This should
264 * fix the kgdb `jump' command, and can help DDB users who `set'
265 * the offset head but forget the tail.
266 */
267 if (frame->tf_iioq_head != tf_iioq_head_old &&
268 frame->tf_iioq_tail == tf_iioq_tail_old)
269 frame->tf_iioq_tail = frame->tf_iioq_head + 4;
270
271 /*
272 * This is some single-stepping support. If we're trying to
273 * step through a nullified instruction, just advance by hand
274 * and trap again. Otherwise, load the recovery counter with
275 * zero.
276 */
277 if (frame->tf_ipsw & PSW_R) {
278 #ifdef TRAPDEBUG
279 printf("(single stepping at head 0x%x tail 0x%x)\n",
280 frame->tf_iioq_head, frame->tf_iioq_tail);
281 #endif
282 if (frame->tf_ipsw & PSW_N) {
283 #ifdef TRAPDEBUG
284 printf("(single stepping past nullified)\n");
285 #endif
286
287 /* Advance the program counter. */
288 frame->tf_iioq_head = frame->tf_iioq_tail;
289 frame->tf_iioq_tail = frame->tf_iioq_head + 4;
290
291 /* Clear flags. */
292 frame->tf_ipsw &= ~(PSW_N|PSW_X|PSW_Y|PSW_Z|PSW_B|PSW_T|PSW_H|PSW_L);
293
294 /* Simulate another trap. */
295 type = T_RECOVERY;
296 continue;
297 }
298 frame->tf_rctr = 0;
299 }
300
301 /* We handled this trap. */
302 return (1);
303 }
304 /* NOTREACHED */
305 }
306 #else /* !KGDB && !DDB */
307 #define trap_kdebug(t, c, f) (0)
308 #endif /* !KGDB && !DDB */
309
310 #if defined(DEBUG) || defined(USERTRACE)
311 /*
312  * These functions give a crude usermode backtrace.  They really only work
313  * when code has been compiled without optimization, as they assume a
314  * certain function prologue sets up a frame pointer and stores the return
315  * pointer and arguments in it.
316 */
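/*
 * The frame layout assumed here (a sketch matching the loads below rather
 * than an authoritative ABI description): the caller's frame pointer is
 * read from *(fp + 0), the return pointer from *(fp - 5 words), and the
 * first four arguments via HPPA_FRAME_CARG(n, fp).
 */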
317 static void
318 user_backtrace_raw(u_int pc, u_int fp)
319 {
320 int frame_number;
321 int arg_number;
322 uint32_t val;
323
324 for (frame_number = 0;
325 frame_number < 100 && pc > HPPA_PC_PRIV_MASK && fp;
326 frame_number++) {
327
328 printf("%3d: pc=%08x%s fp=0x%08x", frame_number,
329 pc & ~HPPA_PC_PRIV_MASK, USERMODE(pc) ? " " : "**", fp);
330 for (arg_number = 0; arg_number < 4; arg_number++) {
331 if (ufetch_32(HPPA_FRAME_CARG(arg_number, fp),
332 &val) == 0) {
333 printf(" arg%d=0x%08x", arg_number, val);
334 } else {
335 printf(" arg%d=<bad address>", arg_number);
336 }
337 }
338 printf("\n");
339 if (ufetch_int((((uint32_t *) fp) - 5), &pc) != 0) {
340 printf(" ufetch for pc failed\n");
341 break;
342 }
343 if (ufetch_int((((uint32_t *) fp) + 0), &fp) != 0) {
344 printf(" ufetch for fp failed\n");
345 break;
346 }
347 }
348 printf(" backtrace stopped with pc %08x fp 0x%08x\n", pc, fp);
349 }
350
351 static void
352 user_backtrace(struct trapframe *tf, struct lwp *l, int type)
353 {
354 struct proc *p = l->l_proc;
355 u_int pc, fp, inst;
356
357 /*
358 * Display any trap type that we have.
359 */
360 if (type >= 0)
361 printf("pid %d (%s) trap #%d\n",
362 p->p_pid, p->p_comm, type & ~T_USER);
363
364 /*
365 * Assuming that the frame pointer in r3 is valid,
366 * dump out a stack trace.
367 */
368 fp = tf->tf_r3;
369 printf("pid %d (%s) backtrace, starting with fp 0x%08x\n",
370 p->p_pid, p->p_comm, fp);
371 user_backtrace_raw(tf->tf_iioq_head, fp);
372
373 /*
374 * In case the frame pointer in r3 is not valid, assuming the stack
375 * pointer is valid and the faulting function is a non-leaf, if we can
376 * find its prologue we can recover its frame pointer.
377 */
378 pc = tf->tf_iioq_head;
379 fp = tf->tf_sp - HPPA_FRAME_SIZE;
380 printf("pid %d (%s) backtrace, starting with sp 0x%08x pc 0x%08x\n",
381 p->p_pid, p->p_comm, tf->tf_sp, pc);
382 for (pc &= ~HPPA_PC_PRIV_MASK; pc > 0; pc -= sizeof(inst)) {
383 if (ufetch_int((u_int *) pc, &inst) != 0) {
384 printf(" ufetch for inst at pc %08x failed\n", pc);
385 break;
386 }
387 /* Check for the prologue instruction that sets sp. */
388 if (STWM_R1_D_SR0_SP(inst)) {
389 fp = tf->tf_sp - STWM_R1_D_SR0_SP(inst);
390 printf(" sp from fp at pc %08x: %08x\n", pc, inst);
391 break;
392 }
393 }
394 user_backtrace_raw(tf->tf_iioq_head, fp);
395 }
396 #endif /* DEBUG || USERTRACE */
397
398 #ifdef DEBUG
399 /*
400 * This sanity-checks a trapframe. It is full of various assumptions about
401 * what a healthy CPU state should be, with some documented elsewhere, some not.
402 */
403 void
404 frame_sanity_check(const char *func, int line, int type, struct trapframe *tf,
405 struct lwp *l)
406 {
407 #if 0
408 extern int kernel_text;
409 extern int etext;
410 #endif
411 struct cpu_info *ci = curcpu();
412
413 #define SANITY(e) \
414 do { \
415 if (sanity_frame == NULL && !(e)) { \
416 sanity_frame = tf; \
417 sanity_lwp = l; \
418 sanity_string = #e; \
419 } \
420 } while (/* CONSTCOND */ 0)
421
422 KASSERT(l != NULL);
423 SANITY((tf->tf_ipsw & ci->ci_psw) == ci->ci_psw);
424 SANITY((ci->ci_psw & PSW_I) == 0 || tf->tf_eiem != 0);
425 if (tf->tf_iisq_head == HPPA_SID_KERNEL) {
426 vaddr_t minsp, maxsp, uv;
427
428 uv = uvm_lwp_getuarea(l);
429
430 /*
431 * If the trap happened in the gateway page, we take the easy
432 * way out and assume that the trapframe is okay.
433 */
434 if ((tf->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)
435 goto out;
436
437 SANITY(!USERMODE(tf->tf_iioq_head));
438 SANITY(!USERMODE(tf->tf_iioq_tail));
439
440 /*
441 * Don't check the instruction queues or stack on interrupts
442 * as we could be in the sti code (outside normal kernel
443 * text) or switching LWPs (curlwp and sp are not in sync)
444 */
445 if ((type & ~T_USER) == T_INTERRUPT)
446 goto out;
447 #if 0
448 SANITY(tf->tf_iioq_head >= (u_int) &kernel_text);
449 SANITY(tf->tf_iioq_head < (u_int) &etext);
450 SANITY(tf->tf_iioq_tail >= (u_int) &kernel_text);
451 SANITY(tf->tf_iioq_tail < (u_int) &etext);
452 #endif
453
454 maxsp = uv + USPACE + PAGE_SIZE;
455 minsp = uv + PAGE_SIZE;
456
457 SANITY(tf->tf_sp >= minsp && tf->tf_sp < maxsp);
458 } else {
459 struct pcb *pcb = lwp_getpcb(l);
460
461 SANITY(USERMODE(tf->tf_iioq_head));
462 SANITY(USERMODE(tf->tf_iioq_tail));
463 SANITY(tf->tf_cr30 == (u_int)pcb->pcb_fpregs);
464 }
465 #undef SANITY
466 out:
467 if (sanity_frame == tf) {
468 printf("insanity: '%s' at %s:%d type 0x%x tf %p lwp %p "
469 "sp 0x%x pc 0x%x\n",
470 sanity_string, func, line, type, sanity_frame, sanity_lwp,
471 tf->tf_sp, tf->tf_iioq_head);
472 (void) trap_kdebug(T_IBREAK, 0, tf);
473 sanity_frame = NULL;
474 sanity_lwp = NULL;
475 }
476 }
477 #endif /* DEBUG */
478
479
480 #define __PABITS(x, y) __BITS(31 - (x), 31 - (y))
481 #define __PABIT(x) __BIT(31 - (x))
482
483 #define LPA_MASK \
484 ( __PABITS(0, 5) | \
485 __PABITS(18, 25))
486 #define LPA \
487 (__SHIFTIN(1, __PABITS(0, 5)) | \
488 __SHIFTIN(0x4d, __PABITS(18, 25)))
489
490
491 #define PROBE_ENCS (0x46 | 0xc6 | 0x47 | 0xc7)
492 #define PROBE_PL __PABITS(14, 15)
493 #define PROBE_IMMED __PABIT(18)
494 #define PROBE_RW __PABIT(25)
495
496 #define PROBE_MASK \
497 (( __PABITS(0, 5) | \
498 __PABITS(18, 25) | \
499 __PABIT(26)) ^ \
500 (PROBE_IMMED | PROBE_RW))
501
502 #define PROBE \
503 ((__SHIFTIN(1, __PABITS(0, 5)) | \
504 __SHIFTIN(PROBE_ENCS, __PABITS(18, 25)) | \
505 __SHIFTIN(0, __PABIT(26))) ^ \
506 (PROBE_IMMED | PROBE_RW))
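
/*
 * A short note on the matching above: __PABITS()/__PABIT() translate
 * PA-RISC's MSB-first bit numbering into the __BITS() convention, so
 * __PABITS(0, 5) is the 6-bit major opcode field.  An lpa instruction
 * therefore satisfies (opcode & LPA_MASK) == LPA (major opcode 0x01,
 * extension 0x4d in PA bits 18..25), and the probe family matches
 * PROBE_MASK/PROBE the same way with the immediate and read/write bits
 * excluded from the comparison.  The helpers below are hypothetical and
 * only illustrate the tests used in trap().
 */
#if 0
static inline bool
opcode_is_lpa(u_int opcode)
{
	return (opcode & LPA_MASK) == LPA;
}

static inline bool
opcode_is_probe(u_int opcode)
{
	return (opcode & PROBE_MASK) == PROBE;
}
#endif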
507
508 /* for hppa64 */
509 CTASSERT(sizeof(register_t) == sizeof(u_int));
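/*
 * hppa_regmap maps an architectural general-register number to the index
 * of the corresponding register_t slot in struct trapframe, so that
 * tf_getregno()/tf_setregno() below can read or write whichever register
 * a faulting instruction names (e.g. the target of an lpa or probe).
 */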
510 size_t hppa_regmap[] = {
511 0, /* r0 is special case */
512 offsetof(struct trapframe, tf_r1 ) / sizeof(register_t),
513 offsetof(struct trapframe, tf_rp ) / sizeof(register_t),
514 offsetof(struct trapframe, tf_r3 ) / sizeof(register_t),
515 offsetof(struct trapframe, tf_r4 ) / sizeof(register_t),
516 offsetof(struct trapframe, tf_r5 ) / sizeof(register_t),
517 offsetof(struct trapframe, tf_r6 ) / sizeof(register_t),
518 offsetof(struct trapframe, tf_r7 ) / sizeof(register_t),
519 offsetof(struct trapframe, tf_r8 ) / sizeof(register_t),
520 offsetof(struct trapframe, tf_r9 ) / sizeof(register_t),
521 offsetof(struct trapframe, tf_r10 ) / sizeof(register_t),
522 offsetof(struct trapframe, tf_r11 ) / sizeof(register_t),
523 offsetof(struct trapframe, tf_r12 ) / sizeof(register_t),
524 offsetof(struct trapframe, tf_r13 ) / sizeof(register_t),
525 offsetof(struct trapframe, tf_r14 ) / sizeof(register_t),
526 offsetof(struct trapframe, tf_r15 ) / sizeof(register_t),
527 offsetof(struct trapframe, tf_r16 ) / sizeof(register_t),
528 offsetof(struct trapframe, tf_r17 ) / sizeof(register_t),
529 offsetof(struct trapframe, tf_r18 ) / sizeof(register_t),
530 offsetof(struct trapframe, tf_t4 ) / sizeof(register_t),
531 offsetof(struct trapframe, tf_t3 ) / sizeof(register_t),
532 offsetof(struct trapframe, tf_t2 ) / sizeof(register_t),
533 offsetof(struct trapframe, tf_t1 ) / sizeof(register_t),
534 offsetof(struct trapframe, tf_arg3) / sizeof(register_t),
535 offsetof(struct trapframe, tf_arg2) / sizeof(register_t),
536 offsetof(struct trapframe, tf_arg1) / sizeof(register_t),
537 offsetof(struct trapframe, tf_arg0) / sizeof(register_t),
538 offsetof(struct trapframe, tf_dp ) / sizeof(register_t),
539 offsetof(struct trapframe, tf_ret0) / sizeof(register_t),
540 offsetof(struct trapframe, tf_ret1) / sizeof(register_t),
541 offsetof(struct trapframe, tf_sp ) / sizeof(register_t),
542 offsetof(struct trapframe, tf_r31 ) / sizeof(register_t),
543 };
544
545
546 static inline register_t
547 tf_getregno(struct trapframe *tf, u_int regno)
548 {
549 register_t *tf_reg = (register_t *)tf;
550 if (regno == 0)
551 return 0;
552 else
553 return tf_reg[hppa_regmap[regno]];
554 }
555
556 static inline void
557 tf_setregno(struct trapframe *tf, u_int regno, register_t val)
558 {
559 register_t *tf_reg = (register_t *)tf;
560 if (regno == 0)
561 return;
562 else
563 tf_reg[hppa_regmap[regno]] = val;
564 }
565
566 void
567 trap(int type, struct trapframe *frame)
568 {
569 struct lwp *l;
570 struct proc *p;
571 struct pcb *pcb;
572 vaddr_t va;
573 struct vm_map *map;
574 struct vmspace *vm;
575 vm_prot_t vftype;
576 pa_space_t space;
577 ksiginfo_t ksi;
578 u_int opcode, onfault;
579 int ret;
580 const char *tts = "reserved";
581 int trapnum;
582 #ifdef DIAGNOSTIC
583 extern int emergency_stack_start, emergency_stack_end;
584 struct cpu_info *ci = curcpu();
585 int oldcpl = ci->ci_cpl;
586 #endif
587
588 trapnum = type & ~T_USER;
589 opcode = frame->tf_iir;
590
591 if (trapnum <= T_EXCEPTION || trapnum == T_HIGHERPL ||
592 trapnum == T_LOWERPL || trapnum == T_TAKENBR ||
593 trapnum == T_IDEBUG || trapnum == T_PERFMON) {
594 va = frame->tf_iioq_head;
595 space = frame->tf_iisq_head;
596 vftype = VM_PROT_EXECUTE;
597 } else {
598 va = frame->tf_ior;
599 space = frame->tf_isr;
600 vftype = inst_store(opcode) ? VM_PROT_WRITE : VM_PROT_READ;
601 }
602
603 KASSERT(curlwp != NULL);
604 l = curlwp;
605 p = l->l_proc;
606
607 #ifdef DIAGNOSTIC
608 /*
609 * If we are on the emergency stack, then we either got
610 * a fault on the kernel stack, or we're just handling
611 * a trap for the machine check handler (which also
612 * runs on the emergency stack).
613 *
614 * We *very crudely* differentiate between the two cases
615 * by checking the faulting instruction: if it is the
616 * function prologue instruction that stores the old
617 * frame pointer and updates the stack pointer, we assume
618 * that we faulted on the kernel stack.
619 *
620 * In this case, not completing that instruction will
621 * probably confuse backtraces in kgdb/ddb. Completing
622 * it would be difficult, because we already faulted on
623 * that part of the stack, so instead we fix up the
624 * frame as if the function called has just returned.
625 * This has peculiar knowledge about what values are in
626 * what registers during the "normal gcc -g" prologue.
627 */
628 if (&type >= &emergency_stack_start &&
629 &type < &emergency_stack_end &&
630 type != T_IBREAK && STWM_R1_D_SR0_SP(opcode)) {
631 /* Restore the caller's frame pointer. */
632 frame->tf_r3 = frame->tf_r1;
633 /* Restore the caller's instruction offsets. */
634 frame->tf_iioq_head = frame->tf_rp;
635 frame->tf_iioq_tail = frame->tf_iioq_head + 4;
636 goto dead_end;
637 }
638 #endif /* DIAGNOSTIC */
639
640 #ifdef DEBUG
641 frame_sanity_check(__func__, __LINE__, type, frame, l);
642 #endif /* DEBUG */
643
644 if (frame->tf_flags & TFF_LAST)
645 l->l_md.md_regs = frame;
646
647 	if (trapnum < trap_types)
648 tts = trap_type[trapnum];
649
650 #ifdef TRAPDEBUG
651 if (trapnum != T_INTERRUPT && trapnum != T_IBREAK)
652 printf("trap: %d, %s for %x:%lx at %x:%x, fp=%p, rp=%x\n",
653 type, tts, space, va, frame->tf_iisq_head,
654 frame->tf_iioq_head, frame, frame->tf_rp);
655 else if (trapnum == T_IBREAK)
656 printf("trap: break instruction %x:%x at %x:%x, fp=%p\n",
657 break5(opcode), break13(opcode),
658 frame->tf_iisq_head, frame->tf_iioq_head, frame);
659
660 {
661 extern int etext;
662 if (frame < (struct trapframe *)&etext) {
663 printf("trap: bogus frame ptr %p\n", frame);
664 goto dead_end;
665 }
666 }
667 #endif
668
669 pcb = lwp_getpcb(l);
670
671 /* If this is a trap, not an interrupt, reenable interrupts. */
672 if (trapnum != T_INTERRUPT) {
673 curcpu()->ci_data.cpu_ntrap++;
674 mtctl(frame->tf_eiem, CR_EIEM);
675 }
676
677 const bool user = (type & T_USER) != 0;
678 switch (type) {
679 case T_NONEXIST:
680 case T_NONEXIST | T_USER:
681 #if !defined(DDB) && !defined(KGDB)
682 /* we've got screwed up by the central scrutinizer */
683 panic ("trap: elvis has just left the building!");
684 break;
685 #else
686 goto dead_end;
687 #endif
688 case T_RECOVERY | T_USER:
689 #ifdef USERTRACE
690 for (;;) {
691 if (frame->tf_iioq_head != rctr_next_iioq)
692 printf("-%08x\nr %08x",
693 rctr_next_iioq - 4,
694 frame->tf_iioq_head);
695 rctr_next_iioq = frame->tf_iioq_head + 4;
696 if (frame->tf_ipsw & PSW_N) {
697 /* Advance the program counter. */
698 frame->tf_iioq_head = frame->tf_iioq_tail;
699 frame->tf_iioq_tail = frame->tf_iioq_head + 4;
700 /* Clear flags. */
701 frame->tf_ipsw &= ~(PSW_N|PSW_X|PSW_Y|PSW_Z|PSW_B|PSW_T|PSW_H|PSW_L);
702 /* Simulate another trap. */
703 continue;
704 }
705 break;
706 }
707 frame->tf_rctr = 0;
708 break;
709 #endif /* USERTRACE */
710 case T_RECOVERY:
711 #if !defined(DDB) && !defined(KGDB)
712 /* XXX will implement later */
713 printf ("trap: handicapped");
714 break;
715 #else
716 goto dead_end;
717 #endif
718
719 case T_EMULATION | T_USER:
720 hppa_fpu_emulate(frame, l, opcode);
721 break;
722
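	/*
	 * pcb_onfault, when non-zero, is the recovery address installed by
	 * fault-tolerant kernel copy routines (copyin()/copyout() and
	 * friends): instead of panicking on a kernel-mode fault we resume
	 * there with the error code in ret0 (see do_onfault below).
	 */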
723 case T_DATALIGN:
724 onfault = pcb->pcb_onfault;
725 if (onfault) {
726 ret = EFAULT;
727 do_onfault:
728 frame->tf_iioq_head = onfault;
729 frame->tf_iioq_tail = frame->tf_iioq_head + 4;
730 frame->tf_ret0 = ret;
731 break;
732 }
733 /*FALLTHROUGH*/
734
735 #ifdef DIAGNOSTIC
736 /* these just can't happen ever */
737 case T_PRIV_OP:
738 case T_PRIV_REG:
739 /* these just can't make it to the trap() ever */
740 case T_HPMC:
741 case T_HPMC | T_USER:
742 case T_EMULATION:
743 case T_EXCEPTION:
744 #endif
745 case T_IBREAK:
746 case T_DBREAK:
747 dead_end:
748 if (type & T_USER) {
749 #ifdef DEBUG
750 user_backtrace(frame, l, type);
751 #endif
752 KSI_INIT_TRAP(&ksi);
753 ksi.ksi_signo = SIGILL;
754 ksi.ksi_code = ILL_ILLTRP;
755 ksi.ksi_trap = type;
756 ksi.ksi_addr = (void *)frame->tf_iioq_head;
757 trapsignal(l, &ksi);
758 break;
759 }
760 if (trap_kdebug(type, va, frame))
761 return;
762 else if (type == T_DATALIGN)
763 panic ("trap: %s at 0x%x", tts, (u_int) va);
764 else
765 panic ("trap: no debugger for \"%s\" (%d)", tts, type);
766 break;
767
768 case T_IBREAK | T_USER:
769 case T_DBREAK | T_USER:
770 KSI_INIT_TRAP(&ksi);
771 ksi.ksi_signo = SIGTRAP;
772 ksi.ksi_code = TRAP_BRKPT;
773 ksi.ksi_trap = trapnum;
774 ksi.ksi_addr = (void *)(frame->tf_iioq_head & ~HPPA_PC_PRIV_MASK);
775 #ifdef PTRACE
776 ss_clear_breakpoints(l);
777 if (opcode == SSBREAKPOINT)
778 ksi.ksi_code = TRAP_TRACE;
779 #endif
780 /* pass to user debugger */
781 trapsignal(l, &ksi);
782 break;
783
784 #ifdef PTRACE
785 case T_TAKENBR | T_USER:
786 ss_clear_breakpoints(l);
787
788 KSI_INIT_TRAP(&ksi);
789 ksi.ksi_signo = SIGTRAP;
790 ksi.ksi_code = TRAP_TRACE;
791 ksi.ksi_trap = trapnum;
792 ksi.ksi_addr = (void *)(frame->tf_iioq_head & ~HPPA_PC_PRIV_MASK);
793
794 /* pass to user debugger */
795 trapsignal(l, &ksi);
796 break;
797 #endif
798
799 case T_EXCEPTION | T_USER: { /* co-proc assist trap */
800 uint64_t *fpp;
801 uint32_t *pex, ex, inst;
802 int i;
803
804 hppa_fpu_flush(l);
805 fpp = (uint64_t *)pcb->pcb_fpregs;
806
807 /* skip the status register */
808 pex = (uint32_t *)&fpp[0];
809 pex++;
810
811 /* loop through the exception registers */
812 for (i = 1; i < 8 && !*pex; i++, pex++)
813 ;
814 KASSERT(i < 8);
815 ex = *pex;
816 *pex = 0;
817
818 /* reset the trap flag, as if there was none */
819 fpp[0] &= ~(((uint64_t)HPPA_FPU_T) << 32);
820
821 /* emulate the instruction */
822 inst = ((uint32_t)fpopmap[ex >> 26] << 26) | (ex & 0x03ffffff);
823 hppa_fpu_emulate(frame, l, inst);
824 }
825 break;
826
827 case T_OVERFLOW | T_USER:
828 KSI_INIT_TRAP(&ksi);
829 ksi.ksi_signo = SIGFPE;
830 ksi.ksi_code = SI_NOINFO;
831 ksi.ksi_trap = type;
832 ksi.ksi_addr = (void *)va;
833 trapsignal(l, &ksi);
834 break;
835
836 case T_CONDITION | T_USER:
837 KSI_INIT_TRAP(&ksi);
838 ksi.ksi_signo = SIGFPE;
839 ksi.ksi_code = FPE_INTDIV;
840 ksi.ksi_trap = type;
841 ksi.ksi_addr = (void *)va;
842 trapsignal(l, &ksi);
843 break;
844
845 case T_ILLEGAL | T_USER:
846 #ifdef DEBUG
847 user_backtrace(frame, l, type);
848 #endif
849 KSI_INIT_TRAP(&ksi);
850 ksi.ksi_signo = SIGILL;
851 ksi.ksi_code = ILL_ILLOPC;
852 ksi.ksi_trap = type;
853 ksi.ksi_addr = (void *)va;
854 trapsignal(l, &ksi);
855 break;
856
857 case T_PRIV_OP | T_USER:
858 #ifdef DEBUG
859 user_backtrace(frame, l, type);
860 #endif
861 KSI_INIT_TRAP(&ksi);
862 ksi.ksi_signo = SIGILL;
863 ksi.ksi_code = ILL_PRVOPC;
864 ksi.ksi_trap = type;
865 ksi.ksi_addr = (void *)va;
866 trapsignal(l, &ksi);
867 break;
868
869 case T_PRIV_REG | T_USER:
870 #ifdef DEBUG
871 user_backtrace(frame, l, type);
872 #endif
873 KSI_INIT_TRAP(&ksi);
874 ksi.ksi_signo = SIGILL;
875 ksi.ksi_code = ILL_PRVREG;
876 ksi.ksi_trap = type;
877 ksi.ksi_addr = (void *)va;
878 trapsignal(l, &ksi);
879 break;
880
881 	/* these should never get here */
882 case T_HIGHERPL | T_USER:
883 case T_LOWERPL | T_USER:
884 KSI_INIT_TRAP(&ksi);
885 ksi.ksi_signo = SIGSEGV;
886 ksi.ksi_code = SEGV_ACCERR;
887 ksi.ksi_trap = type;
888 ksi.ksi_addr = (void *)va;
889 trapsignal(l, &ksi);
890 break;
891
892 case T_IPROT | T_USER:
893 case T_DPROT | T_USER:
894 KSI_INIT_TRAP(&ksi);
895 ksi.ksi_signo = SIGSEGV;
896 ksi.ksi_code = SEGV_ACCERR;
897 ksi.ksi_trap = type;
898 ksi.ksi_addr = (void *)va;
899 trapsignal(l, &ksi);
900 break;
901
902 case T_ITLBMISSNA: case T_USER | T_ITLBMISSNA:
903 case T_DTLBMISSNA: case T_USER | T_DTLBMISSNA:
904 vm = p->p_vmspace;
905
906 if (!vm) {
907 #ifdef TRAPDEBUG
908 printf("trap: no vm, p=%p\n", p);
909 #endif
910 goto dead_end;
911 }
912
913 /*
914 * it could be a kernel map for exec_map faults
915 */
916 if (!user && space == HPPA_SID_KERNEL)
917 map = kernel_map;
918 else {
919 map = &vm->vm_map;
920 }
921
922 va = trunc_page(va);
923
924 if ((opcode & LPA_MASK) == LPA) {
925 /* lpa failure case */
926 const u_int regno =
927 __SHIFTOUT(opcode, __PABITS(27, 31));
928 tf_setregno(frame, regno, 0);
929 frame->tf_ipsw |= PSW_N;
930 } else if ((opcode & PROBE_MASK) == PROBE) {
931 u_int pl;
932 if ((opcode & PROBE_IMMED) == 0) {
933 pl = __SHIFTOUT(opcode, __PABITS(14, 15));
934 } else {
935 const u_int plreg =
936 __SHIFTOUT(opcode, __PABITS(11, 15));
937 pl = tf_getregno(frame, plreg);
938 }
939 bool ok = true;
940 if ((user && space == HPPA_SID_KERNEL) ||
941 (frame->tf_iioq_head & 3) != pl ||
942 (user && va >= VM_MAXUSER_ADDRESS)) {
943 ok = false;
944 } else {
945 /* Never call uvm_fault in interrupt context. */
946 KASSERT(curcpu()->ci_intr_depth == 0);
947
948 const bool read =
949 __SHIFTOUT(opcode, PROBE_RW) == 0;
950 onfault = pcb->pcb_onfault;
951 pcb->pcb_onfault = 0;
952 ret = uvm_fault(map, va, read ?
953 VM_PROT_READ : VM_PROT_WRITE);
954 pcb->pcb_onfault = onfault;
955
956 if (ret)
957 ok = false;
958 }
959 if (!ok) {
960 const u_int regno =
961 __SHIFTOUT(opcode, __PABITS(27, 31));
962 tf_setregno(frame, regno, 0);
963 frame->tf_ipsw |= PSW_N;
964 }
965 } else {
966 }
967 break;
968
969 case T_DATACC: case T_USER | T_DATACC:
970 case T_ITLBMISS: case T_USER | T_ITLBMISS:
971 case T_DTLBMISS: case T_USER | T_DTLBMISS:
972 case T_TLB_DIRTY: case T_USER | T_TLB_DIRTY:
973 vm = p->p_vmspace;
974
975 if (!vm) {
976 #ifdef TRAPDEBUG
977 printf("trap: no vm, p=%p\n", p);
978 #endif
979 goto dead_end;
980 }
981
982 /*
983 * it could be a kernel map for exec_map faults
984 */
985 if (!(type & T_USER) && space == HPPA_SID_KERNEL)
986 map = kernel_map;
987 else {
988 map = &vm->vm_map;
989 }
990
991 va = trunc_page(va);
992
993 if (map->pmap->pm_space != space) {
994 #ifdef TRAPDEBUG
995 printf("trap: space mismatch %d != %d\n",
996 space, map->pmap->pm_space);
997 #endif
998 /* actually dump the user, crap the kernel */
999 goto dead_end;
1000 }
1001
1002 /* Never call uvm_fault in interrupt context. */
1003 KASSERT(curcpu()->ci_intr_depth == 0);
1004
1005 onfault = pcb->pcb_onfault;
1006 pcb->pcb_onfault = 0;
1007 ret = uvm_fault(map, va, vftype);
1008 pcb->pcb_onfault = onfault;
1009
1010 #ifdef TRAPDEBUG
1011 printf("uvm_fault(%p, %x, %d)=%d\n",
1012 map, (u_int)va, vftype, ret);
1013 #endif
1014
1015 /*
1016 * If this was a stack access we keep track of the maximum
1017 * accessed stack size. Also, if uvm_fault gets a protection
1018 * failure it is due to accessing the stack region outside
1019 * the current limit and we need to reflect that as an access
1020 * error.
1021 */
1022 if (map != kernel_map && va >= (vaddr_t)vm->vm_minsaddr) {
1023 if (ret == 0)
1024 uvm_grow(l->l_proc, va);
1025 else if (ret == EACCES)
1026 ret = EFAULT;
1027 }
1028
1029 if (ret != 0) {
1030 if (type & T_USER) {
1031 #ifdef DEBUG
1032 user_backtrace(frame, l, type);
1033 #endif
1034 KSI_INIT_TRAP(&ksi);
1035 switch (ret) {
1036 case EACCES:
1037 ksi.ksi_signo = SIGSEGV;
1038 ksi.ksi_code = SEGV_ACCERR;
1039 break;
1040 case ENOMEM:
1041 ksi.ksi_signo = SIGKILL;
1042 printf("UVM: pid %d (%s), uid %d "
1043 "killed: out of swap\n",
1044 p->p_pid, p->p_comm,
1045 l->l_cred ?
1046 kauth_cred_geteuid(l->l_cred)
1047 : -1);
1048 break;
1049 case EINVAL:
1050 ksi.ksi_signo = SIGBUS;
1051 ksi.ksi_code = BUS_ADRERR;
1052 break;
1053 default:
1054 ksi.ksi_signo = SIGSEGV;
1055 ksi.ksi_code = SEGV_MAPERR;
1056 break;
1057 }
1058 ksi.ksi_trap = type;
1059 ksi.ksi_addr = (void *)va;
1060 trapsignal(l, &ksi);
1061 } else {
1062 if (onfault) {
1063 goto do_onfault;
1064 }
1065 panic("trap: uvm_fault(%p, %lx, %d): %d",
1066 map, va, vftype, ret);
1067 }
1068 }
1069 break;
1070
1071 case T_DATALIGN | T_USER:
1072 #ifdef DEBUG
1073 user_backtrace(frame, l, type);
1074 #endif
1075 KSI_INIT_TRAP(&ksi);
1076 ksi.ksi_signo = SIGBUS;
1077 ksi.ksi_code = BUS_ADRALN;
1078 ksi.ksi_trap = type;
1079 ksi.ksi_addr = (void *)va;
1080 trapsignal(l, &ksi);
1081 break;
1082
1083 case T_INTERRUPT:
1084 case T_INTERRUPT | T_USER:
1085 hppa_intr(frame);
1086 mtctl(frame->tf_eiem, CR_EIEM);
1087 break;
1088
1089 case T_LOWERPL:
1090 case T_DPROT:
1091 case T_IPROT:
1092 case T_OVERFLOW:
1093 case T_CONDITION:
1094 case T_ILLEGAL:
1095 case T_HIGHERPL:
1096 case T_TAKENBR:
1097 case T_POWERFAIL:
1098 case T_LPMC:
1099 case T_PAGEREF:
1100 case T_DATAPID: case T_DATAPID | T_USER:
1101 if (0 /* T-chip */) {
1102 break;
1103 }
1104 /* FALLTHROUGH to unimplemented */
1105 default:
1106 panic ("trap: unimplemented \'%s\' (%d)", tts, type);
1107 }
1108
1109 #ifdef DIAGNOSTIC
1110 if (ci->ci_cpl != oldcpl)
1111 printf("WARNING: SPL (%d) NOT LOWERED ON TRAP (%d) EXIT\n",
1112 ci->ci_cpl, trapnum);
1113 #endif
1114
1115 if (type & T_USER)
1116 userret(l, l->l_md.md_regs);
1117
1118 #ifdef DEBUG
1119 frame_sanity_check(__func__, __LINE__, type, frame, l);
1120 if (frame->tf_flags & TFF_LAST && (curlwp->l_flag & LW_IDLE) == 0)
1121 frame_sanity_check(__func__, __LINE__, type,
1122 curlwp->l_md.md_regs, curlwp);
1123 #endif /* DEBUG */
1124 }
1125
1126 void
1127 md_child_return(struct lwp *l)
1128 {
1129 /*
1130 * Return values in the frame set by cpu_lwp_fork().
1131 */
1132
1133 userret(l, l->l_md.md_regs);
1134 #ifdef DEBUG
1135 frame_sanity_check(__func__, __LINE__, 0, l->l_md.md_regs, l);
1136 #endif /* DEBUG */
1137 }
1138
1139 /*
1140 * Process the tail end of a posix_spawn() for the child.
1141 */
1142 void
1143 cpu_spawn_return(struct lwp *l)
1144 {
1145
1146 userret(l, l->l_md.md_regs);
1147 #ifdef DEBUG
1148 frame_sanity_check(__func__, __LINE__, 0, l->l_md.md_regs, l);
1149 #endif /* DEBUG */
1150 }
1151
1152 #ifdef PTRACE
1153
1154 #include <sys/ptrace.h>
1155
1156 int
1157 ss_get_value(struct lwp *l, vaddr_t addr, u_int *value)
1158 {
1159 struct uio uio;
1160 struct iovec iov;
1161
1162 iov.iov_base = (void *)value;
1163 iov.iov_len = sizeof(u_int);
1164 uio.uio_iov = &iov;
1165 uio.uio_iovcnt = 1;
1166 uio.uio_offset = (off_t)addr;
1167 uio.uio_resid = sizeof(u_int);
1168 uio.uio_rw = UIO_READ;
1169 UIO_SETUP_SYSSPACE(&uio);
1170
1171 return (process_domem(curlwp, l, &uio));
1172 }
1173
1174 int
1175 ss_put_value(struct lwp *l, vaddr_t addr, u_int value)
1176 {
1177 struct uio uio;
1178 struct iovec iov;
1179
1180 iov.iov_base = (void *)&value;
1181 iov.iov_len = sizeof(u_int);
1182 uio.uio_iov = &iov;
1183 uio.uio_iovcnt = 1;
1184 uio.uio_offset = (off_t)addr;
1185 uio.uio_resid = sizeof(u_int);
1186 uio.uio_rw = UIO_WRITE;
1187 UIO_SETUP_SYSSPACE(&uio);
1188
1189 return (process_domem(curlwp, l, &uio));
1190 }
1191
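/*
 * Undo any single-step breakpoints planted by process_sstep() below,
 * restoring the two original instruction words that were saved in
 * md_bpsave[].
 */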
1192 void
1193 ss_clear_breakpoints(struct lwp *l)
1194 {
1195 /* Restore original instructions. */
1196 if (l->l_md.md_bpva != 0) {
1197 ss_put_value(l, l->l_md.md_bpva, l->l_md.md_bpsave[0]);
1198 ss_put_value(l, l->l_md.md_bpva + 4, l->l_md.md_bpsave[1]);
1199 l->l_md.md_bpva = 0;
1200 }
1201 }
1202
1203
1204 int
1205 process_sstep(struct lwp *l, int sstep)
1206 {
1207 struct trapframe *tf = l->l_md.md_regs;
1208 int error;
1209
1210 ss_clear_breakpoints(l);
1211
1212 /* We're continuing... */
1213 if (sstep == 0) {
1214 tf->tf_ipsw &= ~PSW_T;
1215 return 0;
1216 }
1217
1218 /*
1219 * Don't touch the syscall gateway page. Instead, insert a
1220 * breakpoint where we're supposed to return.
1221 */
1222 if ((tf->tf_iioq_tail & ~PAGE_MASK) == SYSCALLGATE)
1223 l->l_md.md_bpva = tf->tf_r31 & ~HPPA_PC_PRIV_MASK;
1224 else
1225 l->l_md.md_bpva = tf->tf_iioq_tail & ~HPPA_PC_PRIV_MASK;
1226
1227 error = ss_get_value(l, l->l_md.md_bpva, &l->l_md.md_bpsave[0]);
1228 if (error)
1229 return error;
1230 error = ss_get_value(l, l->l_md.md_bpva + 4, &l->l_md.md_bpsave[1]);
1231 if (error)
1232 return error;
1233
1234 error = ss_put_value(l, l->l_md.md_bpva, SSBREAKPOINT);
1235 if (error)
1236 return error;
1237 error = ss_put_value(l, l->l_md.md_bpva + 4, SSBREAKPOINT);
1238 if (error)
1239 return error;
1240
1241 if ((tf->tf_iioq_tail & ~PAGE_MASK) == SYSCALLGATE)
1242 tf->tf_ipsw &= ~PSW_T;
1243 else
1244 tf->tf_ipsw |= PSW_T;
1245
1246 return 0;
1247 }
1248 #endif
1249
1250
1251 void
1252 syscall_intern(struct proc *p)
1253 {
1254 p->p_md.md_syscall = syscall;
1255 }
1256
1257 /*
1258 * call actual syscall routine
1259 * from the low-level syscall handler:
1260  * - all HPPA_FRAME_NARGS syscall arguments are assumed to have been copied
1261  *   onto our stack already; this wins over copyin()ing just the needed amount
1262  * - register args are copied onto the stack too
1263 */
1264 void
1265 syscall(struct trapframe *frame, int *args)
1266 {
1267 struct lwp *l;
1268 struct proc *p;
1269 const struct sysent *callp;
1270 size_t nargs64;
1271 int nsys, code, error;
1272 int tmp;
1273 int rval[2];
1274 #ifdef DIAGNOSTIC
1275 struct cpu_info *ci = curcpu();
1276 int oldcpl = ci->ci_cpl;
1277 #endif
1278
1279 curcpu()->ci_data.cpu_nsyscall++;
1280
1281 #ifdef DEBUG
1282 frame_sanity_check(__func__, __LINE__, 0, frame, curlwp);
1283 #endif /* DEBUG */
1284
1285 if (!USERMODE(frame->tf_iioq_head))
1286 panic("syscall");
1287
1288 KASSERT(curlwp != NULL);
1289 l = curlwp;
1290 p = l->l_proc;
1291 l->l_md.md_regs = frame;
1292 nsys = p->p_emul->e_nsysent;
1293 callp = p->p_emul->e_sysent;
1294 code = frame->tf_t1;
1295
1296 /*
1297 * Restarting a system call is touchy on the HPPA, because syscall
1298 * arguments are passed in registers and the program counter of the
1299 * syscall "point" isn't easily divined.
1300 *
1301 * We handle the first problem by assuming that we will have to restart
1302 * this system call, so we stuff the first four words of the original
1303 * arguments back into the frame as arg0...arg3, which is where we
1304 * found them in the first place. Any further arguments are (still) on
1305 * the user's stack and the syscall code will fetch them from there
1306 * (again).
1307 *
1308 * The program counter problem is addressed below.
1309 */
1310 frame->tf_arg0 = args[0];
1311 frame->tf_arg1 = args[1];
1312 frame->tf_arg2 = args[2];
1313 frame->tf_arg3 = args[3];
1314
1315 /*
1316 * Some special handling for the syscall(2) and
1317 * __syscall(2) system calls.
1318 */
1319 switch (code) {
1320 case SYS_syscall:
1321 code = *args;
1322 args += 1;
1323 break;
1324 case SYS___syscall:
1325 if (callp != sysent)
1326 break;
1327 /*
1328 * NB: even though __syscall(2) takes a quad_t containing the
1329 * system call number, because our argument copying word-swaps
1330 * 64-bit arguments, the least significant word of that quad_t
1331 * is the first word in the argument array.
1332 */
1333 code = *args;
1334 args += 2;
1335 }
1336
1337 /*
1338 * Stacks growing from lower addresses to higher addresses are not
1339  * really such a good idea, because they make it impossible to overlay a
1340 * struct on top of C stack arguments (the arguments appear in
1341 * reversed order).
1342 *
1343 * You can do the obvious thing (as locore.S does) and copy argument
1344 * words one by one, laying them out in the "right" order in the dest-
1345 * ination buffer, but this ends up word-swapping multi-word arguments
1346 * (like off_t).
1347 *
1348 * FIXME - this works only on native binaries and
1349 * will probably screw up any and all emulation.
1350 *
1351 */
1352
1353 if (code < 0 || code >= nsys)
1354 callp += p->p_emul->e_nosys; /* bad syscall # */
1355 else
1356 callp += code;
1357
1358 nargs64 = SYCALL_NARGS64(callp);
1359 if (nargs64 != 0) {
1360 size_t nargs = callp->sy_narg;
1361
1362 for (size_t i = 0; i < nargs + nargs64;) {
1363 if (SYCALL_ARG_64_P(callp, i)) {
1364 tmp = args[i];
1365 args[i] = args[i + 1];
1366 args[i + 1] = tmp;
1367 i += 2;
1368 } else
1369 i++;
1370 }
1371 }
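	/*
	 * Effect of the loop above: a 64-bit argument such as an off_t
	 * arrives from locore.S's word-by-word copy with its two 32-bit
	 * halves swapped; the loop puts them back in memory order so
	 * sy_invoke() sees the value the caller actually passed.
	 */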
1372
1373 #ifdef USERTRACE
1374 if (0) {
1375 user_backtrace(frame, l, -1);
1376 frame->tf_ipsw |= PSW_R;
1377 frame->tf_rctr = 0;
1378 printf("r %08x", frame->tf_iioq_head);
1379 rctr_next_iioq = frame->tf_iioq_head + 4;
1380 }
1381 #endif
1382
1383 error = sy_invoke(callp, l, args, rval, code);
1384
1385 switch (error) {
1386 case 0:
1387 l = curlwp; /* changes on exec() */
1388 frame = l->l_md.md_regs;
1389 frame->tf_ret0 = rval[0];
1390 frame->tf_ret1 = rval[1];
1391 frame->tf_t1 = 0;
1392 break;
1393 case ERESTART:
1394 /*
1395 * Now we have to wind back the instruction offset queue to the
1396 * point where the system call will be made again. This is
1397 * inherently tied to the SYSCALL macro.
1398 *
1399 * Currently, the part of the SYSCALL macro that we want to re-
1400 * run reads as:
1401 *
1402 * ldil L%SYSCALLGATE, r1
1403 * ble 4(srX, r1)
1404 * ldi __CONCAT(SYS_,x), t1
1405 * comb,<> %r0, %t1, __cerror
1406 *
1407 * And our offset queue head points to the comb instruction.
1408 * So we need to subtract twelve to reach the ldil.
1409 */
1410 frame->tf_iioq_head -= 12;
1411 frame->tf_iioq_tail = frame->tf_iioq_head + 4;
1412 break;
1413 case EJUSTRETURN:
1414 p = curproc;
1415 break;
1416 default:
1417 if (p->p_emul->e_errno)
1418 error = p->p_emul->e_errno[error];
1419 frame->tf_t1 = error;
1420 break;
1421 }
1422
1423 userret(l, frame);
1424
1425 #ifdef DIAGNOSTIC
1426 if (ci->ci_cpl != oldcpl) {
1427 printf("WARNING: SPL (0x%x) NOT LOWERED ON "
1428 "syscall(0x%x, 0x%x, 0x%x, 0x%x...) EXIT, PID %d\n",
1429 ci->ci_cpl, code, args[0], args[1], args[2], p->p_pid);
1430 ci->ci_cpl = oldcpl;
1431 }
1432 #endif
1433
1434 #ifdef DEBUG
1435 frame_sanity_check(__func__, __LINE__, 0, frame, l);
1436 #endif /* DEBUG */
1437 }
1438
1439 /*
1440 * Start a new LWP
1441 */
1442 void
1443 startlwp(void *arg)
1444 {
1445 ucontext_t *uc = arg;
1446 lwp_t *l = curlwp;
1447 int error __diagused;
1448
1449 error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
1450 KASSERT(error == 0);
1451
1452 kmem_free(uc, sizeof(ucontext_t));
1453 userret(l, l->l_md.md_regs);
1454 }
1455