/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: trap.c 1.28 89/09/25$
 *
 *	@(#)trap.c	7.1 (Berkeley) 05/08/90
 */

#include "cpu.h"
#include "psl.h"
#include "reg.h"
#include "pte.h"
#include "mtpr.h"

#include "param.h"
#include "systm.h"
#include "user.h"
#include "proc.h"
#include "seg.h"
#include "trap.h"
#include "acct.h"
#include "kernel.h"
#include "vm.h"
#include "cmap.h"
#include "syslog.h"
#ifdef KTRACE
#include "ktrace.h"
#endif

#ifdef HPUXCOMPAT
#include "../hpux/hpux.h"
#endif

#define	USER	040		/* user-mode flag added to type */

struct	sysent	sysent[];
int	nsysent;

char	*trap_type[] = {
	"Bus error",
	"Address error",
	"Illegal instruction",
	"Zero divide",
	"CHK instruction",
	"TRAPV instruction",
	"Privilege violation",
	"Trace trap",
	"MMU fault",
	"SSIR trap",
	"Format error",
	"68881 exception",
	"Coprocessor violation",
	"Async system trap"
};
#define	TRAP_TYPES	(sizeof trap_type / sizeof trap_type[0])

#ifdef DEBUG
int mmudebug = 0;
#endif

/*
 * Called from the trap handler when a processor trap occurs.
 */
/*ARGSUSED*/
trap(type, code, v, frame)
	int type;
	unsigned code;
	register unsigned v;
	struct frame frame;
{
	register int i;
	unsigned ucode = 0;
	register struct proc *p = u.u_procp;
	struct timeval syst;
	unsigned ncode;

	cnt.v_trap++;
	syst = u.u_ru.ru_stime;
	if (USERMODE(frame.f_sr)) {
		type |= USER;
		u.u_ar0 = frame.f_regs;
	}
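	/*
	 * Dispatch on the trap type.  Each case below either handles the
	 * trap completely (returning or jumping to "out"), or sets "i" to
	 * the signal to post and "ucode" to the code reported with it and
	 * falls out of the switch to trapsignal().  Kernel-mode faults
	 * taken inside the copy to/from user space routines resume at the
	 * address recorded in pcb_onfault ("copyfault"); anything
	 * unrecognized ends up at "dopanic".
	 */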
	switch (type) {

	default:
	dopanic:
#ifdef KGDB
		if (!panicstr && kgdb_trap(type, code, v, &frame))
			return;
#endif
		printf("trap type %d, code = %x, v = %x\n", type, code, v);
		regdump(frame.f_regs, 128);
		type &= ~USER;
		if ((unsigned)type < TRAP_TYPES)
			panic(trap_type[type]);
		panic("trap");

	case T_BUSERR:		/* kernel bus error */
		if (!u.u_pcb.pcb_onfault)
			goto dopanic;
		/*
		 * If we have arranged to catch this fault in any of the
		 * copy to/from user space routines, set PC to return to
		 * indicated location and set flag informing buserror code
		 * that it may need to clean up stack frame.
		 */
copyfault:
		frame.f_pc = (int) u.u_pcb.pcb_onfault;
		frame.f_stackadj = -1;
		return;

	case T_BUSERR+USER:	/* bus error */
	case T_ADDRERR+USER:	/* address error */
		i = SIGBUS;
		break;

#ifdef FPCOPROC
	case T_COPERR:		/* kernel coprocessor violation */
#endif
	case T_FMTERR:		/* kernel format error */
		/*
		 * The user has most likely trashed the RTE or FP state info
		 * in the stack frame of a signal handler.
		 */
		printf("pid %d: kernel %s exception\n", u.u_procp->p_pid,
		       type==T_COPERR ? "coprocessor" : "format");
		u.u_signal[SIGILL] = SIG_DFL;
		i = sigmask(SIGILL);
		p->p_sigignore &= ~i;
		p->p_sigcatch &= ~i;
		p->p_sigmask &= ~i;
		ucode = frame.f_format;	/* XXX was ILL_RESAD_FAULT */
		psignal(u.u_procp, SIGILL);
		goto out;

#ifdef FPCOPROC
	case T_COPERR+USER:	/* user coprocessor violation */
	/* What is a proper response here? */
		ucode = 0;
		i = SIGFPE;
		break;

	case T_FPERR+USER:	/* 68881 exceptions */
		/*
		 * We pass along the 68881 status register which locore stashed
		 * in code for us.  Note that there is a possibility that the
		 * bit pattern of this register will conflict with one of the
		 * FPE_* codes defined in signal.h.  Fortunately for us, the
		 * only such codes we use are all in the range 1-7 and the low
		 * 3 bits of the status register are defined as 0 so there is
		 * no clash.
		 */
		ucode = code;
		i = SIGFPE;
		break;
#endif

	case T_ILLINST+USER:	/* illegal instruction fault */
#ifdef HPUXCOMPAT
		if (u.u_procp->p_flag & SHPUX) {
			ucode = HPUX_ILL_ILLINST_TRAP;
			i = SIGILL;
			break;
		}
		/* fall through */
#endif
	case T_PRIVINST+USER:	/* privileged instruction fault */
#ifdef HPUXCOMPAT
		if (u.u_procp->p_flag & SHPUX)
			ucode = HPUX_ILL_PRIV_TRAP;
		else
#endif
		ucode = frame.f_format;	/* XXX was ILL_PRIVIN_FAULT */
		i = SIGILL;
		break;

	case T_ZERODIV+USER:	/* Divide by zero */
#ifdef HPUXCOMPAT
		if (u.u_procp->p_flag & SHPUX)
			ucode = HPUX_FPE_INTDIV_TRAP;
		else
#endif
		ucode = frame.f_format;	/* XXX was FPE_INTDIV_TRAP */
		i = SIGFPE;
		break;

	case T_CHKINST+USER:	/* CHK instruction trap */
#ifdef HPUXCOMPAT
		if (u.u_procp->p_flag & SHPUX) {
			/* handled differently under hp-ux */
			i = SIGILL;
			ucode = HPUX_ILL_CHK_TRAP;
			break;
		}
#endif
		ucode = frame.f_format;	/* XXX was FPE_SUBRNG_TRAP */
		i = SIGFPE;
		break;

	case T_TRAPVINST+USER:	/* TRAPV instruction trap */
#ifdef HPUXCOMPAT
		if (u.u_procp->p_flag & SHPUX) {
			/* handled differently under hp-ux */
			i = SIGILL;
			ucode = HPUX_ILL_TRAPV_TRAP;
			break;
		}
#endif
		ucode = frame.f_format;	/* XXX was FPE_INTOVF_TRAP */
		i = SIGFPE;
		break;

	/*
	 * XXX: Trace traps are a nightmare.
	 *
	 *	HP-UX uses trap #1 for breakpoints,
	 *	HPBSD uses trap #2,
	 *	SUN 3.x uses trap #15,
	 *	KGDB uses trap #15 (for kernel breakpoints).
	 *
	 * HPBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
	 * supported yet.  KGDB traps are also passed through as T_TRAP15
	 * and are not used yet.
	 */
	case T_TRACE:		/* kernel trace trap */
	case T_TRAP15:		/* SUN (or KGDB) kernel trace trap */
#ifdef KGDB
		if (kgdb_trap(type, code, v, &frame))
			return;
#endif
		frame.f_sr &= ~PSL_T;
		i = SIGTRAP;
		break;

	case T_TRACE+USER:	/* user trace trap */
	case T_TRAP15+USER:	/* SUN user trace trap */
#ifdef SUNCOMPAT
		/*
		 * Trap #2 is used to signal a cache flush.
		 * Should we also flush data cache?
		 */
		if (type == T_TRACE+USER && (p->p_flag & SSUN)) {
			ICIA();
			goto out;
		}
#endif
		frame.f_sr &= ~PSL_T;
		i = SIGTRAP;
		break;

	case T_AST:		/* system async trap, cannot happen */
		goto dopanic;

	case T_AST+USER:	/* user async trap */
		astoff();
		/*
		 * We check for software interrupts first.  This is because
		 * they are at a higher level than ASTs, and on a VAX would
		 * interrupt the AST.  We assume that if we are processing
		 * an AST that we must be at IPL0 so we don't bother to
		 * check.  Note that we ensure that we are at least at SIR
		 * IPL while processing the SIR.
		 */
		spl1();
		/* fall into... */

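	/*
	 * Software interrupt requests: each pending SIR bit is cleared
	 * before its handler (netintr or softclock) is called, presumably
	 * so that a request posted while the handler runs is not lost.
	 * A pure SSIR trap is counted as a soft interrupt rather than a
	 * trap, hence the v_trap adjustment below.
	 */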
	case T_SSIR:		/* software interrupt */
	case T_SSIR+USER:
		if (ssir & SIR_NET) {
			siroff(SIR_NET);
			cnt.v_soft++;
			netintr();
		}
		if (ssir & SIR_CLOCK) {
			siroff(SIR_CLOCK);
			cnt.v_soft++;
			softclock((caddr_t)frame.f_pc, (int)frame.f_sr);
		}
		/*
		 * If this was not an AST trap, we are all done.
		 */
		if (type != T_AST+USER) {
			cnt.v_trap--;
			return;
		}
		spl0();
#ifndef PROFTIMER
		if ((u.u_procp->p_flag&SOWEUPC) && u.u_prof.pr_scale) {
			addupc(frame.f_pc, &u.u_prof, 1);
			u.u_procp->p_flag &= ~SOWEUPC;
		}
#endif
		goto out;

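	/*
	 * MMU faults.  The fault bits are taken from the upper half of
	 * "code" (or synthesized from the PMMU status below): MMU_PTF is
	 * treated as a possible stack-growth fault, MMU_WPF is a write
	 * protect violation (SIGBUS for user faults), and MMU_PF is an
	 * ordinary missing page normally resolved by pagein().
	 */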
	case T_MMUFLT:		/* kernel mode page fault */
		/*
		 * Could be caused by a page fault in one of the copy to/from
		 * user space routines.  If so, we will have a catch address.
		 */
		if (!u.u_pcb.pcb_onfault)
			goto dopanic;
		/* fall into ... */

	case T_MMUFLT+USER:	/* page fault */
		/*
		printf("trap: T_MMUFLT pid %d, code %x, v %x, pc %x, ps %x\n",
		       p->p_pid, code, v, frame.f_pc, frame.f_sr);
		*/
		if (v >= USRSTACK) {
			if (type == T_MMUFLT)
				goto copyfault;
			i = SIGSEGV;
			break;
		}
		ncode = code >> 16;
#if defined(HP330) || defined(HP360) || defined(HP370)
		/*
		 * Crudely map PMMU faults into HP MMU faults.
		 */
		if (mmutype != MMU_HP) {
			int ocode = ncode;
			ncode = 0;
			if (ocode & PMMU_WP)
				ncode |= MMU_WPF;
			else if (ocode & PMMU_INV) {
				if ((ocode & PMMU_LVLMASK) == 2)
					ncode |= MMU_PF;
				else
					ncode |= MMU_PTF;
			}
			/*
			 * RMW cycle, must load ATC by hand
			 */
			else if ((code & (SSW_DF|SSW_RM)) == (SSW_DF|SSW_RM)) {
#ifdef DEBUG
				log(LOG_WARNING,
				    "RMW fault at %x: MMUSR %x SSW %x\n",
				    v, ocode, code & 0xFFFF);
#endif
				ploadw((caddr_t)v);
				return;
			}
			/*
			 * Fault with no fault bits, should indicate bad
			 * hardware but we see this on 340s using starbase
			 * sometimes (faults accessing catseye registers)
			 */
			else {
				log(LOG_WARNING,
				    "Bad PMMU fault at %x: MMUSR %x SSW %x\n",
				    v, ocode, code & 0xFFFF);
				return;
			}
#ifdef DEBUG
			if (mmudebug && mmudebug == p->p_pid)
				printf("MMU %d: v%x, os%x, ns%x\n",
				       p->p_pid, v, ocode, ncode);
#endif
		}
#endif
#ifdef DEBUG
		if ((ncode & (MMU_PTF|MMU_PF|MMU_WPF|MMU_FPE)) == 0) {
			printf("T_MMUFLT with no fault bits\n");
			goto dopanic;
		}
#endif
		if (ncode & MMU_PTF) {
#ifdef DEBUG
			/*
			 * NOTE: we use a u_int instead of an ste since the
			 * current compiler generates bogus code for some
			 * bitfield operations (i.e. attempts to access last
			 * word of a page as a longword causing fault).
			 */
			extern struct ste *vtoste();
			u_int *ste = (u_int *)vtoste(p, v);

			if (*ste & SG_V) {
				if (ncode & MMU_WPF) {
					printf("PTF|WPF...\n");
					if (type == T_MMUFLT)
						goto copyfault;
					i = SIGBUS;
					break;
				}
				printf("MMU_PTF with sg_v, ste@%x = %x\n",
				       ste, *ste);
				goto dopanic;
			}
#endif
#ifdef HPUXCOMPAT
			if (ISHPMMADDR(v)) {
				extern struct ste *vtoste();
				u_int *bste, *nste;

				bste = (u_int *)vtoste(p, HPMMBASEADDR(v));
				nste = (u_int *)vtoste(p, v);
				if ((*bste & SG_V) && *nste == SG_NV) {
					*nste = *bste;
					TBIAU();
					return;
				}
			}
#endif
growit:
			if (type == T_MMUFLT)
				goto copyfault;
			if (grow((unsigned)frame.f_regs[SP]) || grow(v))
				goto out;
			i = SIGSEGV;
			break;
		}
#ifdef HPUXCOMPAT
		if (ISHPMMADDR(v)) {
			TBIS(v);
			v = HPMMBASEADDR(v);
		}
#endif
		/*
		 * NOTE: WPF without PG_V is possible
		 * (e.g. attempt to write shared text which is paged out)
		 */
		if (ncode & MMU_WPF) {
#ifdef DEBUG
			extern struct ste *vtoste();
			u_int *ste = (u_int *)vtoste(p, v);

			if (!(*ste & SG_V)) {
				printf("MMU_WPF without sg_v, ste@%x = %x\n",
				       ste, *ste);
				goto dopanic;
			}
#endif
			if (type == T_MMUFLT)
				goto copyfault;
			i = SIGBUS;
			break;
		}
		if (ncode & MMU_PF) {
			register u_int vp;
#ifdef DEBUG
			extern struct ste *vtoste();
			u_int *ste = (u_int *)vtoste(p, v);
			struct pte *pte;

			if (!(*ste & SG_V)) {
				printf("MMU_PF without sg_v, ste@%x = %x\n",
				       ste, *ste);
				goto dopanic;
			}
#endif
			vp = btop(v);
			if (vp >= dptov(p, p->p_dsize) &&
			    vp < sptov(p, p->p_ssize-1))
				goto growit;
#ifdef DEBUG
			pte = vtopte(p, vp);
			if (*(u_int *)pte & PG_V) {
				printf("MMU_PF with pg_v, pte = %x\n",
				       *(u_int *)pte);
				goto dopanic;
			}
#endif
			i = u.u_error;
			pagein(v, 0);
			u.u_error = i;
			if (type == T_MMUFLT)
				return;
			goto out;
		}
#ifdef DEBUG
		printf("T_MMUFLT: unrecognized scenario\n");
		goto dopanic;
#endif
	}
	trapsignal(i, ucode);
	if ((type & USER) == 0)
		return;
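	/*
	 * Common exit path for user-mode traps: deliver any pending
	 * signal, recompute the scheduling priority, reschedule if the
	 * clock requested it (runrun), and charge system time accumulated
	 * during the trap to the profiling buffer.
	 */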
out:
	p = u.u_procp;
	if (p->p_cursig || ISSIG(p))
		psig();
	p->p_pri = p->p_usrpri;
	if (runrun) {
		/*
		 * Since we are u.u_procp, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		(void) splclock();
		setrq(p);
		u.u_ru.ru_nivcsw++;
		swtch();
		if (ISSIG(p))
			psig();
	}
	if (u.u_prof.pr_scale) {
		int ticks;
		struct timeval *tv = &u.u_ru.ru_stime;

		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks) {
#ifdef PROFTIMER
			extern int profscale;
			addupc(frame.f_pc, &u.u_prof, ticks * profscale);
#else
			addupc(frame.f_pc, &u.u_prof, ticks);
#endif
		}
	}
	curpri = p->p_pri;
}

/*
 * Called from the trap handler when a system call occurs
 */
/*ARGSUSED*/
syscall(code, frame)
	volatile int code;
	struct frame frame;
{
	register caddr_t params;
	register int i;
	register struct sysent *callp;
	register struct proc *p;
	register struct user *up;
	int opc, numsys;
	struct timeval syst;
	struct sysent *systab;
#ifdef HPUXCOMPAT
	extern struct sysent hpuxsysent[];
	extern int hpuxnsysent, notimp();
#endif

	/*
	 * We assign &u to a local variable for GCC.  This ensures that
	 * we can explicitly reload it after the call to qsetjmp below.
	 * If we don't do this, GCC may itself have assigned &u to a
	 * register variable which will not be properly reloaded, since
	 * GCC knows nothing of the funky semantics of qsetjmp.
	 */
	up = &u;

	cnt.v_syscall++;
	syst = up->u_ru.ru_stime;
	if (!USERMODE(frame.f_sr))
		panic("syscall");
	up->u_ar0 = frame.f_regs;
	up->u_error = 0;
	opc = frame.f_pc - 2;
	systab = sysent;
	numsys = nsysent;
#ifdef HPUXCOMPAT
	if (up->u_procp->p_flag & SHPUX) {
		systab = hpuxsysent;
		numsys = hpuxnsysent;
	}
#endif
	params = (caddr_t)frame.f_regs[SP] + NBPW;
	/*
	 * We use entry 0 instead of 63 to signify an invalid syscall because
	 * HPUX uses 63 and 0 works just as well for our purposes.
	 */
	if (code == 0) {
		i = fuword(params);
		params += NBPW;
		callp = ((unsigned)i >= numsys) ? &systab[0] : &systab[i];
	} else
		callp = (code >= numsys) ? &systab[0] : &systab[code];
	p = up->u_procp;
	if ((i = callp->sy_narg * sizeof (int)) &&
	    (up->u_error = copyin(params, (caddr_t)up->u_arg, (u_int)i))) {
#ifdef HPUXCOMPAT
		if (p->p_flag & SHPUX)
			up->u_error = bsdtohpuxerrno(up->u_error);
#endif
		frame.f_regs[D0] = (u_char) up->u_error;
		frame.f_sr |= PSL_C;	/* carry bit */
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg);
#endif
		goto done;
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p->p_tracep, code, callp->sy_narg);
#endif
	up->u_r.r_val1 = 0;
	up->u_r.r_val2 = frame.f_regs[D0];
	/*
	 * qsetjmp only saves a6/a7.  This speeds things up in the common
	 * case (where saved values are never used).  There is a side effect
	 * however.  Namely, if we do return via longjmp() we must restore
	 * our own register variables.
	 */
	if (qsetjmp(&up->u_qsave)) {
		up = &u;
		if (up->u_error == 0 && up->u_eosys != RESTARTSYS)
			up->u_error = EINTR;
#ifdef HPUXCOMPAT
		/* there are some HPUX calls where we change u_ap */
		if (up->u_ap != up->u_arg) {
			up->u_ap = up->u_arg;
			printf("syscall(%d): u_ap changed\n", code);
		}
#endif
	} else {
		up->u_eosys = NORMALRETURN;
#ifdef HPUXCOMPAT
		/* debug kludge */
		if (callp->sy_call == notimp)
			notimp(code, callp->sy_narg);
		else
#endif
		(*(callp->sy_call))(up);
	}
	/*
	 * Need to reinit p for two reasons.  One, it is a register var
	 * and is not saved in the qsetjmp so an EINTR return will leave
	 * it with garbage.  Two, even on a normal return, it will be
	 * wrong for the child process of a fork (it will point to the
	 * parent).
	 */
	p = up->u_procp;
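	/*
	 * Return convention: on error the (possibly HP-UX translated)
	 * error number goes back in d0 with the carry bit set in the
	 * saved SR; on success carry is cleared and the result words go
	 * back in d0/d1.  The user-level system call stubs presumably
	 * test the carry bit to decide whether to store errno.
	 */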
609 */ 610 if (qsetjmp(&up->u_qsave)) { 611 up = &u; 612 if (up->u_error == 0 && up->u_eosys != RESTARTSYS) 613 up->u_error = EINTR; 614 #ifdef HPUXCOMPAT 615 /* there are some HPUX calls where we change u_ap */ 616 if (up->u_ap != up->u_arg) { 617 up->u_ap = up->u_arg; 618 printf("syscall(%d): u_ap changed\n", code); 619 } 620 #endif 621 } else { 622 up->u_eosys = NORMALRETURN; 623 #ifdef HPUXCOMPAT 624 /* debug kludge */ 625 if (callp->sy_call == notimp) 626 notimp(code, callp->sy_narg); 627 else 628 #endif 629 (*(callp->sy_call))(up); 630 } 631 /* 632 * Need to reinit p for two reason. One, it is a register var 633 * and is not saved in the qsetjmp so a EINTR return will leave 634 * it with garbage. Two, even on a normal return, it will be 635 * wrong for the child process of a fork (it will point to the 636 * parent). 637 */ 638 p = up->u_procp; 639 if (up->u_eosys == NORMALRETURN) { 640 if (up->u_error) { 641 #ifdef HPUXCOMPAT 642 if (p->p_flag & SHPUX) 643 up->u_error = bsdtohpuxerrno(up->u_error); 644 #endif 645 frame.f_regs[D0] = (u_char) up->u_error; 646 frame.f_sr |= PSL_C; /* carry bit */ 647 } else { 648 frame.f_regs[D0] = up->u_r.r_val1; 649 frame.f_regs[D1] = up->u_r.r_val2; 650 frame.f_sr &= ~PSL_C; 651 } 652 } else if (up->u_eosys == RESTARTSYS) 653 frame.f_pc = opc; 654 /* else if (up->u_eosys == JUSTRETURN) */ 655 /* nothing to do */ 656 done: 657 /* 658 * The check for sigreturn (code 103) ensures that we don't 659 * attempt to set up a call to a signal handler (sendsig) before 660 * we have cleaned up the stack from the last call (sigreturn). 661 * Allowing this seems to lock up the machine in certain scenarios. 662 * What should really be done is to clean up the signal handling 663 * so that this is not a problem. 664 */ 665 if (code != 103 && (p->p_cursig || ISSIG(p))) 666 psig(); 667 p->p_pri = p->p_usrpri; 668 if (runrun) { 669 /* 670 * Since we are u.u_procp, clock will normally just change 671 * our priority without moving us from one queue to another 672 * (since the running process is not on a queue.) 673 * If that happened after we setrq ourselves but before we 674 * swtch()'ed, we might not be on the queue indicated by 675 * our priority. 676 */ 677 (void) splclock(); 678 setrq(p); 679 up->u_ru.ru_nivcsw++; 680 swtch(); 681 } 682 if (up->u_prof.pr_scale) { 683 int ticks; 684 struct timeval *tv = &up->u_ru.ru_stime; 685 686 ticks = ((tv->tv_sec - syst.tv_sec) * 1000 + 687 (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000); 688 if (ticks) { 689 #ifdef PROFTIMER 690 extern int profscale; 691 addupc(frame.f_pc, &up->u_prof, ticks * profscale); 692 #else 693 addupc(frame.f_pc, &up->u_prof, ticks); 694 #endif 695 } 696 } 697 curpri = p->p_pri; 698 #ifdef KTRACE 699 if (KTRPOINT(p, KTR_SYSRET)) 700 ktrsysret(p->p_tracep, code); 701 #endif 702 } 703