/*	$NetBSD: trap.c,v 1.86 2021/03/06 08:08:19 rin Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define	__UFETCHSTORE_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.86 2021/03/06 08:08:19 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/systm.h>

#if defined(KGDB)
#include <sys/kgdb.h>
#endif

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <powerpc/db_machdep.h>
#include <powerpc/spr.h>
#include <powerpc/userret.h>

#include <powerpc/ibm4xx/cpu.h>
#include <powerpc/ibm4xx/pmap.h>
#include <powerpc/ibm4xx/spr.h>
#include <powerpc/ibm4xx/tlb.h>

#include <powerpc/fpu/fpu_extern.h>

/* These definitions should probably be somewhere else XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((void *)((int)(sp) + 8)) /* more args go here */

static int fix_unaligned(struct lwp *l, struct trapframe *tf);

void trap(struct trapframe *);	/* Called from locore / trap_subr */
#if 0
/* Not currently used nor exposed externally in any header file */
int badaddr(void *, size_t);
int badaddr_read(void *, size_t, int *);
#endif
int ctx_setup(int, int);

#ifdef DEBUG
#define TDB_ALL	0x1
int trapdebug = /* TDB_ALL */ 0;
#define	DBPRINTF(x, y)	if (trapdebug & (x)) printf y
#else
#define DBPRINTF(x, y)
#endif

void
trap(struct trapframe *tf)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct pcb *pcb;
	int type = tf->tf_exc;
	int ftype, rv;
	ksiginfo_t ksi;

	KASSERT(l->l_stat == LSONPROC);

	if (tf->tf_srr1 & PSL_PR) {
		LWP_CACHE_CREDS(l, p);
		type |= EXC_USER;
	}

	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %lx from frame %p &frame %p\n",
	    type, tf->tf_srr0, tf, &tf));

	switch (type) {
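	/*
	 * Dispatch on the exception type saved in the trapframe.
	 * EXC_USER is or'ed in above for traps taken in user mode, so
	 * user- and kernel-mode instances of an exception are handled
	 * by separate cases.
	 */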
	case EXC_DEBUG|EXC_USER:
		/* We don't use hardware breakpoints for userland. */
		goto brain_damage;

	case EXC_TRC|EXC_USER:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_trap = EXC_TRC;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		trapsignal(l, &ksi);
		break;

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
	    {
		struct vm_map *map;
		vaddr_t va;
		struct faultbuf *fb;

		pcb = lwp_getpcb(l);
		fb = pcb->pcb_onfault;

		if (curcpu()->ci_idepth >= 0) {
			rv = EFAULT;
			goto out;
		}

		va = tf->tf_dear;
		if (tf->tf_pid == KERNEL_PID) {
			map = kernel_map;
		} else {
			map = &p->p_vmspace->vm_map;
		}

		if (tf->tf_esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI) at %lx %s fault on %p esr %x\n",
		    tf->tf_srr0,
		    (ftype & VM_PROT_WRITE) ? "write" : "read",
		    (void *)va, tf->tf_esr));

		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, trunc_page(va), ftype);
		pcb->pcb_onfault = fb;
		if (rv == 0)
			return;
out:
		if (fb != NULL) {
			tf->tf_pid = KERNEL_PID;
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_srr1 |= PSL_IR; /* Re-enable IMMU */
			tf->tf_cr = fb->fb_cr;
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = rv;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
	    }
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		if (tf->tf_esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %lx %s fault on %lx %x\n",
		    tf->tf_srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    tf->tf_dear, tf->tf_esr));
		KASSERT(l == curlwp && (l->l_stat == LSONPROC));
//		KASSERT(curpcb->pcb_onfault == NULL);
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_dear),
		    ftype);
		if (rv == 0) {
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_DSI;
		ksi.ksi_addr = (void *)tf->tf_dear;
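		/*
		 * Translate the uvm_fault() error into a signal: EINVAL
		 * is a bad address (SIGBUS), EACCES a protection
		 * failure (SIGSEGV/SEGV_ACCERR), ENOMEM kills the
		 * process (out of swap), and anything else is treated
		 * as an unmapped address (SEGV_MAPERR).
		 */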
vm_signal:
		switch (rv) {
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		case ENOMEM:
			ksi.ksi_signo = SIGKILL;
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n", p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ?
			    kauth_cred_geteuid(l->l_cred) : -1);
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		trapsignal(l, &ksi);
		break;

	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		ftype = VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
		    tf->tf_srr0, tf));
//		KASSERT(curpcb->pcb_onfault == NULL);
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_srr0),
		    ftype);
		if (rv == 0) {
			break;
		}
isi:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_ISI;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		goto vm_signal;
		break;

	case EXC_AST|EXC_USER:
		cpu_ast(l, curcpu());
		break;

	case EXC_ALI|EXC_USER:
		if (fix_unaligned(l, tf) != 0) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_trap = EXC_ALI;
			ksi.ksi_addr = (void *)tf->tf_dear;
			trapsignal(l, &ksi);
		} else
			tf->tf_srr0 += 4;
		break;

	case EXC_PGM|EXC_USER:
		curcpu()->ci_data.cpu_ntrap++;

		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_PGM;
		ksi.ksi_addr = (void *)tf->tf_srr0;

		if (tf->tf_esr & ESR_PTR) {
			vaddr_t va;
sigtrap:
			va = (vaddr_t)tf->tf_srr0;
			/*
			 * Restore original instruction and clear BP.
			 */
			if (p->p_md.md_ss_addr[0] == va ||
			    p->p_md.md_ss_addr[1] == va) {
				rv = ppc_sstep(l, 0);
				if (rv != 0)
					goto vm_signal;
				ksi.ksi_code = TRAP_TRACE;
			} else
				ksi.ksi_code = TRAP_BRKPT;
			if (p->p_raslist != NULL &&
			    ras_lookup(p, (void *)va) != (void *)-1) {
				tf->tf_srr0 += (ksi.ksi_code == TRAP_TRACE) ?
				    0 : 4;
				break;
			}
			ksi.ksi_signo = SIGTRAP;
		} else if (tf->tf_esr & ESR_PPR) {
			uint32_t opcode;

			rv = copyin((void *)tf->tf_srr0, &opcode,
			    sizeof(opcode));
			if (rv)
				goto isi;
			if (emulate_mxmsr(l, tf, opcode)) {
				tf->tf_srr0 += 4;
				break;
			}

			ksi.ksi_code = ILL_PRVOPC;
			ksi.ksi_signo = SIGILL;
		} else {
			pcb = lwp_getpcb(l);

			if (__predict_false(!fpu_used_p(l))) {
				memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
				fpu_mark_used(l);
			}

			if (fpu_emulate(tf, &pcb->pcb_fpu, &ksi)) {
				if (ksi.ksi_signo == 0)	/* was emulated */
					break;
				else if (ksi.ksi_signo == SIGTRAP)
					goto sigtrap;	/* XXX H/W bug? */
			} else {
				ksi.ksi_code = ILL_ILLOPC;
				ksi.ksi_signo = SIGILL;
			}
		}

		trapsignal(l, &ksi);
		break;

	case EXC_MCHK:
	    {
		struct faultbuf *fb;

		pcb = lwp_getpcb(l);
		if ((fb = pcb->pcb_onfault) != NULL) {
			tf->tf_pid = KERNEL_PID;
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_srr1 |= PSL_IR; /* Re-enable IMMU */
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = 1; /* Return TRUE */
			tf->tf_cr = fb->fb_cr;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
	    }
		goto brain_damage;

	default:
brain_damage:
		printf("trap type 0x%x at 0x%lx\n", type, tf->tf_srr0);
#if defined(DDB) || defined(KGDB)
		if (kdb_trap(type, tf))
			return;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Invoke powerpc userret code */
	userret(l, tf);
}
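/*
 * Decide which MMU translation context (PID) an exception should be
 * resumed with: the faulting process's context for returns to user
 * mode (allocating one first if its pmap has none), otherwise the
 * kernel's reserved PID.
 */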
int
ctx_setup(int ctx, int srr1)
{
	volatile struct pmap *pm;

	/* Update PID if we're returning to user mode. */
	if (srr1 & PSL_PR) {
		pm = curproc->p_vmspace->vm_map.pmap;
		if (!pm->pm_ctx) {
			ctx_alloc(__UNVOLATILE(pm));
		}
		ctx = pm->pm_ctx;
	} else if (!ctx) {
		ctx = KERNEL_PID;
	}
	return (ctx);
}

/*
 * Used by copyin()/copyout()
 */
extern vaddr_t vmaprange(struct proc *, vaddr_t, vsize_t, int);
extern void vunmaprange(vaddr_t, vsize_t);
static int bigcopyin(const void *, void *, size_t);
static int bigcopyout(const void *, void *, size_t);

int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx, count = 0;
	struct faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 1024)
		return (bigcopyin(udaddr, kaddr, len));

	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}
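	/*
	 * The copy proper is done with inline assembly.  Instruction
	 * translation is disabled so that instruction fetches do not
	 * depend on the PID register being switched below: each word
	 * (and any trailing bytes) is loaded with the user context's
	 * PID active and stored with the kernel PID restored.
	 */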
	__asm volatile(
	    "	mfmsr	%[msr];"		/* Save MSR */
	    "	li	%[pid],0x20;"
	    "	andc	%[pid],%[msr],%[pid]; mtmsr %[pid];" /* Disable IMMU */
	    "	isync;"
	    "	mfpid	%[pid];"		/* Save old PID */

	    "	srwi.	%[count],%[len],0x2;"	/* How many words? */
	    "	beq-	2f;"			/* No words. Go do bytes */
	    "	mtctr	%[count];"
	    "1:	mtpid	%[ctx]; isync;"
#ifdef PPC_IBM403
	    "	lswi	%[tmp],%[udaddr],4;"	/* Load user word */
#else
	    "	lwz	%[tmp],0(%[udaddr]);"
#endif
	    "	addi	%[udaddr],%[udaddr],0x4;" /* next udaddr word */
	    "	sync;"
	    "	mtpid	%[pid]; isync;"
#ifdef PPC_IBM403
	    "	stswi	%[tmp],%[kaddr],4;"	/* Store kernel word */
#else
	    "	stw	%[tmp],0(%[kaddr]);"
#endif
	    "	dcbst	0,%[kaddr];"		/* flush cache */
	    "	addi	%[kaddr],%[kaddr],0x4;"	/* next kaddr word */
	    "	sync;"
	    "	bdnz	1b;"			/* repeat */

	    "2:	andi.	%[count],%[len],0x3;"	/* How many remaining bytes? */
	    "	addi	%[count],%[count],0x1;"
	    "	mtctr	%[count];"
	    "3:	bdz	10f;"			/* while count */
	    "	mtpid	%[ctx]; isync;"
	    "	lbz	%[tmp],0(%[udaddr]);"	/* Load user byte */
	    "	addi	%[udaddr],%[udaddr],0x1;" /* next udaddr byte */
	    "	sync;"
	    "	mtpid	%[pid]; isync;"
	    "	stb	%[tmp],0(%[kaddr]);"	/* Store kernel byte */
	    "	dcbst	0,%[kaddr];"		/* flush cache */
	    "	addi	%[kaddr],%[kaddr],0x1;"
	    "	sync;"
	    "	b	3b;"
	    "10:mtpid	%[pid]; mtmsr	%[msr]; isync;"
						/* Restore PID and MSR */
	    : [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp)
	    : [udaddr] "b" (udaddr), [ctx] "b" (ctx), [kaddr] "b" (kaddr),
	      [len] "b" (len), [count] "b" (count));

	curpcb->pcb_onfault = NULL;
	return 0;
}

static int
bigcopyin(const void *udaddr, void *kaddr, size_t len)
{
	const char *up;
	char *kp = kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, __UNCONST(udaddr), len, VM_PROT_READ);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);

	if ((error = setfault(&env)) == 0) {
		memcpy(kp, up, len);
	}

	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, __UNCONST(udaddr), len);

	return error;
}

int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx, count = 0;
	struct faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 1024)
		return (bigcopyout(kaddr, udaddr, len));

	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}
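	/*
	 * Mirror image of the copyin() loop: words are loaded with the
	 * kernel PID active and stored with the user context's PID,
	 * again with instruction translation disabled while the PID
	 * register is being switched.
	 */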
	__asm volatile(
	    "	mfmsr	%[msr];"		/* Save MSR */
	    "	li	%[pid],0x20;"
	    "	andc	%[pid],%[msr],%[pid]; mtmsr %[pid];" /* Disable IMMU */
	    "	isync;"
	    "	mfpid	%[pid];"		/* Save old PID */

	    "	srwi.	%[count],%[len],0x2;"	/* How many words? */
	    "	beq-	2f;"			/* No words. Go do bytes */
	    "	mtctr	%[count];"
	    "1:	mtpid	%[pid]; isync;"
#ifdef PPC_IBM403
	    "	lswi	%[tmp],%[kaddr],4;"	/* Load kernel word */
#else
	    "	lwz	%[tmp],0(%[kaddr]);"
#endif
	    "	addi	%[kaddr],%[kaddr],0x4;"	/* next kaddr word */
	    "	sync;"
	    "	mtpid	%[ctx]; isync;"
#ifdef PPC_IBM403
	    "	stswi	%[tmp],%[udaddr],4;"	/* Store user word */
#else
	    "	stw	%[tmp],0(%[udaddr]);"
#endif
	    "	dcbst	0,%[udaddr];"		/* flush cache */
	    "	addi	%[udaddr],%[udaddr],0x4;" /* next udaddr word */
	    "	sync;"
	    "	bdnz	1b;"			/* repeat */

	    "2:	andi.	%[count],%[len],0x3;"	/* How many remaining bytes? */
	    "	addi	%[count],%[count],0x1;"
	    "	mtctr	%[count];"
	    "3:	bdz	10f;"			/* while count */
	    "	mtpid	%[pid]; isync;"
	    "	lbz	%[tmp],0(%[kaddr]);"	/* Load kernel byte */
	    "	addi	%[kaddr],%[kaddr],0x1;"	/* next kaddr byte */
	    "	sync;"
	    "	mtpid	%[ctx]; isync;"
	    "	stb	%[tmp],0(%[udaddr]);"	/* Store user byte */
	    "	dcbst	0,%[udaddr];"		/* flush cache */
	    "	addi	%[udaddr],%[udaddr],0x1;"
	    "	sync;"
	    "	b	3b;"
	    "10:mtpid	%[pid]; mtmsr	%[msr]; isync;"
						/* Restore PID and MSR */
	    : [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp)
	    : [udaddr] "b" (udaddr), [ctx] "b" (ctx), [kaddr] "b" (kaddr),
	      [len] "b" (len), [count] "b" (count));

	curpcb->pcb_onfault = NULL;
	return 0;
}

static int
bigcopyout(const void *kaddr, void *udaddr, size_t len)
{
	char *up;
	const char *kp = (const char *)kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, udaddr, len, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
	    VM_PROT_READ | VM_PROT_WRITE);

	if ((error = setfault(&env)) == 0) {
		memcpy(up, kp, len);
	}

	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, udaddr, len);

	return error;
}

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct faultbuf env, *oldfault;
	int rv;

	oldfault = curpcb->pcb_onfault;
	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return 0;
}

#if 0
int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting. */
	__asm volatile ("sync; isync");

	if (setfault(&env)) {
		curpcb->pcb_onfault = NULL;
		__asm volatile ("sync");
		return 1;
	}

	__asm volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%zu)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm volatile ("sync; isync");

	curpcb->pcb_onfault = NULL;
	__asm volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}
#endif
733 */ 734 735 static int 736 fix_unaligned(struct lwp *l, struct trapframe *tf) 737 { 738 739 return -1; 740 } 741 742 /* 743 * XXX Extremely lame implementations of _ufetch_* / _ustore_*. IBM 4xx 744 * experts should make versions that are good. 745 */ 746 747 #define UFETCH(sz) \ 748 int \ 749 _ufetch_ ## sz(const uint ## sz ## _t *uaddr, uint ## sz ## _t *valp) \ 750 { \ 751 return copyin(uaddr, valp, sizeof(*valp)); \ 752 } 753 754 UFETCH(8) 755 UFETCH(16) 756 UFETCH(32) 757 758 #define USTORE(sz) \ 759 int \ 760 _ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val) \ 761 { \ 762 return copyout(&val, uaddr, sizeof(val)); \ 763 } 764 765 USTORE(8) 766 USTORE(16) 767 USTORE(32) 768