/*	$NetBSD: trap.c,v 1.85 2020/07/15 09:10:14 rin Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
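/*
 * Trap handling for the IBM 4xx family: the trap() dispatcher invoked
 * from the locore exception stubs, the MMU-context helper used on the
 * exception return path, and the user/kernel data movers (copyin(),
 * copyout() and friends), which on 4xx switch the TLB PID register by
 * hand to reach the other address space.
 */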
#define	__UFETCHSTORE_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.85 2020/07/15 09:10:14 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/systm.h>

#if defined(KGDB)
#include <sys/kgdb.h>
#endif

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <powerpc/db_machdep.h>
#include <powerpc/spr.h>
#include <powerpc/userret.h>

#include <powerpc/ibm4xx/cpu.h>
#include <powerpc/ibm4xx/pmap.h>
#include <powerpc/ibm4xx/spr.h>
#include <powerpc/ibm4xx/tlb.h>

#include <powerpc/fpu/fpu_extern.h>

/* These definitions should probably be somewhere else XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((void *)((int)(sp) + 8)) /* more args go here */

static int fix_unaligned(struct lwp *l, struct trapframe *tf);

void trap(struct trapframe *);	/* Called from locore / trap_subr */
#if 0
/* Not currently used nor exposed externally in any header file */
int badaddr(void *, size_t);
int badaddr_read(void *, size_t, int *);
#endif
int ctx_setup(int, int);

#ifdef DEBUG
#define TDB_ALL	0x1
int trapdebug = /* TDB_ALL */ 0;
#define	DBPRINTF(x, y)	if (trapdebug & (x)) printf y
#else
#define DBPRINTF(x, y)
#endif

void
trap(struct trapframe *tf)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct pcb *pcb;
	int type = tf->tf_exc;
	int ftype, rv;
	ksiginfo_t ksi;

	KASSERT(l->l_stat == LSONPROC);

	if (tf->tf_srr1 & PSL_PR) {
		LWP_CACHE_CREDS(l, p);
		type |= EXC_USER;
	}

	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %lx from frame %p &frame %p\n",
	    type, tf->tf_srr0, tf, &tf));

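	/*
	 * Dispatch on the exception type.  EXC_USER was ORed in above
	 * for traps taken in user mode (PSL_PR set in the saved MSR),
	 * so most exceptions appear below twice: once for kernel mode
	 * and once for user mode.  ftype defaults to a read fault and
	 * is upgraded to VM_PROT_WRITE from the ESR bits where needed.
	 */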
	switch (type) {
	case EXC_DEBUG|EXC_USER:
	{
		int srr2, srr3;

		/*
		 * XXX SPR numbers assume the 4xx DBSR (0x3f0) and the
		 * critical-exception save/restore pair SRR2/SRR3
		 * (0x3de/0x3df); srr2 and srr3 must be read explicitly
		 * or they are printed uninitialized.
		 */
		__asm volatile(
		    "mfspr %0,0x3f0;"
		    "mfspr %1,0x3de;"
		    "mfspr %2,0x3df;"
		    : "=r" (rv), "=r" (srr2), "=r" (srr3));
		printf("debug reg is %x srr2 %x srr3 %x\n", rv, srr2,
		    srr3);
		/* XXX fall through or break here?! */
	}
	/*
	 * DEBUG intr -- probably single-step.
	 */
	case EXC_TRC|EXC_USER:
		tf->tf_srr1 &= ~PSL_SE;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_trap = EXC_TRC;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		trapsignal(l, &ksi);
		break;
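	/*
	 * Data storage / data TLB miss faults.  Kernel-mode faults are
	 * retried via uvm_fault(); if that fails and an onfault handler
	 * was registered via setfault(), the saved faultbuf is unwound
	 * into the trapframe below so that the faulting copy routine
	 * returns the error code in r3 -- in effect a longjmp() done by
	 * the trap handler.
	 */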
	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
	{
		struct vm_map *map;
		vaddr_t va;
		struct faultbuf *fb;

		pcb = lwp_getpcb(l);
		fb = pcb->pcb_onfault;

		if (curcpu()->ci_idepth >= 0) {
			rv = EFAULT;
			goto out;
		}

		va = tf->tf_dear;
		if (tf->tf_pid == KERNEL_PID) {
			map = kernel_map;
		} else {
			map = &p->p_vmspace->vm_map;
		}

		if (tf->tf_esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI) at %lx %s fault on %p esr %x\n",
		    tf->tf_srr0,
		    (ftype & VM_PROT_WRITE) ? "write" : "read",
		    (void *)va, tf->tf_esr));

		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, trunc_page(va), ftype);
		pcb->pcb_onfault = fb;
		if (rv == 0)
			return;
out:
		if (fb != NULL) {
			tf->tf_pid = KERNEL_PID;
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_srr1 |= PSL_IR;	/* Re-enable IMMU */
			tf->tf_cr = fb->fb_cr;
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = rv;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
	}
	goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		if (tf->tf_esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %lx %s fault on %lx %x\n",
		    tf->tf_srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    tf->tf_dear, tf->tf_esr));
		KASSERT(l == curlwp && (l->l_stat == LSONPROC));
//		KASSERT(curpcb->pcb_onfault == NULL);
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_dear),
		    ftype);
		if (rv == 0) {
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_DSI;
		ksi.ksi_addr = (void *)tf->tf_dear;
vm_signal:
		switch (rv) {
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		case ENOMEM:
			ksi.ksi_signo = SIGKILL;
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n", p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ?
			    kauth_cred_geteuid(l->l_cred) : -1);
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		trapsignal(l, &ksi);
		break;

	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		ftype = VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
		    tf->tf_srr0, tf));
//		KASSERT(curpcb->pcb_onfault == NULL);
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_srr0),
		    ftype);
		if (rv == 0) {
			break;
		}
isi:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_ISI;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		goto vm_signal;
		break;

	case EXC_AST|EXC_USER:
		cpu_ast(l, curcpu());
		break;

	case EXC_ALI|EXC_USER:
		if (fix_unaligned(l, tf) != 0) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_trap = EXC_ALI;
			ksi.ksi_addr = (void *)tf->tf_dear;
			trapsignal(l, &ksi);
		} else
			tf->tf_srr0 += 4;
		break;

	case EXC_PGM|EXC_USER:
		curcpu()->ci_data.cpu_ntrap++;

		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_PGM;
		ksi.ksi_addr = (void *)tf->tf_srr0;

		if (tf->tf_esr & ESR_PTR) {
sigtrap:
			if (p->p_raslist != NULL &&
			    ras_lookup(p, (void *)tf->tf_srr0) != (void *) -1) {
				/* Advance past the trap instruction. */
				tf->tf_srr0 += 4;
				break;
			}
			ksi.ksi_code = TRAP_BRKPT;
			ksi.ksi_signo = SIGTRAP;
		} else if (tf->tf_esr & ESR_PPR) {
			uint32_t opcode;

			rv = copyin((void *)tf->tf_srr0, &opcode,
			    sizeof(opcode));
			if (rv)
				goto isi;
			if (emulate_mxmsr(l, tf, opcode)) {
				tf->tf_srr0 += 4;
				break;
			}

			ksi.ksi_code = ILL_PRVOPC;
			ksi.ksi_signo = SIGILL;
		} else {
			pcb = lwp_getpcb(l);

			if (__predict_false(!fpu_used_p(l))) {
				memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
				fpu_mark_used(l);
			}

			if (fpu_emulate(tf, &pcb->pcb_fpu, &ksi)) {
				if (ksi.ksi_signo == 0)	/* was emulated */
					break;
				else if (ksi.ksi_signo == SIGTRAP)
					goto sigtrap;	/* XXX H/W bug? */
			} else {
				ksi.ksi_code = ILL_ILLOPC;
				ksi.ksi_signo = SIGILL;
			}
		}

		trapsignal(l, &ksi);
		break;

	case EXC_MCHK:
	{
		struct faultbuf *fb;

		pcb = lwp_getpcb(l);
		if ((fb = pcb->pcb_onfault) != NULL) {
			tf->tf_pid = KERNEL_PID;
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_srr1 |= PSL_IR;	/* Re-enable IMMU */
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = 1;	/* Return TRUE */
			tf->tf_cr = fb->fb_cr;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
	}
	goto brain_damage;

	default:
brain_damage:
		printf("trap type 0x%x at 0x%lx\n", type, tf->tf_srr0);
#if defined(DDB) || defined(KGDB)
		if (kdb_trap(type, tf))
			return;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Invoke powerpc userret code */
	userret(l, tf);
}
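/*
 * ctx_setup() runs on the exception exit path (presumably called from
 * the locore trap stubs, like trap() above) and decides which MMU
 * context (TLB PID) should be live when the trapframe is restored:
 * the process's own context when returning to user mode, allocating
 * one on demand, or KERNEL_PID otherwise.
 */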
int
ctx_setup(int ctx, int srr1)
{
	volatile struct pmap *pm;

	/* Update PID if we're returning to user mode. */
	if (srr1 & PSL_PR) {
		pm = curproc->p_vmspace->vm_map.pmap;
		if (!pm->pm_ctx) {
			ctx_alloc(__UNVOLATILE(pm));
		}
		ctx = pm->pm_ctx;
		if (srr1 & PSL_SE) {
			int dbreg, mask = 0x48000000;
			/*
			 * Set the Internal Debug and
			 * Instruction Completion bits of
			 * the DBCR0 register.
			 *
			 * XXX this is also used by jtag debuggers...
			 */
			__asm volatile("mfspr %0,0x3f2;"
			    "or %0,%0,%1;"
			    "mtspr 0x3f2,%0;" :
			    "=&r" (dbreg) : "r" (mask));
		}
	} else if (!ctx) {
		ctx = KERNEL_PID;
	}
	return (ctx);
}

/*
 * Used by copyin()/copyout()
 */
extern vaddr_t vmaprange(struct proc *, vaddr_t, vsize_t, int);
extern void vunmaprange(vaddr_t, vsize_t);
static int bigcopyin(const void *, void *, size_t);
static int bigcopyout(const void *, void *, size_t);

int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx, count = 0;
	struct faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 1024)
		return (bigcopyin(udaddr, kaddr, len));

	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Copy word-by-word (then byte-by-byte for any tail), switching
	 * the TLB PID between the user context for each load and the
	 * saved kernel PID for each store, with the IMMU disabled so
	 * instruction fetches don't depend on the PID in force.
	 * Note the modified operands (count, udaddr, kaddr) must be
	 * declared read-write, and cr0/ctr/memory are clobbered.
	 */
	__asm volatile(
	    "	mfmsr %[msr];"			/* Save MSR */
	    "	li %[pid],0x20;"
	    "	andc %[pid],%[msr],%[pid]; mtmsr %[pid];" /* Disable IMMU */
	    "	isync;"
	    "	mfpid %[pid];"			/* Save old PID */

	    "	srwi. %[count],%[len],0x2;"	/* How many words? */
	    "	beq- 2f;"			/* No words. Go do bytes */
	    "	mtctr %[count];"
	    "1:	mtpid %[ctx]; isync;"
#ifdef PPC_IBM403
	    "	lswi %[tmp],%[udaddr],4;"	/* Load user word */
#else
	    "	lwz %[tmp],0(%[udaddr]);"
#endif
	    "	addi %[udaddr],%[udaddr],0x4;"	/* next udaddr word */
	    "	sync;"
	    "	mtpid %[pid]; isync;"
#ifdef PPC_IBM403
	    "	stswi %[tmp],%[kaddr],4;"	/* Store kernel word */
#else
	    "	stw %[tmp],0(%[kaddr]);"
#endif
	    "	dcbst 0,%[kaddr];"		/* flush cache */
	    "	addi %[kaddr],%[kaddr],0x4;"	/* next kaddr word */
	    "	sync;"
	    "	bdnz 1b;"			/* repeat */

	    "2:	andi. %[count],%[len],0x3;"	/* How many remaining bytes? */
	    "	addi %[count],%[count],0x1;"
	    "	mtctr %[count];"
	    "3:	bdz 10f;"			/* while count */
	    "	mtpid %[ctx]; isync;"
	    "	lbz %[tmp],0(%[udaddr]);"	/* Load user byte */
	    "	addi %[udaddr],%[udaddr],0x1;"	/* next udaddr byte */
	    "	sync;"
	    "	mtpid %[pid]; isync;"
	    "	stb %[tmp],0(%[kaddr]);"	/* Store kernel byte */
	    "	dcbst 0,%[kaddr];"		/* flush cache */
	    "	addi %[kaddr],%[kaddr],0x1;"
	    "	sync;"
	    "	b 3b;"
	    "10:	mtpid %[pid]; mtmsr %[msr]; isync;"
						/* Restore PID and MSR */
	    : [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp),
	      [count] "+&r" (count), [udaddr] "+&b" (udaddr),
	      [kaddr] "+&b" (kaddr)
	    : [ctx] "b" (ctx), [len] "b" (len)
	    : "cc", "ctr", "memory");

	curpcb->pcb_onfault = NULL;
	return 0;
}
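/*
 * For buffers larger than 1kB the PID-switching loop above loses to a
 * plain memcpy(): bigcopyin()/bigcopyout() instead wire the user pages
 * with uvm_vslock(), map them into kernel VA with vmaprange(), and copy
 * directly, still under a setfault() handler.
 */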
static int
bigcopyin(const void *udaddr, void *kaddr, size_t len)
{
	const char *up;
	char *kp = kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, __UNCONST(udaddr), len, VM_PROT_READ);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);

	if ((error = setfault(&env)) == 0) {
		memcpy(kp, up, len);
	}

	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, __UNCONST(udaddr), len);

	return error;
}

int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx, count = 0;
	struct faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 1024)
		return (bigcopyout(kaddr, udaddr, len));

	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Mirror image of the copyin() loop: load each word with the
	 * kernel PID and store it with the user context's PID.
	 */
	__asm volatile(
	    "	mfmsr %[msr];"			/* Save MSR */
	    "	li %[pid],0x20;"
	    "	andc %[pid],%[msr],%[pid]; mtmsr %[pid];" /* Disable IMMU */
	    "	isync;"
	    "	mfpid %[pid];"			/* Save old PID */

	    "	srwi. %[count],%[len],0x2;"	/* How many words? */
	    "	beq- 2f;"			/* No words. Go do bytes */
	    "	mtctr %[count];"
	    "1:	mtpid %[pid]; isync;"
#ifdef PPC_IBM403
	    "	lswi %[tmp],%[kaddr],4;"	/* Load kernel word */
#else
	    "	lwz %[tmp],0(%[kaddr]);"
#endif
	    "	addi %[kaddr],%[kaddr],0x4;"	/* next kaddr word */
	    "	sync;"
	    "	mtpid %[ctx]; isync;"
#ifdef PPC_IBM403
	    "	stswi %[tmp],%[udaddr],4;"	/* Store user word */
#else
	    "	stw %[tmp],0(%[udaddr]);"
#endif
	    "	dcbst 0,%[udaddr];"		/* flush cache */
	    "	addi %[udaddr],%[udaddr],0x4;"	/* next udaddr word */
	    "	sync;"
	    "	bdnz 1b;"			/* repeat */

	    "2:	andi. %[count],%[len],0x3;"	/* How many remaining bytes? */
	    "	addi %[count],%[count],0x1;"
	    "	mtctr %[count];"
	    "3:	bdz 10f;"			/* while count */
	    "	mtpid %[pid]; isync;"
	    "	lbz %[tmp],0(%[kaddr]);"	/* Load kernel byte */
	    "	addi %[kaddr],%[kaddr],0x1;"	/* next kaddr byte */
	    "	sync;"
	    "	mtpid %[ctx]; isync;"
	    "	stb %[tmp],0(%[udaddr]);"	/* Store user byte */
	    "	dcbst 0,%[udaddr];"		/* flush cache */
	    "	addi %[udaddr],%[udaddr],0x1;"
	    "	sync;"
	    "	b 3b;"
	    "10:	mtpid %[pid]; mtmsr %[msr]; isync;"
						/* Restore PID and MSR */
	    : [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp),
	      [count] "+&r" (count), [udaddr] "+&b" (udaddr),
	      [kaddr] "+&b" (kaddr)
	    : [ctx] "b" (ctx), [len] "b" (len)
	    : "cc", "ctr", "memory");

	curpcb->pcb_onfault = NULL;
	return 0;
}
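/*
 * bigcopyout() is bigcopyin() with the directions reversed; the user
 * range is locked for writing and mapped read/write since we store
 * into it.
 */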
static int
bigcopyout(const void *kaddr, void *udaddr, size_t len)
{
	char *up;
	const char *kp = (const char *)kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, udaddr, len, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
	    VM_PROT_READ | VM_PROT_WRITE);

	if ((error = setfault(&env)) == 0) {
		memcpy(up, kp, len);
	}

	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, udaddr, len);

	return error;
}

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct faultbuf env, *oldfault;
	int rv;

	oldfault = curpcb->pcb_onfault;
	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return 0;
}
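/*
 * The badaddr() helpers below probe an address for validity by reading
 * it under an onfault handler: a machine check taken during the read
 * unwinds into the handler (see the EXC_MCHK case in trap() above) and
 * makes setfault() return non-zero.  The sync/isync pairs ensure any
 * machine check is taken here rather than attributed to unrelated code.
 * Disabled (#if 0) since nothing currently uses them.
 */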
#if 0
int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting. */
	__asm volatile ("sync; isync");

	if (setfault(&env)) {
		curpcb->pcb_onfault = NULL;
		__asm volatile ("sync");
		return 1;
	}

	__asm volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%zu)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm volatile ("sync; isync");

	curpcb->pcb_onfault = NULL;
	__asm volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}
#endif

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 * So far it handles none of them: it always fails, so every user-mode
 * alignment fault ends up as SIGBUS in the EXC_ALI case above.
 */
static int
fix_unaligned(struct lwp *l, struct trapframe *tf)
{

	return -1;
}

/*
 * XXX Extremely lame implementations of _ufetch_* / _ustore_*.  IBM 4xx
 * experts should make versions that are good.
 */

#define UFETCH(sz)							\
int									\
_ufetch_ ## sz(const uint ## sz ## _t *uaddr, uint ## sz ## _t *valp)	\
{									\
	return copyin(uaddr, valp, sizeof(*valp));			\
}

UFETCH(8)
UFETCH(16)
UFETCH(32)

#define USTORE(sz)							\
int									\
_ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val)		\
{									\
	return copyout(&val, uaddr, sizeof(val));			\
}

USTORE(8)
USTORE(16)
USTORE(32)
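/*
 * "Lame" because each access pays for a full copyin()/copyout(): fault
 * handler setup plus the PID-switching loop, all for a single byte,
 * halfword, or word.  A native version would presumably do one
 * PID-switched load or store directly.
 */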