/*	$NetBSD: oea_machdep.c,v 1.71 2014/03/24 19:29:59 christos Exp $	*/

/*
 * Copyright (C) 2002 Matt Thomas
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: oea_machdep.c,v 1.71 2014/03/24 19:29:59 christos Exp $");

#include "opt_ppcarch.h"
#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_ipkdb.h"
#include "opt_multiprocessor.h"
#include "opt_altivec.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/boot_flag.h>
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscallargs.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#ifdef DDB
#include <powerpc/db_machdep.h>
#include <ddb/db_extern.h>
#endif

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#ifdef IPKDB
#include <ipkdb/ipkdb.h>
#endif

#include <machine/powerpc.h>

#include <powerpc/trap.h>
#include <powerpc/spr.h>
#include <powerpc/pte.h>
#include <powerpc/altivec.h>
#include <powerpc/pcb.h>

#include <powerpc/oea/bat.h>
#include <powerpc/oea/cpufeat.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>

char machine[] = MACHINE;		/* from <machine/param.h> */
char machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

struct vm_map *phys_map = NULL;

/*
 * Global variables used here and there
 */
static void trap0(void *);

/* XXXSL: The battable is not initialized to non-zero for PPC_OEA64 and PPC_OEA64_BRIDGE */
struct bat battable[BAT_VA2IDX(0xffffffff)+1];

register_t iosrtable[16];	/* I/O segments, for kernel_pmap setup */
#ifndef MSGBUFADDR
paddr_t msgbuf_paddr;
#endif

extern int dsitrap_fix_dbat4[];
extern int dsitrap_fix_dbat5[];
extern int dsitrap_fix_dbat6[];
extern int dsitrap_fix_dbat7[];

void
oea_init(void (*handler)(void))
{
	extern int trapcode[], trapsize[];
	extern int sctrap[], scsize[];
	extern int alitrap[], alisize[];
	extern int dsitrap[], dsisize[];
	extern int trapstart[], trapend[];
#ifdef PPC_OEA601
	extern int dsi601trap[], dsi601size[];
#endif
	extern int decrint[], decrsize[];
	extern int tlbimiss[], tlbimsize[];
	extern int tlbdlmiss[], tlbdlmsize[];
	extern int tlbdsmiss[], tlbdsmsize[];
#if defined(DDB) || defined(KGDB)
	extern int ddblow[], ddbsize[];
#endif
#ifdef IPKDB
	extern int ipkdblow[], ipkdbsize[];
#endif
#ifdef ALTIVEC
	register_t msr;
#endif
	uintptr_t exc, exc_base;
#if defined(ALTIVEC) || defined(PPC_OEA)
	register_t scratch;
#endif
	unsigned int cpuvers;
	size_t size;
	struct cpu_info * const ci = &cpu_info[0];

#ifdef PPC_HIGH_VEC
	exc_base = EXC_HIGHVEC;
#else
	exc_base = 0;
#endif
	KASSERT(mfspr(SPR_SPRG0) == (uintptr_t)ci);

#if defined (PPC_OEA64_BRIDGE) && defined (PPC_OEA)
	if (oeacpufeat & OEACPU_64_BRIDGE)
		pmap_setup64bridge();
	else
		pmap_setup32();
#endif

	cpuvers = mfpvr() >> 16;
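
	/*
	 * The PVR holds the processor version in its upper 16 bits, which
	 * is what the MPC601 comparisons below key off.  For illustration
	 * (values from the PowerPC manuals): 0x0001 is the 601, 0x0004 the
	 * 604, 0x0008 the 750, 0x000c the 7400; the MPC601 constant used
	 * in this file is that 0x0001.
	 */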
	/*
	 * Initialize proc0 and current pcb and pmap pointers.
	 */
	(void) ci;
	KASSERT(ci != NULL);
	KASSERT(curcpu() == ci);
	KASSERT(lwp0.l_cpu == ci);

	curpcb = lwp_getpcb(&lwp0);
	memset(curpcb, 0, sizeof(struct pcb));

#ifdef ALTIVEC
	/*
	 * Initialize the vectors with NaNs
	 */
	for (scratch = 0; scratch < 32; scratch++) {
		curpcb->pcb_vr.vreg[scratch][0] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][1] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][2] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][3] = 0x7FFFDEAD;
	}
#endif
	curpm = curpcb->pcb_pm = pmap_kernel();

	/*
	 * Cause a PGM trap if we branch to 0.
	 *
	 * XXX GCC4.1 complains about memset on address zero, so
	 * don't use the builtin.
	 */
#undef memset
	memset(0, 0, 0x100);

	/*
	 * Set up trap vectors.  Don't assume vectors are on 0x100.
	 */
	for (exc = exc_base; exc <= exc_base + EXC_LAST; exc += 0x100) {
		switch (exc - exc_base) {
		default:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			break;
#if 0
		case EXC_EXI:
			/*
			 * This one is (potentially) installed during autoconf
			 */
			break;
#endif
		case EXC_SC:
			size = (size_t)scsize;
			memcpy((void *)exc, sctrap, size);
			break;
		case EXC_ALI:
			size = (size_t)alisize;
			memcpy((void *)exc, alitrap, size);
			break;
		case EXC_DSI:
#ifdef PPC_OEA601
			if (cpuvers == MPC601) {
				size = (size_t)dsi601size;
				memcpy((void *)exc, dsi601trap, size);
				break;
			} else
#endif /* PPC_OEA601 */
			if (oeacpufeat & OEACPU_NOBAT) {
				size = (size_t)alisize;
				memcpy((void *)exc, alitrap, size);
			} else {
				size = (size_t)dsisize;
				memcpy((void *)exc, dsitrap, size);
			}
			break;
		case EXC_DECR:
			size = (size_t)decrsize;
			memcpy((void *)exc, decrint, size);
			break;
		case EXC_IMISS:
			size = (size_t)tlbimsize;
			memcpy((void *)exc, tlbimiss, size);
			break;
		case EXC_DLMISS:
			size = (size_t)tlbdlmsize;
			memcpy((void *)exc, tlbdlmiss, size);
			break;
		case EXC_DSMISS:
			size = (size_t)tlbdsmsize;
			memcpy((void *)exc, tlbdsmiss, size);
			break;
		case EXC_PERF:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			memcpy((void *)(exc_base + EXC_VEC), trapcode, size);
			break;
#if defined(DDB) || defined(IPKDB) || defined(KGDB)
		case EXC_RUNMODETRC:
#ifdef PPC_OEA601
			if (cpuvers != MPC601) {
#endif
				size = (size_t)trapsize;
				memcpy((void *)exc, trapcode, size);
				break;
#ifdef PPC_OEA601
			}
			/* FALLTHROUGH */
#endif
		case EXC_PGM:
		case EXC_TRC:
		case EXC_BPT:
#if defined(DDB) || defined(KGDB)
			size = (size_t)ddbsize;
			memcpy((void *)exc, ddblow, size);
#if defined(IPKDB)
#error "cannot enable IPKDB with DDB or KGDB"
#endif
#else
			size = (size_t)ipkdbsize;
			memcpy((void *)exc, ipkdblow, size);
#endif
			break;
#endif /* DDB || IPKDB || KGDB */
		}
#if 0
		exc += roundup(size, 32);
#endif
	}

	/*
	 * Install a branch absolute to trap0 to force a panic.
	 */
	if ((uintptr_t)trap0 < 0x2000000) {
		*(volatile uint32_t *) 0 = 0x7c6802a6;
		*(volatile uint32_t *) 4 = 0x48000002 | (uintptr_t) trap0;
	}
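
	/*
	 * To unpack the two magic words above: 0x7c6802a6 is "mflr r3" and
	 * 0x48000002 is "ba" (branch absolute) with an empty target field,
	 * so or'ing in trap0's address yields "ba trap0".  The mflr leaves
	 * the offending return address in r3, which the SVR4 ABI makes the
	 * first argument of trap0().  A ba target is a sign-extended,
	 * word-aligned 26-bit field, hence the guard above that trap0 lie
	 * below 0x2000000 (32 MB).
	 */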
	/*
	 * Get the cache sizes because install_extint calls __syncicache.
	 */
	cpu_probe_cache();

#define	MxSPR_MASK	0x7c1fffff
#define	MFSPR_MQ	0x7c0002a6
#define	MTSPR_MQ	0x7c0003a6
#define	MTSPR_IBAT0L	0x7c1183a6
#define	MTSPR_IBAT1L	0x7c1383a6
#define	NOP		0x60000000
#define	B		0x48000000
#define	TLBSYNC		0x7c00046c
#define	SYNC		0x7c0004ac
#ifdef PPC_OEA64_BRIDGE
#define	MFMSR_MASK	0xfc1fffff
#define	MFMSR		0x7c0000a6
#define	MTMSRD_MASK	0xfc1effff
#define	MTMSRD		0x7c000164
#define	RLDICL_MASK	0xfc00001c
#define	RLDICL		0x78000000
#define	RFID		0x4c000024
#define	RFI		0x4c000064
#endif

#ifdef ALTIVEC
#define	MFSPR_VRSAVE	0x7c0042a6
#define	MTSPR_VRSAVE	0x7c0043a6

	/*
	 * Try to set the VEC bit in the MSR.  If it doesn't get set, we are
	 * not on an AltiVec capable processor.
	 */
	__asm volatile (
	    "mfmsr %0; oris %1,%0,%2@h; mtmsr %1; isync; "
	    "mfmsr %1; mtmsr %0; isync"
	    : "=r"(msr), "=r"(scratch)
	    : "J"(PSL_VEC));

	/*
	 * If we aren't on an AltiVec capable processor, we need to zap any
	 * of the sequences we save/restore the VRSAVE SPR into NOPs.
	 */
	if (scratch & PSL_VEC) {
		cpu_altivec = 1;
	} else {
		for (int *ip = trapstart; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_VRSAVE) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_VRSAVE) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			}
		}
	}
#endif

	/* XXX It would seem like this code could be elided ifndef 601, but
	 * doing so breaks my power3 machine.
	 */
	/*
	 * If we aren't on an MPC601 processor, we need to zap any of the
	 * sequences we save/restore the MQ SPR into NOPs, and skip over the
	 * sequences where we zap/restore BAT registers on kernel exit/entry.
	 */
	if (cpuvers != MPC601) {
		for (int *ip = trapstart; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_MQ) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_MQ) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_IBAT0L) {
				if ((ip[1] & MxSPR_MASK) == MTSPR_IBAT1L)
					ip[-1] = B | 0x14;	/* li */
				else
					ip[-4] = B | 0x24;	/* lis */
			}
		}
	}

#ifdef PPC_OEA64_BRIDGE
	if ((oeacpufeat & OEACPU_64_BRIDGE) == 0) {
		/*
		 * Patch the exception vectors: NOP out each
		 * mfmsr/rldicl/mtmsrd sequence and replace each rfid with
		 * an rfi, since mtmsrd and rfid don't exist on 32-bit CPUs.
		 */
		for (int *ip = (int *)exc_base;
		     (uintptr_t)ip <= exc_base + EXC_LAST;
		     ip++) {
			if ((ip[0] & MFMSR_MASK) == MFMSR
			    && (ip[1] & RLDICL_MASK) == RLDICL
			    && (ip[2] & MTMSRD_MASK) == MTMSRD) {
				*ip++ = NOP;
				*ip++ = NOP;
				ip[0] = NOP;
			} else if (*ip == RFID) {
				*ip = RFI;
			}
		}

		/*
		 * Now do the same to the out-of-line trap handling code
		 * between trapstart and trapend.
		 */
		for (int *ip = trapstart; ip < trapend; ip++) {
			if ((ip[0] & MFMSR_MASK) == MFMSR
			    && (ip[1] & RLDICL_MASK) == RLDICL
			    && (ip[2] & MTMSRD_MASK) == MTMSRD) {
				*ip++ = NOP;
				*ip++ = NOP;
				ip[0] = NOP;
			} else if (*ip == RFID) {
				*ip = RFI;
			}
		}
	}
#endif /* PPC_OEA64_BRIDGE */

	/*
	 * Sync the changed instructions.
	 */
	__syncicache((void *) trapstart,
	    (uintptr_t) trapend - (uintptr_t) trapstart);
	__syncicache(dsitrap_fix_dbat4, 16);
	__syncicache(dsitrap_fix_dbat7, 8);
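
	/*
	 * A note on the patching above: MxSPR_MASK clears only the GPR
	 * field (bits 0x03e00000) of an mfspr/mtspr encoding, so the
	 * comparisons match any register.  For example (an illustrative
	 * sequence, not taken from any particular object file), an MQ save
	 * such as
	 *
	 *	mfspr	r12,mq		# 0x7d8002a6; MFSPR_MQ under the mask
	 *	stw	r12,8(r1)
	 *
	 * becomes two NOPs on non-601 CPUs.  Since the patches are made
	 * with ordinary stores, the new words land in the data cache;
	 * __syncicache() pushes them to memory and invalidates the stale
	 * instruction-cache lines so the patched code is what gets fetched.
	 */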
#ifdef PPC_OEA601

	/*
	 * If we are on an MPC601 processor, we need to zap any tlbsync
	 * instructions into sync.  This differs from the above in
	 * examining all kernel text, as opposed to just the exception
	 * handling.  We sync the icache on every instruction found since
	 * there are only very few of them.
	 */
	if (cpuvers == MPC601) {
		extern int kernel_text[], etext[];
		int *ip;

		for (ip = kernel_text; ip < etext; ip++) {
			if (*ip == TLBSYNC) {
				*ip = SYNC;
				__syncicache(ip, sizeof(*ip));
			}
		}
	}
#endif /* PPC_OEA601 */

	/*
	 * Configure a PSL user mask matching this processor.
	 */
	cpu_psluserset = PSL_EE | PSL_PR | PSL_ME | PSL_IR | PSL_DR | PSL_RI;
	cpu_pslusermod = PSL_FP | PSL_FE0 | PSL_FE1 | PSL_LE | PSL_SE | PSL_BE;
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		cpu_psluserset &= PSL_601_MASK;
		cpu_pslusermod &= PSL_601_MASK;
	}
#endif
#ifdef ALTIVEC
	if (cpu_altivec)
		cpu_pslusermod |= PSL_VEC;
#endif
#ifdef PPC_HIGH_VEC
	cpu_psluserset |= PSL_IP;	/* XXX ok? */
#endif

	/*
	 * external interrupt handler install
	 */
	if (handler)
		oea_install_extint(handler);

	__syncicache((void *)exc_base, EXC_LAST + 0x100);

	/*
	 * Now enable translation (and machine checks/recoverable interrupts).
	 */
#ifdef PPC_OEA
	__asm volatile ("sync; mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
	    : "=r"(scratch)
	    : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));
#endif

	/*
	 * Let's take all the indirect calls via our stubs and patch
	 * them to be direct calls.
	 */
	cpu_fixup_stubs();

	KASSERT(curcpu() == ci);
}

#ifdef PPC_OEA601
void
mpc601_ioseg_add(paddr_t pa, register_t len)
{
	const u_int i = pa >> ADDR_SR_SHFT;

	if (len != BAT_BL_256M)
		panic("mpc601_ioseg_add: len != 256M");

	/*
	 * Translate into an I/O segment and stash it away for use
	 * in pmap_bootstrap().
	 */
	iosrtable[i] = SR601(SR601_Ks, SR601_BUID_MEMFORCED, 0, i);

	/*
	 * XXX Setting segment register 0xf on my powermac 7200
	 * wedges the machine, so the register is set later, in pmap.c.
	 */
	/*
	__asm volatile ("mtsrin %0,%1"
	    :: "r"(iosrtable[i]),
	       "r"(pa));
	*/
}
#endif /* PPC_OEA601 */

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
#define	DBAT_SET(n, batl, batu)				\
	do {						\
		mtspr(SPR_DBAT##n##L, (batl));		\
		mtspr(SPR_DBAT##n##U, (batu));		\
	} while (/*CONSTCOND*/ 0)
#define	DBAT_RESET(n)	DBAT_SET(n, 0, 0)
#define	DBATU_GET(n)	mfspr(SPR_DBAT##n##U)
#define	IBAT_SET(n, batl, batu)				\
	do {						\
		mtspr(SPR_IBAT##n##L, (batl));		\
		mtspr(SPR_IBAT##n##U, (batu));		\
	} while (/*CONSTCOND*/ 0)
#define	IBAT_RESET(n)	IBAT_SET(n, 0, 0)
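
/*
 * A worked example of the BATL()/BATU() encodings used below, assuming the
 * usual 604-style layout from <powerpc/oea/bat.h> (shown for illustration;
 * the macros are authoritative).  A 256 MB cache-inhibited, guarded,
 * read/write I/O BAT at 0x90000000 would be:
 *
 *	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs)
 *	     = 0x90000000 | 0x1ffc | 0x2       = 0x90001ffe
 *	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW)
 *	     = 0x90000000 | 0x20 | 0x8 | 0x2   = 0x9000002a
 *
 * i.e. effective/physical block address 0x90000000, 256 MB block length,
 * supervisor-valid.
 */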
void
oea_iobat_add(paddr_t pa, register_t len)
{
	static int z = 1;	/* next DBAT to load; DBAT0 is loaded in oea_batinit() */
	const u_int n = BAT_BL_TO_SIZE(len) / BAT_BL_TO_SIZE(BAT_BL_8M);
	const u_int i = BAT_VA2IDX(pa) & -n;	/* in case pa was in the middle */
	const int after_bat3 = (oeacpufeat & OEACPU_HIGHBAT) ? 4 : 8;

	KASSERT(len >= BAT_BL_8M);

	/*
	 * If the caller wanted a bigger BAT than the hardware supports,
	 * split it into smaller BATs.
	 */
	if (len > BAT_BL_256M && (oeacpufeat & OEACPU_XBSEN) == 0) {
		u_int xn = BAT_BL_TO_SIZE(len) >> 28;
		while (xn-- > 0) {
			oea_iobat_add(pa, BAT_BL_256M);
			pa += 0x10000000;
		}
		return;
	}

	const register_t batl = BATL(pa, BAT_I|BAT_G, BAT_PP_RW);
	const register_t batu = BATU(pa, len, BAT_Vs);

	for (u_int j = 0; j < n; j++) {
		battable[i + j].batl = batl;
		battable[i + j].batu = batu;
	}

	/*
	 * Let's start loading the BAT registers.  DBAT_SET() pastes its
	 * register number into an SPR name at compile time, so it needs a
	 * literal constant; hence this switch rather than a loop.
	 */
	switch (z) {
	case 1:
		DBAT_SET(1, batl, batu);
		z = 2;
		break;
	case 2:
		DBAT_SET(2, batl, batu);
		z = 3;
		break;
	case 3:
		DBAT_SET(3, batl, batu);
		z = after_bat3;		/* no highbat, skip to end */
		break;
	case 4:
		DBAT_SET(4, batl, batu);
		z = 5;
		break;
	case 5:
		DBAT_SET(5, batl, batu);
		z = 6;
		break;
	case 6:
		DBAT_SET(6, batl, batu);
		z = 7;
		break;
	case 7:
		DBAT_SET(7, batl, batu);
		z = 8;
		break;
	default:
		break;
	}
}

void
oea_iobat_remove(paddr_t pa)
{
	const u_int i = BAT_VA2IDX(pa);

	if (!BAT_VA_MATCH_P(battable[i].batu, pa) ||
	    !BAT_VALID_P(battable[i].batu, PSL_PR))
		return;
	const int n =
	    __SHIFTOUT(battable[i].batu, (BAT_XBL|BAT_BL) & ~BAT_BL_8M) + 1;
	KASSERT((n & (n-1)) == 0);	/* power of 2 */
	KASSERT((i & (n-1)) == 0);	/* multiple of n */

	memset(&battable[i], 0, n*sizeof(battable[0]));

	const int maxbat = oeacpufeat & OEACPU_HIGHBAT ? 8 : 4;
	for (u_int k = 1; k < maxbat; k++) {
		register_t batu;
		switch (k) {
		case 1:
			batu = DBATU_GET(1);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(1);
			break;
		case 2:
			batu = DBATU_GET(2);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(2);
			break;
		case 3:
			batu = DBATU_GET(3);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(3);
			break;
		case 4:
			batu = DBATU_GET(4);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(4);
			break;
		case 5:
			batu = DBATU_GET(5);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(5);
			break;
		case 6:
			batu = DBATU_GET(6);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(6);
			break;
		case 7:
			batu = DBATU_GET(7);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(7);
			break;
		default:
			break;
		}
	}
}
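
/*
 * oea_batinit() below consumes a variadic list of (paddr, len) pairs
 * describing I/O regions, terminated by a zero paddr.  A hypothetical call
 * from a port's early bootstrap (the addresses are made up for
 * illustration) would look like:
 *
 *	oea_batinit(0x80000000, BAT_BL_256M,
 *	    0xf0000000, BAT_BL_128M,
 *	    0);
 */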
void
oea_batinit(paddr_t pa, ...)
{
	struct mem_region *allmem, *availmem, *mp;
	register_t msr = mfmsr();
	va_list ap;
#ifdef PPC_OEA601
	unsigned int cpuvers;

	cpuvers = mfpvr() >> 16;
#endif /* PPC_OEA601 */

	/*
	 * We need to call this before zapping the BATs so that OF calls
	 * still work.
	 */
	mem_regions(&allmem, &availmem);

	/*
	 * Initialize the BAT registers to unmapped so we do not generate
	 * overlapping mappings below.
	 *
	 * The 601's implementation differs in the Valid bit being situated
	 * in the lower BAT register, and in being a unified BAT only whose
	 * four entries are accessed through the IBAT[0-3] SPRs.
	 *
	 * Also, while the 601 does distinguish between supervisor/user
	 * protection keys, it does _not_ distinguish between validity in
	 * supervisor/user mode.
	 */
	if ((msr & (PSL_IR|PSL_DR)) == 0) {
#ifdef PPC_OEA601
		if (cpuvers == MPC601) {
			__asm volatile ("mtibatl 0,%0" :: "r"(0));
			__asm volatile ("mtibatl 1,%0" :: "r"(0));
			__asm volatile ("mtibatl 2,%0" :: "r"(0));
			__asm volatile ("mtibatl 3,%0" :: "r"(0));
		} else
#endif /* PPC_OEA601 */
		{
			DBAT_RESET(0); IBAT_RESET(0);
			DBAT_RESET(1); IBAT_RESET(1);
			DBAT_RESET(2); IBAT_RESET(2);
			DBAT_RESET(3); IBAT_RESET(3);
			if (oeacpufeat & OEACPU_HIGHBAT) {
				DBAT_RESET(4); IBAT_RESET(4);
				DBAT_RESET(5); IBAT_RESET(5);
				DBAT_RESET(6); IBAT_RESET(6);
				DBAT_RESET(7); IBAT_RESET(7);

				/*
				 * Change the first instruction to branch to
				 * dsitrap_fix_dbat6
				 */
				dsitrap_fix_dbat4[0] &= ~0xfffc;
				dsitrap_fix_dbat4[0]
				    += (uintptr_t)dsitrap_fix_dbat6
				    - (uintptr_t)&dsitrap_fix_dbat4[0];

				/*
				 * Change the second instruction to branch to
				 * dsitrap_fix_dbat5 if bit 30 (aka bit 1) is
				 * true (0x419e0000 is "beq cr7" with the
				 * displacement filled in here).
				 */
				dsitrap_fix_dbat4[1] = 0x419e0000
				    + (uintptr_t)dsitrap_fix_dbat5
				    - (uintptr_t)&dsitrap_fix_dbat4[1];

				/*
				 * Change it to load dbat4 instead of dbat2
				 * (0x7fd88ba6 is "mtspr DBAT4U,r30" and
				 * 0x7ff98ba6 is "mtspr DBAT4L,r31").
				 */
				dsitrap_fix_dbat4[2] = 0x7fd88ba6;
				dsitrap_fix_dbat4[3] = 0x7ff98ba6;

				/*
				 * Change it to load dbat7 instead of dbat3
				 * (mtspr DBAT7U,r30 and mtspr DBAT7L,r31).
				 */
				dsitrap_fix_dbat7[0] = 0x7fde8ba6;
				dsitrap_fix_dbat7[1] = 0x7fff8ba6;
			}
		}
	}

	/*
	 * Set up BATs to map physical memory.
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		int i;

		/*
		 * Set up battable to map the lowest 256 MB area.
		 * Map the lowest 32 MB area via BAT[0-3];
		 * BAT[01] are fixed, BAT[23] are floating.
		 */
		for (i = 0; i < 32; i++) {
			battable[i].batl = BATL601(i << 23,
			    BAT601_BSM_8M, BAT601_V);
			battable[i].batu = BATU601(i << 23,
			    BAT601_M, BAT601_Ku, BAT601_PP_NONE);
		}
		__asm volatile ("mtibatu 0,%1; mtibatl 0,%0"
		    :: "r"(battable[0x00000000 >> 23].batl),
		       "r"(battable[0x00000000 >> 23].batu));
		__asm volatile ("mtibatu 1,%1; mtibatl 1,%0"
		    :: "r"(battable[0x00800000 >> 23].batl),
		       "r"(battable[0x00800000 >> 23].batu));
		__asm volatile ("mtibatu 2,%1; mtibatl 2,%0"
		    :: "r"(battable[0x01000000 >> 23].batl),
		       "r"(battable[0x01000000 >> 23].batu));
		__asm volatile ("mtibatu 3,%1; mtibatl 3,%0"
		    :: "r"(battable[0x01800000 >> 23].batl),
		       "r"(battable[0x01800000 >> 23].batu));
	}
#endif /* PPC_OEA601 */

	/*
	 * Now set up the other fixed BAT registers.
	 *
	 * Note that we still run in real mode, and the BAT
	 * registers were cleared above.
	 */

	va_start(ap, pa);

	/*
	 * Add any I/O BATs specified;
	 * use I/O segments on the BAT-starved 601.
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			mpc601_ioseg_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	} else
#endif
	{
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			oea_iobat_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	}

	va_end(ap);
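
	/*
	 * The loop below picks, at each step, the largest BAT size that is
	 * both naturally aligned at paddr and does not run past the end of
	 * the (merged) region.  For example (sizes made up for
	 * illustration), a 96 MB region at 0 is covered as: 256 MB and
	 * 128 MB overshoot, so a 64 MB BAT maps [0, 64M); at 64M another
	 * 64 MB BAT would overshoot, so a 32 MB BAT maps [64M, 96M).
	 */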
	/*
	 * Set up battable to map all RAM regions.
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & 0xff800000;
			paddr_t end = mp->start + mp->size;

			do {
				u_int ix = paddr >> 23;

				battable[ix].batl =
				    BATL601(paddr, BAT601_BSM_8M, BAT601_V);
				battable[ix].batu =
				    BATU601(paddr, BAT601_M, BAT601_Ku,
					BAT601_PP_NONE);
				paddr += (1 << 23);
			} while (paddr < end);
		}
	} else
#endif
	{
		const register_t bat_inc = BAT_IDX2VA(1);
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & -bat_inc;
			paddr_t end = roundup2(mp->start + mp->size, bat_inc);

			/*
			 * If the next entries are adjacent, merge them
			 * into this one
			 */
			while (mp[1].size && end == (mp[1].start & -bat_inc)) {
				mp++;
				end = roundup2(mp->start + mp->size, bat_inc);
			}

			while (paddr < end) {
				register_t bl = (oeacpufeat & OEACPU_XBSEN
				    ? BAT_BL_2G
				    : BAT_BL_256M);
				psize_t size = BAT_BL_TO_SIZE(bl);
				u_int n = BAT_VA2IDX(size);
				u_int i = BAT_VA2IDX(paddr);

				while ((paddr & (size - 1))
				    || paddr + size > end) {
					size >>= 1;
					bl = (bl >> 1) & (BAT_XBL|BAT_BL);
					n >>= 1;
				}

				KASSERT(size >= bat_inc);
				KASSERT(n >= 1);
				KASSERT(bl >= BAT_BL_8M);

				register_t batl = BATL(paddr, BAT_M, BAT_PP_RW);
				register_t batu = BATU(paddr, bl, BAT_Vs);

				for (; n-- > 0; i++) {
					battable[i].batl = batl;
					battable[i].batu = batu;
				}
				paddr += size;
			}
		}

		/*
		 * Set up BAT0 to only map the lowest area.
		 */
		__asm volatile ("mtibatl 0,%0; mtibatu 0,%1;"
		    "mtdbatl 0,%0; mtdbatu 0,%1;"
		    :: "r"(battable[0].batl), "r"(battable[0].batu));
	}
}
#endif /* PPC_OEA || PPC_OEA64_BRIDGE */

void
oea_install_extint(void (*handler)(void))
{
	extern int extint[], extsize[];
	extern int extint_call[];
	uintptr_t offset = (uintptr_t)handler - (uintptr_t)extint_call;
#ifdef PPC_HIGH_VEC
	const uintptr_t exc_exi_base = EXC_HIGHVEC + EXC_EXI;
#else
	const uintptr_t exc_exi_base = EXC_EXI;
#endif
	int omsr, msr;

#ifdef DIAGNOSTIC
	if (offset > 0x1ffffff)
		panic("install_extint: %p too far away (%#lx)", handler,
		    (unsigned long) offset);
#endif
	__asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
	    : "=r" (omsr), "=r" (msr)
	    : "K" ((u_short)~PSL_EE));
	extint_call[0] = (extint_call[0] & 0xfc000003) | offset;
	__syncicache((void *)extint_call, sizeof extint_call[0]);
	memcpy((void *)exc_exi_base, extint, (size_t)extsize);
#ifdef PPC_OEA64_BRIDGE
	if ((oeacpufeat & OEACPU_64_BRIDGE) == 0) {
		for (int *ip = (int *)exc_exi_base;
		     (uintptr_t)ip <= exc_exi_base + (size_t)extsize;
		     ip++) {
			if ((ip[0] & MFMSR_MASK) == MFMSR
			    && (ip[1] & RLDICL_MASK) == RLDICL
			    && (ip[2] & MTMSRD_MASK) == MTMSRD) {
				*ip++ = NOP;
				*ip++ = NOP;
				ip[0] = NOP;
			} else if (*ip == RFID) {
				*ip = RFI;
			}
		}
	}
#endif
	__syncicache((void *)exc_exi_base, (size_t)extsize);

	__asm volatile ("mtmsr %0" :: "r"(omsr));
}
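
/*
 * The extint_call patch in oea_install_extint() above rewrites the LI
 * field of a "bl" instruction: the mask 0xfc000003 preserves the primary
 * opcode and the AA/LK bits, and or'ing in "offset" installs the new
 * displacement.  LI is a sign-extended 26-bit byte offset, so the handler
 * must lie within +/-32 MB of the call site; the DIAGNOSTIC check enforces
 * that bound before the patch is made.
 */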
/*
 * Machine dependent startup code.
 */
void
oea_startup(const char *model)
{
	uintptr_t sz;
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9], mstr[128];

	KASSERT(curcpu() != NULL);
	KASSERT(lwp0.l_cpu != NULL);
	KASSERT(curcpu()->ci_idepth == -1);

	sz = round_page(MSGBUFSIZE);
#ifdef MSGBUFADDR
	v = (void *) MSGBUFADDR;
#else
	/*
	 * If the msgbuf is not in segment 0, allocate KVA for it and access
	 * it via mapped pages.  [This prevents unneeded BAT switches.]
	 */
	v = (void *) msgbuf_paddr;
	if (msgbuf_paddr + sz > SEGMENT_LENGTH) {
		u_int i;

		minaddr = 0;
		if (uvm_map(kernel_map, &minaddr, sz,
			NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
			    UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0)
			panic("startup: cannot allocate VM for msgbuf");
		v = (void *)minaddr;
		for (i = 0; i < sz; i += PAGE_SIZE) {
			pmap_kenter_pa(minaddr + i, msgbuf_paddr + i,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
	}
#endif
	initmsgbuf(v, sz);

	printf("%s%s", copyright, version);
	if (model != NULL)
		printf("Model: %s\n", model);
	cpu_identify(mstr, sizeof(mstr));
	cpu_setmodel("%s", mstr);

	format_bytes(pbuf, sizeof(pbuf), ctob((u_int)physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Allocate away the pages that map to 0xDEA[CDE]xxxx.  Do this after
	 * the bufpages are allocated in case they overlap, since it's not
	 * fatal if we can't allocate these.
	 */
	if (KERNEL_SR == 13 || KERNEL2_SR == 14) {
		int error;
		minaddr = 0xDEAC0000;
		error = uvm_map(kernel_map, &minaddr, 0x30000,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
			UVM_ADV_NORMAL, UVM_FLAG_FIXED));
		if (error != 0 || minaddr != 0xDEAC0000)
			printf("oea_startup: failed to allocate DEAD "
			    "ZONE: error=%d\n", error);
	}

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}

/*
 * Crash dump handling.
 */
void
oea_dumpsys(void)
{
	printf("dumpsys: TBD\n");
}

/*
 * Convert kernel VA to physical address
 */
paddr_t
kvtop(void *addr)
{
	vaddr_t va;
	paddr_t pa;
	uintptr_t off;
	extern char end[];

	if (addr < (void *)end)
		return (paddr_t)addr;

	va = trunc_page((vaddr_t)addr);
	off = (uintptr_t)addr - va;

	if (pmap_extract(pmap_kernel(), va, &pa) == false) {
		/*printf("kvtop: zero page frame (va=0x%x)\n", addr);*/
		return (paddr_t)addr;
	}

	return (pa + off);
}
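
/*
 * A sketch of how mapiodev() below is typically used by bus code (the
 * address and size are made up for illustration):
 *
 *	volatile uint8_t *regs = mapiodev(0xf3000000, PAGE_SIZE, false);
 *
 * Passing false for "prefetchable" maps the range with PMAP_NOCACHE,
 * giving the cache-inhibited, guarded mapping device registers need;
 * true selects PMAP_MD_PREFETCHABLE, for memory-like regions (e.g. a
 * framebuffer) where cached or speculative access is harmless.
 */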
/*
 * Allocate VM space and map in the I/O address.
 */
void *
mapiodev(paddr_t pa, psize_t len, bool prefetchable)
{
	paddr_t faddr;
	vaddr_t taddr, va;
	int off;

	faddr = trunc_page(pa);
	off = pa - faddr;
	len = round_page(off + len);
	va = taddr = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY);

	if (va == 0)
		return NULL;

	for (; len > 0; len -= PAGE_SIZE) {
		pmap_kenter_pa(taddr, faddr, VM_PROT_READ | VM_PROT_WRITE,
		    (prefetchable ? PMAP_MD_PREFETCHABLE : PMAP_NOCACHE));
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (void *)(va + off);
}

void
unmapiodev(vaddr_t va, vsize_t len)
{
	paddr_t faddr;

	if (!va)
		return;

	faddr = trunc_page(va);
	len = round_page(va - faddr + len);

	pmap_kremove(faddr, len);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, faddr, len, UVM_KMF_VAONLY);
}

void
trap0(void *lr)
{
	panic("call to null-ptr from %p", lr);
}
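
/*
 * trap0() is reached through the stub that oea_init() writes at address 0
 * ("mflr r3; ba trap0"): the mflr puts the stray call's return address in
 * r3, which the PowerPC SVR4 ABI makes trap0()'s first argument, so the
 * panic message can report where the NULL call came from.
 */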