/*	$NetBSD: oea_machdep.c,v 1.81 2020/07/06 10:34:23 rin Exp $	*/

/*
 * Copyright (C) 2002 Matt Thomas
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: oea_machdep.c,v 1.81 2020/07/06 10:34:23 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_altivec.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_multiprocessor.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/boot_flag.h>
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscallargs.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#ifdef DDB
#include <powerpc/db_machdep.h>
#include <ddb/db_extern.h>
#endif

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <machine/powerpc.h>

#include <powerpc/trap.h>
#include <powerpc/spr.h>
#include <powerpc/pte.h>
#include <powerpc/altivec.h>
#include <powerpc/pcb.h>

#include <powerpc/oea/bat.h>
#include <powerpc/oea/cpufeat.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>

char machine[] = MACHINE;		/* from <machine/param.h> */
char machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

struct vm_map *phys_map = NULL;

/*
 * Global variables used here and there
 */
static void trap0(void *);

/* XXXSL: The battable is not initialized to non-zero for PPC_OEA64 and PPC_OEA64_BRIDGE */
struct bat battable[BAT_VA2IDX(0xffffffff)+1];

register_t iosrtable[16];	/* I/O segments, for kernel_pmap setup */
#ifndef MSGBUFADDR
paddr_t msgbuf_paddr;
#endif

extern int dsitrap_fix_dbat4[];
extern int dsitrap_fix_dbat5[];
extern int dsitrap_fix_dbat6[];
extern int dsitrap_fix_dbat7[];

/*
 * Load a pointer with 0 behind GCC's back; otherwise GCC will
 * emit a "trap" instruction instead.
 */
static __inline__ uintptr_t
zero_value(void)
{
	uintptr_t dont_tell_gcc;

	__asm volatile ("li %0, 0" : "=r"(dont_tell_gcc) :);
	return dont_tell_gcc;
}

void
oea_init(void (*handler)(void))
{
	extern int trapcode[], trapsize[];
	extern int sctrap[], scsize[];
	extern int alitrap[], alisize[];
	extern int dsitrap[], dsisize[];
	extern int trapstart[], trapend[];
#ifdef PPC_OEA601
	extern int dsi601trap[], dsi601size[];
#endif
	extern int decrint[], decrsize[];
	extern int tlbimiss[], tlbimsize[];
	extern int tlbdlmiss[], tlbdlmsize[];
	extern int tlbdsmiss[], tlbdsmsize[];
#if defined(DDB) || defined(KGDB)
	extern int ddblow[], ddbsize[];
#endif
#ifdef ALTIVEC
	register_t msr;
#endif
	uintptr_t exc, exc_base;
#if defined(ALTIVEC) || defined(PPC_OEA)
	register_t scratch;
#endif
	unsigned int cpuvers;
	size_t size;
	struct cpu_info * const ci = &cpu_info[0];

#ifdef PPC_HIGH_VEC
	exc_base = EXC_HIGHVEC;
#else
	exc_base = zero_value();
#endif
	KASSERT(mfspr(SPR_SPRG0) == (uintptr_t)ci);

#if defined (PPC_OEA64_BRIDGE) && defined (PPC_OEA)
	if (oeacpufeat & OEACPU_64_BRIDGE)
		pmap_setup64bridge();
	else
		pmap_setup32();
#endif

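	/*
	 * The processor version number lives in the upper halfword of
	 * the PVR; the revision bits in the lower halfword are shifted
	 * away below.  (The MPC601, for instance, reports version 0x0001.)
	 */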
	cpuvers = mfpvr() >> 16;

	/*
	 * Initialize proc0 and current pcb and pmap pointers.
	 */
	(void) ci;
	KASSERT(ci != NULL);
	KASSERT(curcpu() == ci);
	KASSERT(lwp0.l_cpu == ci);

	curpcb = lwp_getpcb(&lwp0);
	memset(curpcb, 0, sizeof(struct pcb));

#ifdef ALTIVEC
	/*
	 * Initialize the vectors with NaNs
	 */
	for (scratch = 0; scratch < 32; scratch++) {
		curpcb->pcb_vr.vreg[scratch][0] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][1] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][2] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][3] = 0x7FFFDEAD;
	}
#endif
	curpm = curpcb->pcb_pm = pmap_kernel();

	/*
	 * Cause a PGM trap if we branch to 0.
	 *
	 * XXX GCC4.1 complains about memset on address zero, so
	 * don't use the builtin.
	 */
#undef memset
	memset(0, 0, 0x100);

	/*
	 * Set up trap vectors.  Don't assume vectors are on 0x100.
	 */
	for (exc = exc_base; exc <= exc_base + EXC_LAST; exc += 0x100) {
		switch (exc - exc_base) {
		default:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			break;
#if 0
		case EXC_EXI:
			/*
			 * This one is (potentially) installed during autoconf
			 */
			break;
#endif
		case EXC_SC:
			size = (size_t)scsize;
			memcpy((void *)exc, sctrap, size);
			break;
		case EXC_ALI:
			size = (size_t)alisize;
			memcpy((void *)exc, alitrap, size);
			break;
		case EXC_DSI:
#ifdef PPC_OEA601
			if (cpuvers == MPC601) {
				size = (size_t)dsi601size;
				memcpy((void *)exc, dsi601trap, size);
				break;
			} else
#endif /* PPC_OEA601 */
			if (oeacpufeat & OEACPU_NOBAT) {
				size = (size_t)alisize;
				memcpy((void *)exc, alitrap, size);
			} else {
				size = (size_t)dsisize;
				memcpy((void *)exc, dsitrap, size);
			}
			break;
		case EXC_DECR:
			size = (size_t)decrsize;
			memcpy((void *)exc, decrint, size);
			break;
		case EXC_IMISS:
			size = (size_t)tlbimsize;
			memcpy((void *)exc, tlbimiss, size);
			break;
		case EXC_DLMISS:
			size = (size_t)tlbdlmsize;
			memcpy((void *)exc, tlbdlmiss, size);
			break;
		case EXC_DSMISS:
			size = (size_t)tlbdsmsize;
			memcpy((void *)exc, tlbdsmiss, size);
			break;
		case EXC_PERF:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			memcpy((void *)(exc_base + EXC_VEC), trapcode, size);
			break;
#if defined(DDB) || defined(KGDB)
		case EXC_RUNMODETRC:
#ifdef PPC_OEA601
			if (cpuvers != MPC601)
#endif
			{
				size = (size_t)trapsize;
				memcpy((void *)exc, trapcode, size);
				break;
			}
			/* FALLTHROUGH */
		case EXC_PGM:
		case EXC_TRC:
		case EXC_BPT:
			size = (size_t)ddbsize;
			memcpy((void *)exc, ddblow, size);
			break;
#endif /* DDB || KGDB */
		}
#if 0
		exc += roundup(size, 32);
#endif
	}

	/*
	 * Install a branch absolute to trap0 to force a panic.
	 */
	if ((uintptr_t)trap0 < 0x2000000) {
		uint32_t *p = (uint32_t *)zero_value();

		p[0] = 0x7c6802a6;
		p[1] = 0x48000002 | (uintptr_t) trap0;
	}
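	/*
	 * The two words just stored decode as
	 *
	 *	0x7c6802a6		mflr	r3
	 *	0x48000002 | trap0	ba	trap0
	 *
	 * so a call through a null pointer copies the offender's saved
	 * return address into r3 (the first argument register) and
	 * branches absolute to trap0(), which panics with that address.
	 */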
	/*
	 * Get the cache sizes because install_extint calls __syncicache.
	 */
	cpu_probe_cache();

#define	MxSPR_MASK	0x7c1fffff
#define	MFSPR_MQ	0x7c0002a6
#define	MTSPR_MQ	0x7c0003a6
#define	MTSPR_IBAT0L	0x7c1183a6
#define	MTSPR_IBAT1L	0x7c1383a6
#define	NOP		0x60000000
#define	B		0x48000000
#define	TLBSYNC		0x7c00046c
#define	SYNC		0x7c0004ac
#ifdef PPC_OEA64_BRIDGE
#define	MFMSR_MASK	0xfc1fffff
#define	MFMSR		0x7c0000a6
#define	MTMSRD_MASK	0xfc1effff
#define	MTMSRD		0x7c000164
#define	RLDICL_MASK	0xfc00001c
#define	RLDICL		0x78000000
#define	RFID		0x4c000024
#define	RFI		0x4c000064
#endif

#ifdef ALTIVEC
#define	MFSPR_VRSAVE	0x7c0042a6
#define	MTSPR_VRSAVE	0x7c0043a6

	/*
	 * Try to set the VEC bit in the MSR.  If it doesn't get set, we
	 * are not on an AltiVec-capable processor.
	 */
	__asm volatile (
	    "mfmsr %0; oris %1,%0,%2@h; mtmsr %1; isync; "
	    "mfmsr %1; mtmsr %0; isync"
	    : "=r"(msr), "=r"(scratch)
	    : "J"(PSL_VEC));

	/*
	 * If we aren't on an AltiVec-capable processor, we need to zap
	 * any of the sequences where we save/restore the VRSAVE SPR
	 * into NOPs.
	 */
	if (scratch & PSL_VEC) {
		cpu_altivec = 1;
	} else {
		for (int *ip = trapstart; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_VRSAVE) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_VRSAVE) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			}
		}
	}
#endif

	/* XXX It would seem like this code could be elided ifndef 601, but
	 * doing so breaks my power3 machine.
	 */
	/*
	 * If we aren't on an MPC601 processor, we need to zap any of the
	 * sequences where we save/restore the MQ SPR into NOPs, and skip
	 * over the sequences where we zap/restore BAT registers on kernel
	 * exit/entry.
	 */
	if (cpuvers != MPC601) {
		for (int *ip = trapstart; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_MQ) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_MQ) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_IBAT0L) {
				if ((ip[1] & MxSPR_MASK) == MTSPR_IBAT1L)
					ip[-1] = B | 0x14;	/* li */
				else
					ip[-4] = B | 0x24;	/* lis */
			}
		}
	}

#ifdef PPC_OEA64_BRIDGE
	if ((oeacpufeat & OEACPU_64_BRIDGE) == 0) {
		for (int *ip = (int *)exc_base;
		     (uintptr_t)ip <= exc_base + EXC_LAST;
		     ip++) {
			if ((ip[0] & MFMSR_MASK) == MFMSR
			    && (ip[1] & RLDICL_MASK) == RLDICL
			    && (ip[2] & MTMSRD_MASK) == MTMSRD) {
				*ip++ = NOP;
				*ip++ = NOP;
				ip[0] = NOP;
			} else if (*ip == RFID) {
				*ip = RFI;
			}
		}

		/*
		 * Do the same for the trap code proper: NOP out the
		 * 64-bit MSR-manipulation sequences and replace each
		 * rfid instruction with an rfi instruction.
		 */
		for (int *ip = trapstart; ip < trapend; ip++) {
			if ((ip[0] & MFMSR_MASK) == MFMSR
			    && (ip[1] & RLDICL_MASK) == RLDICL
			    && (ip[2] & MTMSRD_MASK) == MTMSRD) {
				*ip++ = NOP;
				*ip++ = NOP;
				ip[0] = NOP;
			} else if (*ip == RFID) {
				*ip = RFI;
			}
		}
	}
#endif /* PPC_OEA64_BRIDGE */

	/*
	 * Sync the changed instructions.
	 */
	__syncicache((void *) trapstart,
	    (uintptr_t) trapend - (uintptr_t) trapstart);
	__syncicache(dsitrap_fix_dbat4, 16);
	__syncicache(dsitrap_fix_dbat7, 8);
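	/*
	 * A note on the opcode matching above: MxSPR_MASK (0x7c1fffff)
	 * clears the rS/rD field (bits 6-10) of an mfspr/mtspr opcode,
	 * so the comparisons match the instruction no matter which GPR
	 * it names; e.g. both "mfspr r3,vrsave" and "mfspr r12,vrsave"
	 * compare equal to MFSPR_VRSAVE.
	 */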
#ifdef PPC_OEA601

	/*
	 * If we are on an MPC601 processor, we need to zap any tlbsync
	 * instructions into sync.  This differs from the above in
	 * examining all kernel text, as opposed to just the exception
	 * handling.  We sync the icache on every instruction found since
	 * there are only very few of them.
	 */
	if (cpuvers == MPC601) {
		extern int kernel_text[], etext[];
		int *ip;

		for (ip = kernel_text; ip < etext; ip++) {
			if (*ip == TLBSYNC) {
				*ip = SYNC;
				__syncicache(ip, sizeof(*ip));
			}
		}
	}
#endif /* PPC_OEA601 */

	/*
	 * Configure a PSL user mask matching this processor.
	 * Don't allow PSL_FP/PSL_VEC to be set, since that will affect PCU.
	 */
	cpu_psluserset = PSL_EE | PSL_PR | PSL_ME | PSL_IR | PSL_DR | PSL_RI;
	cpu_pslusermod = PSL_FE0 | PSL_FE1 | PSL_LE | PSL_SE | PSL_BE;
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		cpu_psluserset &= PSL_601_MASK;
		cpu_pslusermod &= PSL_601_MASK;
	}
#endif
#ifdef PPC_HIGH_VEC
	cpu_psluserset |= PSL_IP;	/* XXX ok? */
#endif

	/*
	 * Install the external interrupt handler.
	 */
	if (handler)
		oea_install_extint(handler);

	__syncicache((void *)exc_base, EXC_LAST + 0x100);

	/*
	 * Now enable translation (and machine checks/recoverable interrupts).
	 */
#ifdef PPC_OEA
	__asm volatile ("sync; mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
	    : "=r"(scratch)
	    : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));
#endif

	/*
	 * Let's take all the indirect calls via our stubs and patch
	 * them to be direct calls.
	 */
	cpu_fixup_stubs();

	KASSERT(curcpu() == ci);
}

#ifdef PPC_OEA601
void
mpc601_ioseg_add(paddr_t pa, register_t len)
{
	const u_int i = pa >> ADDR_SR_SHFT;

	if (len != BAT_BL_256M)
		panic("mpc601_ioseg_add: len != 256M");

	/*
	 * Translate into an I/O segment, load it, and stash away for use
	 * in pmap_bootstrap().
	 */
	iosrtable[i] = SR601(SR601_Ks, SR601_BUID_MEMFORCED, 0, i);

	/*
	 * XXX Setting segment register 0xf on my powermac 7200
	 * wedges the machine, so set it later in pmap.c.
	 */
	/*
	__asm volatile ("mtsrin %0,%1"
	    :: "r"(iosrtable[i]),
	       "r"(pa));
	*/
}
#endif /* PPC_OEA601 */

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
#define	DBAT_SET(n, batl, batu)				\
	do {						\
		mtspr(SPR_DBAT##n##L, (batl));		\
		mtspr(SPR_DBAT##n##U, (batu));		\
	} while (/*CONSTCOND*/ 0)
#define	DBAT_RESET(n)	DBAT_SET(n, 0, 0)
#define	DBATU_GET(n)	mfspr(SPR_DBAT##n##U)
#define	IBAT_SET(n, batl, batu)				\
	do {						\
		mtspr(SPR_IBAT##n##L, (batl));		\
		mtspr(SPR_IBAT##n##U, (batu));		\
	} while (/*CONSTCOND*/ 0)
#define	IBAT_RESET(n)	IBAT_SET(n, 0, 0)

void
oea_iobat_add(paddr_t pa, register_t len)
{
	static int z = 1;
	const u_int n = BAT_BL_TO_SIZE(len) / BAT_BL_TO_SIZE(BAT_BL_8M);
	const u_int i = BAT_VA2IDX(pa) & -n;	/* in case pa was in the middle */
	const int after_bat3 = (oeacpufeat & OEACPU_HIGHBAT) ? 4 : 8;

	KASSERT(len >= BAT_BL_8M);
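	/*
	 * battable[] holds one entry per 8 MB of address space, so an
	 * n*8MB BAT occupies n consecutive slots; aligning the index
	 * down to a multiple of n above makes a pa in the middle of a
	 * naturally aligned block select the slot for the whole block.
	 * For example (hypothetical pa), 0x90800000 with a 256 MB BAT
	 * gives n = 32 and indexes the slot for 0x90000000.
	 */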
	/*
	 * If the caller wanted a bigger BAT than the hardware supports,
	 * split it into smaller BATs.
	 */
	if (len > BAT_BL_256M && (oeacpufeat & OEACPU_XBSEN) == 0) {
		u_int xn = BAT_BL_TO_SIZE(len) >> 28;
		while (xn-- > 0) {
			oea_iobat_add(pa, BAT_BL_256M);
			pa += 0x10000000;
		}
		return;
	}

	const register_t batl = BATL(pa, BAT_I|BAT_G, BAT_PP_RW);
	const register_t batu = BATU(pa, len, BAT_Vs);

	for (u_int j = 0; j < n; j++) {
		battable[i + j].batl = batl;
		battable[i + j].batu = batu;
	}

	/*
	 * Let's start loading the BAT registers.
	 */
	switch (z) {
	case 1:
		DBAT_SET(1, batl, batu);
		z = 2;
		break;
	case 2:
		DBAT_SET(2, batl, batu);
		z = 3;
		break;
	case 3:
		DBAT_SET(3, batl, batu);
		z = after_bat3;		/* no highbat, skip to end */
		break;
	case 4:
		DBAT_SET(4, batl, batu);
		z = 5;
		break;
	case 5:
		DBAT_SET(5, batl, batu);
		z = 6;
		break;
	case 6:
		DBAT_SET(6, batl, batu);
		z = 7;
		break;
	case 7:
		DBAT_SET(7, batl, batu);
		z = 8;
		break;
	default:
		break;
	}
}

void
oea_iobat_remove(paddr_t pa)
{
	const u_int i = BAT_VA2IDX(pa);

	if (!BAT_VA_MATCH_P(battable[i].batu, pa) ||
	    !BAT_VALID_P(battable[i].batu, PSL_PR))
		return;
	const int n =
	    __SHIFTOUT(battable[i].batu, (BAT_XBL|BAT_BL) & ~BAT_BL_8M) + 1;
	KASSERT((n & (n-1)) == 0);	/* power of 2 */
	KASSERT((i & (n-1)) == 0);	/* multiple of n */

	memset(&battable[i], 0, n*sizeof(battable[0]));

	const int maxbat = oeacpufeat & OEACPU_HIGHBAT ? 8 : 4;
	for (u_int k = 1; k < maxbat; k++) {
		register_t batu;
		switch (k) {
		case 1:
			batu = DBATU_GET(1);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(1);
			break;
		case 2:
			batu = DBATU_GET(2);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(2);
			break;
		case 3:
			batu = DBATU_GET(3);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(3);
			break;
		case 4:
			batu = DBATU_GET(4);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(4);
			break;
		case 5:
			batu = DBATU_GET(5);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(5);
			break;
		case 6:
			batu = DBATU_GET(6);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(6);
			break;
		case 7:
			batu = DBATU_GET(7);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(7);
			break;
		default:
			break;
		}
	}
}
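/*
 * A sketch of a typical call (the addresses are hypothetical, not any
 * particular port's): the variadic pa/len list is terminated by a zero
 * pa, and each range is mapped via BATs (or I/O segments on the 601):
 *
 *	oea_batinit(0x80000000, BAT_BL_256M,
 *	    0xf0000000, BAT_BL_256M,
 *	    0);
 */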
void
oea_batinit(paddr_t pa, ...)
{
	struct mem_region *allmem, *availmem, *mp;
	register_t msr = mfmsr();
	va_list ap;
#ifdef PPC_OEA601
	unsigned int cpuvers;

	cpuvers = mfpvr() >> 16;
#endif /* PPC_OEA601 */

	/*
	 * We need to call this before zapping the BATs so that OF calls
	 * still work.
	 */
	mem_regions(&allmem, &availmem);

	/*
	 * Initialize BAT registers to unmapped to not generate
	 * overlapping mappings below.
	 *
	 * The 601's implementation differs in the Valid bit being situated
	 * in the lower BAT register, and in being a unified BAT only whose
	 * four entries are accessed through the IBAT[0-3] SPRs.
	 *
	 * Also, while the 601 does distinguish between supervisor/user
	 * protection keys, it does _not_ distinguish between validity in
	 * supervisor/user mode.
	 */
	if ((msr & (PSL_IR|PSL_DR)) == 0) {
#ifdef PPC_OEA601
		if (cpuvers == MPC601) {
			__asm volatile ("mtibatl 0,%0" :: "r"(0));
			__asm volatile ("mtibatl 1,%0" :: "r"(0));
			__asm volatile ("mtibatl 2,%0" :: "r"(0));
			__asm volatile ("mtibatl 3,%0" :: "r"(0));
		} else
#endif /* PPC_OEA601 */
		{
			DBAT_RESET(0); IBAT_RESET(0);
			DBAT_RESET(1); IBAT_RESET(1);
			DBAT_RESET(2); IBAT_RESET(2);
			DBAT_RESET(3); IBAT_RESET(3);
			if (oeacpufeat & OEACPU_HIGHBAT) {
				DBAT_RESET(4); IBAT_RESET(4);
				DBAT_RESET(5); IBAT_RESET(5);
				DBAT_RESET(6); IBAT_RESET(6);
				DBAT_RESET(7); IBAT_RESET(7);

				/*
				 * Change the first instruction to branch to
				 * dsitrap_fix_dbat6
				 */
				dsitrap_fix_dbat4[0] &= ~0xfffc;
				dsitrap_fix_dbat4[0]
				    += (uintptr_t)dsitrap_fix_dbat6
				    - (uintptr_t)&dsitrap_fix_dbat4[0];

				/*
				 * Change the second instruction to branch to
				 * dsitrap_fix_dbat5 if bit 30 (aka bit 1) is
				 * true.
				 */
				dsitrap_fix_dbat4[1] = 0x419e0000
				    + (uintptr_t)dsitrap_fix_dbat5
				    - (uintptr_t)&dsitrap_fix_dbat4[1];

				/*
				 * Change it to load dbat4 instead of dbat2
				 */
				dsitrap_fix_dbat4[2] = 0x7fd88ba6;
				dsitrap_fix_dbat4[3] = 0x7ff98ba6;

				/*
				 * Change it to load dbat7 instead of dbat3
				 */
				dsitrap_fix_dbat7[0] = 0x7fde8ba6;
				dsitrap_fix_dbat7[1] = 0x7fff8ba6;
			}
		}
	}

	/*
	 * Set up BAT to map physical memory.
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		int i;

		/*
		 * Set up battable to map the lowest 256 MB area.
		 * Map the lowest 32 MB area via BAT[0-3];
		 * BAT[01] are fixed, BAT[23] are floating.
		 */
		for (i = 0; i < 32; i++) {
			battable[i].batl = BATL601(i << 23,
			    BAT601_BSM_8M, BAT601_V);
			battable[i].batu = BATU601(i << 23,
			    BAT601_M, BAT601_Ku, BAT601_PP_NONE);
		}
		__asm volatile ("mtibatu 0,%1; mtibatl 0,%0"
		    :: "r"(battable[0x00000000 >> 23].batl),
		       "r"(battable[0x00000000 >> 23].batu));
		__asm volatile ("mtibatu 1,%1; mtibatl 1,%0"
		    :: "r"(battable[0x00800000 >> 23].batl),
		       "r"(battable[0x00800000 >> 23].batu));
		__asm volatile ("mtibatu 2,%1; mtibatl 2,%0"
		    :: "r"(battable[0x01000000 >> 23].batl),
		       "r"(battable[0x01000000 >> 23].batu));
		__asm volatile ("mtibatu 3,%1; mtibatl 3,%0"
		    :: "r"(battable[0x01800000 >> 23].batl),
		       "r"(battable[0x01800000 >> 23].batu));
	}
#endif /* PPC_OEA601 */

	/*
	 * Now set up the other fixed BAT registers.
	 *
	 * Note that we still run in real mode, and the BAT
	 * registers were cleared above.
	 */

	va_start(ap, pa);

	/*
	 * Add any I/O BATs specified;
	 * use I/O segments on the BAT-starved 601.
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			mpc601_ioseg_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	} else
#endif
	{
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			oea_iobat_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	}

	va_end(ap);
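	/*
	 * To illustrate the sizing loop below: without OEACPU_XBSEN the
	 * largest BAT is 256 MB, and each candidate size is halved until
	 * it is naturally aligned at paddr and does not run past end.
	 * A (hypothetical) 96 MB region at 0 is thus covered by a 64 MB
	 * BAT at 0 followed by a 32 MB BAT at 0x04000000.
	 */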
	/*
	 * Set up battable to map all RAM regions.
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & 0xff800000;
			paddr_t end = mp->start + mp->size;

			do {
				u_int ix = paddr >> 23;

				battable[ix].batl =
				    BATL601(paddr, BAT601_BSM_8M, BAT601_V);
				battable[ix].batu =
				    BATU601(paddr, BAT601_M, BAT601_Ku,
				        BAT601_PP_NONE);
				paddr += (1 << 23);
			} while (paddr < end);
		}
	} else
#endif
	{
		const register_t bat_inc = BAT_IDX2VA(1);
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & -bat_inc;
			paddr_t end = roundup2(mp->start + mp->size, bat_inc);

			/*
			 * If the next entries are adjacent, merge them
			 * into this one.
			 */
			while (mp[1].size && end == (mp[1].start & -bat_inc)) {
				mp++;
				end = roundup2(mp->start + mp->size, bat_inc);
			}

			while (paddr < end) {
				register_t bl = (oeacpufeat & OEACPU_XBSEN
				    ? BAT_BL_2G
				    : BAT_BL_256M);
				psize_t size = BAT_BL_TO_SIZE(bl);
				u_int n = BAT_VA2IDX(size);
				u_int i = BAT_VA2IDX(paddr);

				while ((paddr & (size - 1))
				    || paddr + size > end) {
					size >>= 1;
					bl = (bl >> 1) & (BAT_XBL|BAT_BL);
					n >>= 1;
				}

				KASSERT(size >= bat_inc);
				KASSERT(n >= 1);
				KASSERT(bl >= BAT_BL_8M);

				register_t batl = BATL(paddr, BAT_M, BAT_PP_RW);
				register_t batu = BATU(paddr, bl, BAT_Vs);

				for (; n-- > 0; i++) {
					battable[i].batl = batl;
					battable[i].batu = batu;
				}
				paddr += size;
			}
		}

		/*
		 * Set up BAT0 to only map the lowest area.
		 */
		__asm volatile ("mtibatl 0,%0; mtibatu 0,%1;"
		    "mtdbatl 0,%0; mtdbatu 0,%1;"
		    :: "r"(battable[0].batl), "r"(battable[0].batu));
	}
}
#endif /* PPC_OEA || PPC_OEA64_BRIDGE */

void
oea_install_extint(void (*handler)(void))
{
	extern int extint[], extsize[];
	extern int extint_call[];
	uintptr_t offset = (uintptr_t)handler - (uintptr_t)extint_call;
#ifdef PPC_HIGH_VEC
	const uintptr_t exc_exi_base = EXC_HIGHVEC + EXC_EXI;
#else
	const uintptr_t exc_exi_base = EXC_EXI;
#endif
	int omsr, msr;

#ifdef DIAGNOSTIC
	if (offset > 0x1ffffff)
		panic("install_extint: %p too far away (%#lx)", handler,
		    (unsigned long) offset);
#endif
	__asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
	    : "=r" (omsr), "=r" (msr)
	    : "K" ((u_short)~PSL_EE));
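	/*
	 * extint_call[] is the branch in the extint template that calls
	 * the handler.  The mask below keeps the major opcode and the
	 * AA/LK bits and replaces the 24-bit LI displacement, so the
	 * handler must lie within the I-form branch's 32 MB reach;
	 * hence the DIAGNOSTIC check above.
	 */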
	extint_call[0] = (extint_call[0] & 0xfc000003) | offset;
	__syncicache((void *)extint_call, sizeof extint_call[0]);
	memcpy((void *)exc_exi_base, extint, (size_t)extsize);
#ifdef PPC_OEA64_BRIDGE
	if ((oeacpufeat & OEACPU_64_BRIDGE) == 0) {
		for (int *ip = (int *)exc_exi_base;
		     (uintptr_t)ip <= exc_exi_base + (size_t)extsize;
		     ip++) {
			if ((ip[0] & MFMSR_MASK) == MFMSR
			    && (ip[1] & RLDICL_MASK) == RLDICL
			    && (ip[2] & MTMSRD_MASK) == MTMSRD) {
				*ip++ = NOP;
				*ip++ = NOP;
				ip[0] = NOP;
			} else if (*ip == RFID) {
				*ip = RFI;
			}
		}
	}
#endif
	__syncicache((void *)exc_exi_base, (size_t)extsize);

	__asm volatile ("mtmsr %0" :: "r"(omsr));
}

/*
 * Machine dependent startup code.
 */
void
oea_startup(const char *model)
{
	uintptr_t sz;
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9], mstr[128];

	KASSERT(curcpu() != NULL);
	KASSERT(lwp0.l_cpu != NULL);
	KASSERT(curcpu()->ci_idepth == -1);

	sz = round_page(MSGBUFSIZE);
#ifdef MSGBUFADDR
	v = (void *) MSGBUFADDR;
#else
	/*
	 * If the msgbuf is not in segment 0, allocate KVA for it and access
	 * it via mapped pages.  [This prevents unneeded BAT switches.]
	 */
	v = (void *) msgbuf_paddr;
	if (msgbuf_paddr + sz > SEGMENT_LENGTH) {
		u_int i;

		minaddr = 0;
		if (uvm_map(kernel_map, &minaddr, sz,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
		    UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0)
			panic("startup: cannot allocate VM for msgbuf");
		v = (void *)minaddr;
		for (i = 0; i < sz; i += PAGE_SIZE) {
			pmap_kenter_pa(minaddr + i, msgbuf_paddr + i,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
	}
#endif
	initmsgbuf(v, sz);

	printf("%s%s", copyright, version);
	if (model != NULL)
		printf("Model: %s\n", model);
	cpu_identify(mstr, sizeof(mstr));
	cpu_setmodel("%s", mstr);

	format_bytes(pbuf, sizeof(pbuf), ctob((u_int)physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Allocate away the pages that map to 0xDEA[CDE]xxxx.  Do this
	 * after the buffer pages are allocated in case the two overlap;
	 * it's not fatal if we can't allocate these.
	 */
	if (KERNEL_SR == 13 || KERNEL2_SR == 14) {
		int error;
		minaddr = 0xDEAC0000;
		error = uvm_map(kernel_map, &minaddr, 0x30000,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
		    UVM_ADV_NORMAL, UVM_FLAG_FIXED));
		if (error != 0 || minaddr != 0xDEAC0000)
			printf("oea_startup: failed to allocate DEAD "
			    "ZONE: error=%d\n", error);
	}

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);

#ifdef MULTIPROCESSOR
	kcpuset_create(&cpuset_info.cpus_running, true);
	kcpuset_create(&cpuset_info.cpus_hatched, true);
	kcpuset_create(&cpuset_info.cpus_paused, true);
	kcpuset_create(&cpuset_info.cpus_resumed, true);
	kcpuset_create(&cpuset_info.cpus_halted, true);

	kcpuset_set(cpuset_info.cpus_running, cpu_number());
#endif
}

/*
 * Crash dump handling.
 */
void
oea_dumpsys(void)
{
	printf("dumpsys: TBD\n");
}

/*
 * Convert kernel VA to physical address.
 */
paddr_t
kvtop(void *addr)
{
	vaddr_t va;
	paddr_t pa;
	uintptr_t off;
	extern char end[];

	if (addr < (void *)end)
		return (paddr_t)addr;

	va = trunc_page((vaddr_t)addr);
	off = (uintptr_t)addr - va;

	if (pmap_extract(pmap_kernel(), va, &pa) == false) {
		/*printf("kvtop: zero page frame (va=0x%x)\n", addr);*/
		return (paddr_t)addr;
	}

	return (pa + off);
}
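/*
 * Usage sketch for mapiodev()/unmapiodev() below (the device address
 * and size are hypothetical): map 4 KB of device registers uncached,
 * use them, and tear the mapping down again.
 *
 *	volatile uint32_t *regs = mapiodev(0xf3000000, 0x1000, false);
 *	if (regs != NULL) {
 *		...
 *		unmapiodev((vaddr_t)regs, 0x1000);
 *	}
 */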
/*
 * Allocate VM space and map in the I/O address.
 */
void *
mapiodev(paddr_t pa, psize_t len, bool prefetchable)
{
	paddr_t faddr;
	vaddr_t taddr, va;
	int off;

	faddr = trunc_page(pa);
	off = pa - faddr;
	len = round_page(off + len);
	va = taddr = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY);

	if (va == 0)
		return NULL;

	for (; len > 0; len -= PAGE_SIZE) {
		pmap_kenter_pa(taddr, faddr, VM_PROT_READ | VM_PROT_WRITE,
		    (prefetchable ? PMAP_MD_PREFETCHABLE : PMAP_NOCACHE));
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (void *)(va + off);
}

void
unmapiodev(vaddr_t va, vsize_t len)
{
	paddr_t faddr;

	if (! va)
		return;

	faddr = trunc_page(va);
	len = round_page(va - faddr + len);

	pmap_kremove(faddr, len);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, faddr, len, UVM_KMF_VAONLY);
}

void
trap0(void *lr)
{
	panic("call to null-ptr from %p", lr);
}