1 /* $NetBSD: oea_machdep.c,v 1.53 2010/02/25 23:31:47 matt Exp $ */ 2 3 /* 4 * Copyright (C) 2002 Matt Thomas 5 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 6 * Copyright (C) 1995, 1996 TooLs GmbH. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by TooLs GmbH. 20 * 4. The name of TooLs GmbH may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: oea_machdep.c,v 1.53 2010/02/25 23:31:47 matt Exp $");

#include "opt_ppcarch.h"
#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_ipkdb.h"
#include "opt_multiprocessor.h"
#include "opt_altivec.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscallargs.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/boot_flag.h>

#include <uvm/uvm_extern.h>

#include <net/netisr.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#endif

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#ifdef IPKDB
#include <ipkdb/ipkdb.h>
#endif

#include <powerpc/trap.h>
#include <powerpc/stdarg.h>
#include <powerpc/spr.h>
#include <powerpc/pte.h>
#include <powerpc/altivec.h>
#include <machine/powerpc.h>

#include <powerpc/oea/spr.h>
#include <powerpc/oea/bat.h>
#include <powerpc/oea/sr_601.h>
#include <powerpc/oea/cpufeat.h>

char machine[] = MACHINE;		/* from <machine/param.h> */
char machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

/* Submap for physio; set up in oea_startup(). */
struct vm_map *phys_map = NULL;

/*
 * Global variables used here and there
 */
static void trap0(void *);

/* XXXSL: The battable is not initialized to non-zero for PPC_OEA64 and PPC_OEA64_BRIDGE */
struct bat battable[512];

register_t iosrtable[16];	/* I/O segments, for kernel_pmap setup */
#ifndef MSGBUFADDR
paddr_t msgbuf_paddr;
#endif

/*
 * Early machine-dependent initialization for OEA (classic 32-bit
 * PowerPC) CPUs:
 *
 *  - copies the trap/exception handler stubs to the low (or, with
 *    PPC_HIGH_VEC, high) exception vectors, choosing CPU-appropriate
 *    variants (601 DSI handler, software TLB-miss handlers, debugger
 *    entry stubs);
 *  - patches the installed handler code in place: AltiVec VRSAVE
 *    save/restore and 601 MQ save/restore sequences are NOPed out when
 *    the CPU lacks them, and tlbsync is rewritten to sync on the 601;
 *  - computes the user-mode PSL masks (cpu_psluserset/cpu_pslusermod);
 *  - optionally installs 'handler' as the external interrupt handler;
 *  - finally enables MMU translation and machine checks.
 *
 * Runs once on the boot CPU, in real mode, before the VM system is up.
 */
void
oea_init(void (*handler)(void))
{
	/* Handler stubs and their sizes, defined in locore/trap assembly. */
	extern int trapcode[], trapsize[];
	extern int sctrap[], scsize[];
	extern int alitrap[], alisize[];
	extern int dsitrap[], dsisize[];
	extern int trapstart[], trapend[];
#ifdef PPC_OEA601
	extern int dsi601trap[], dsi601size[];
#endif
	extern int decrint[], decrsize[];
	extern int tlbimiss[], tlbimsize[];
	extern int tlbdlmiss[], tlbdlmsize[];
	extern int tlbdsmiss[], tlbdsmsize[];
#if defined(DDB) || defined(KGDB)
	extern int ddblow[], ddbsize[];
#endif
#ifdef IPKDB
	extern int ipkdblow[], ipkdbsize[];
#endif
#ifdef ALTIVEC
	register_t msr;
#endif
	uintptr_t exc, exc_base;
#if defined(ALTIVEC) || defined(PPC_OEA)
	register_t scratch;
#endif
	unsigned int cpuvers;
	size_t size;
	struct cpu_info * const ci = &cpu_info[0];

#ifdef PPC_HIGH_VEC
	exc_base = EXC_HIGHVEC;
#else
	exc_base = 0;
#endif
	/* Stash curcpu() in SPRG0 for the trap handlers. */
	mtspr(SPR_SPRG0, ci);
	cpuvers = mfpvr() >> 16;

	/*
	 * Initialize proc0 and current pcb and pmap pointers.
	 */
	KASSERT(ci != NULL);
	KASSERT(curcpu() == ci);
	lwp0.l_cpu = ci;

	curpcb = lwp_getpcb(&lwp0);
	memset(curpcb, 0, sizeof(struct pcb));

#ifdef ALTIVEC
	/*
	 * Initialize the vectors with NaNs
	 */
	for (scratch = 0; scratch < 32; scratch++) {
		curpcb->pcb_vr.vreg[scratch][0] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][1] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][2] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][3] = 0x7FFFDEAD;
	}
	curpcb->pcb_vr.vscr = 0;
	curpcb->pcb_vr.vrsave = 0;
#endif
	curpm = curpcb->pcb_pm = pmap_kernel();

	/*
	 * Cause a PGM trap if we branch to 0.
	 *
	 * XXX GCC4.1 complains about memset on address zero, so
	 * don't use the builtin.
	 */
#undef memset
	memset(0, 0, 0x100);

	/*
	 * Set up trap vectors.  Don't assume vectors are on 0x100.
	 */
	for (exc = exc_base; exc <= exc_base + EXC_LAST; exc += 0x100) {
		switch (exc - exc_base) {
		default:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			break;
#if 0
		case EXC_EXI:
			/*
			 * This one is (potentially) installed during autoconf
			 */
			break;
#endif
		case EXC_SC:
			size = (size_t)scsize;
			memcpy((void *)exc, sctrap, size);
			break;
		case EXC_ALI:
			size = (size_t)alisize;
			memcpy((void *)exc, alitrap, size);
			break;
		case EXC_DSI:
#ifdef PPC_OEA601
			/* The 601 has its own DSI handler. */
			if (cpuvers == MPC601) {
				size = (size_t)dsi601size;
				memcpy((void *)exc, dsi601trap, size);
				break;
			} else
#endif /* PPC_OEA601 */
			if (oeacpufeat & OEACPU_NOBAT) {
				size = (size_t)alisize;
				memcpy((void *)exc, alitrap, size);
			} else {
				size = (size_t)dsisize;
				memcpy((void *)exc, dsitrap, size);
			}
			break;
		case EXC_DECR:
			size = (size_t)decrsize;
			memcpy((void *)exc, decrint, size);
			break;
		case EXC_IMISS:
			size = (size_t)tlbimsize;
			memcpy((void *)exc, tlbimiss, size);
			break;
		case EXC_DLMISS:
			size = (size_t)tlbdlmsize;
			memcpy((void *)exc, tlbdlmiss, size);
			break;
		case EXC_DSMISS:
			size = (size_t)tlbdsmsize;
			memcpy((void *)exc, tlbdsmiss, size);
			break;
		case EXC_PERF:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			/* EXC_VEC shares the generic trap stub. */
			memcpy((void *)(exc_base + EXC_VEC), trapcode, size);
			break;
#if defined(DDB) || defined(IPKDB) || defined(KGDB)
		case EXC_RUNMODETRC:
#ifdef PPC_OEA601
			if (cpuvers != MPC601) {
#endif
				size = (size_t)trapsize;
				memcpy((void *)exc, trapcode, size);
				break;
#ifdef PPC_OEA601
			}
			/* FALLTHROUGH */
#endif
		case EXC_PGM:
		case EXC_TRC:
		case EXC_BPT:
#if defined(DDB) || defined(KGDB)
			size = (size_t)ddbsize;
			memcpy((void *)exc, ddblow, size);
#if defined(IPKDB)
#error "cannot enable IPKDB with DDB or KGDB"
#endif
#else
			size = (size_t)ipkdbsize;
			memcpy((void *)exc, ipkdblow, size);
#endif
			break;
#endif /* DDB || IPKDB || KGDB */
		}
#if 0
		exc += roundup(size, 32);
#endif
	}

	/*
	 * Install a branch absolute to trap0 to force a panic.
	 * (mflr r3; ba trap0 — only possible if trap0 is reachable
	 * by an absolute branch.)
	 */
	if ((uintptr_t)trap0 < 0x2000000) {
		*(uint32_t *) 0 = 0x7c6802a6;
		*(uint32_t *) 4 = 0x48000002 | (uintptr_t) trap0;
	}

	/*
	 * Get the cache sizes because install_extint calls __syncicache.
	 */
	cpu_probe_cache();

	/* Instruction-encoding templates used to patch the trap code. */
#define	MxSPR_MASK	0x7c1fffff
#define	MFSPR_MQ	0x7c0002a6
#define	MTSPR_MQ	0x7c0003a6
#define	MTSPR_IBAT0L	0x7c1183a6
#define	MTSPR_IBAT1L	0x7c1383a6
#define	NOP		0x60000000
#define	B		0x48000000
#define	TLBSYNC		0x7c00046c
#define	SYNC		0x7c0004ac

#ifdef ALTIVEC
#define	MFSPR_VRSAVE	0x7c0042a6
#define	MTSPR_VRSAVE	0x7c0043a6

	/*
	 * Try to set the VEC bit in the MSR.  If it doesn't get set, we are
	 * not on an AltiVec capable processor.
	 */
	__asm volatile (
	    "mfmsr %0; oris %1,%0,%2@h; mtmsr %1; isync; "
	    "mfmsr %1; mtmsr %0; isync"
	    :	"=r"(msr), "=r"(scratch)
	    :	"J"(PSL_VEC));

	/*
	 * If we aren't on an AltiVec capable processor, we need to zap any of
	 * the sequences we save/restore the VRSAVE SPR into NOPs.
	 */
	if (scratch & PSL_VEC) {
		cpu_altivec = 1;
	} else {
		int *ip = trapstart;

		for (; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_VRSAVE) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_VRSAVE) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			}
		}
	}
#endif

	/* XXX It would seem like this code could be elided ifndef 601, but
	 * doing so breaks my power3 machine.
	 */
	/*
	 * If we aren't on a MPC601 processor, we need to zap any of the
	 * sequences we save/restore the MQ SPR into NOPs, and skip over the
	 * sequences where we zap/restore BAT registers on kernel exit/entry.
	 */
	if (cpuvers != MPC601) {
		int *ip = trapstart;

		for (; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_MQ) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_MQ) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_IBAT0L) {
				/* Branch over the BAT zap/restore sequence. */
				if ((ip[1] & MxSPR_MASK) == MTSPR_IBAT1L)
					ip[-1] = B | 0x14;	/* li */
				else
					ip[-4] = B | 0x24;	/* lis */
			}
		}
	}

	/*
	 * Sync the changed instructions.
	 */
	__syncicache((void *) trapstart,
	    (uintptr_t) trapend - (uintptr_t) trapstart);
#ifdef PPC_OEA601

	/*
	 * If we are on a MPC601 processor, we need to zap any tlbsync
	 * instructions into sync.  This differs from the above in
	 * examining all kernel text, as opposed to just the exception
	 * handling.  We sync the icache on every instruction found since
	 * there are only very few of them.
	 */
	if (cpuvers == MPC601) {
		extern int kernel_text[], etext[];
		int *ip;

		for (ip = kernel_text; ip < etext; ip++)
			if (*ip == TLBSYNC) {
				*ip = SYNC;
				__syncicache(ip, sizeof(*ip));
			}
	}
#endif /* PPC_OEA601 */

	/*
	 * Configure a PSL user mask matching this processor.
	 * psluserset = bits forced on in user mode; pslusermod = bits
	 * user mode may modify.
	 */
	cpu_psluserset = PSL_EE | PSL_PR | PSL_ME | PSL_IR | PSL_DR | PSL_RI;
	cpu_pslusermod = PSL_FP | PSL_FE0 | PSL_FE1 | PSL_LE | PSL_SE | PSL_BE;
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		cpu_psluserset &= PSL_601_MASK;
		cpu_pslusermod &= PSL_601_MASK;
	}
#endif
#ifdef ALTIVEC
	if (cpu_altivec)
		cpu_pslusermod |= PSL_VEC;
#endif
#ifdef PPC_HIGH_VEC
	cpu_psluserset |= PSL_IP;	/* XXX ok? */
#endif

	/*
	 * external interrupt handler install
	 */
	if (handler)
		oea_install_extint(handler);

	__syncicache((void *)exc_base, EXC_LAST + 0x100);

	/*
	 * Now enable translation (and machine checks/recoverable interrupts).
	 */
#ifdef PPC_OEA
	__asm volatile ("sync; mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
	    :	"=r"(scratch)
	    :	"K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));
#endif

	KASSERT(curcpu() == ci);
}

#ifdef PPC_OEA601
/*
 * Map a 256 MB I/O region on the MPC601 by loading an I/O segment
 * register instead of a BAT (the 601's BATs are scarce).  The segment
 * value is also stashed in iosrtable[] for pmap_bootstrap().
 */
void
mpc601_ioseg_add(paddr_t pa, register_t len)
{
	const u_int i = pa >> ADDR_SR_SHFT;

	if (len != BAT_BL_256M)
		panic("mpc601_ioseg_add: len != 256M");

	/*
	 * Translate into an I/O segment, load it, and stash away for use
	 * in pmap_bootstrap().
	 */
	iosrtable[i] = SR601(SR601_Ks, SR601_BUID_MEMFORCED, 0, i);
	__asm volatile ("mtsrin %0,%1"
	    ::	"r"(iosrtable[i]),
		"r"(pa));
}
#endif /* PPC_OEA601 */

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
/*
 * Record a cache-inhibited, guarded, read/write DBAT mapping for the
 * I/O region at pa (length 'len', a BAT_BL_* encoding) in battable[],
 * and load it into the next free DBAT register (DBAT1..DBAT3; DBAT0
 * maps RAM).  Calls beyond the third are remembered in battable[] only.
 */
void
oea_iobat_add(paddr_t pa, register_t len)
{
	static int n = 1;	/* next DBAT register to load */
	const u_int i = pa >> 28;
	battable[i].batl = BATL(pa, BAT_I|BAT_G, BAT_PP_RW);
	battable[i].batu = BATU(pa, len, BAT_Vs);

	/*
	 * Let's start loading the BAT registers.
	 */
	switch (n) {
	case 1:
		__asm volatile ("mtdbatl 1,%0; mtdbatu 1,%1;"
		    ::	"r"(battable[i].batl),
			"r"(battable[i].batu));
		n = 2;
		break;
	case 2:
		__asm volatile ("mtdbatl 2,%0; mtdbatu 2,%1;"
		    ::	"r"(battable[i].batl),
			"r"(battable[i].batu));
		n = 3;
		break;
	case 3:
		__asm volatile ("mtdbatl 3,%0; mtdbatu 3,%1;"
		    ::	"r"(battable[i].batl),
			"r"(battable[i].batu));
		n = 4;
		break;
	default:
		break;
	}
}

/*
 * Remove the I/O BAT mapping covering pa: clear the battable[] entry
 * and invalidate whichever of DBAT1..DBAT3 currently maps pa.
 */
void
oea_iobat_remove(paddr_t pa)
{
	register_t batu;
	int i, n;

	n = pa >> ADDR_SR_SHFT;
	if (!BAT_VA_MATCH_P(battable[n].batu, pa) ||
	    !BAT_VALID_P(battable[n].batu, PSL_PR))
		return;
	battable[n].batl = 0;
	battable[n].batu = 0;
	/* mtdbat/mfdbat need a compile-time register number ("n"). */
#define	BAT_RESET(n) \
	__asm volatile("mtdbatu %0,%1; mtdbatl %0,%1" :: "n"(n), "r"(0))
#define	BATU_GET(n, r)	__asm volatile("mfdbatu %0,%1" : "=r"(r) : "n"(n))

	for (i=1 ; i<4 ; i++) {
		switch (i) {
		case 1:
			BATU_GET(1, batu);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				BAT_RESET(1);
			break;
		case 2:
			BATU_GET(2, batu);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				BAT_RESET(2);
			break;
		case 3:
			BATU_GET(3, batu);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				BAT_RESET(3);
			break;
		default:
			break;
		}
	}
}

/*
 * Initialize the BAT registers and battable[]: clear all BATs (if
 * still in real mode), map the lowest 256 MB of RAM via BAT0 (or via
 * I/O segments and the unified BAT on the 601), add any caller-supplied
 * I/O regions, then fill battable[] for every RAM region.
 *
 * The variable arguments are (paddr_t pa, register_t len) pairs of I/O
 * regions, starting with the fixed 'pa' parameter and terminated by a
 * pa of 0.
 */
void
oea_batinit(paddr_t pa, ...)
{
	struct mem_region *allmem, *availmem, *mp;
	unsigned int cpuvers;
	register_t msr = mfmsr();
	va_list ap;

	cpuvers = mfpvr() >> 16;

	/*
	 * Initialize BAT registers to unmapped to not generate
	 * overlapping mappings below.
	 *
	 * The 601's implementation differs in the Valid bit being situated
	 * in the lower BAT register, and in being a unified BAT only whose
	 * four entries are accessed through the IBAT[0-3] SPRs.
	 *
	 * Also, while the 601 does distinguish between supervisor/user
	 * protection keys, it does _not_ distinguish between validity in
	 * supervisor/user mode.
	 */
	if ((msr & (PSL_IR|PSL_DR)) == 0) {
#ifdef PPC_OEA601
		if (cpuvers == MPC601) {
			__asm volatile ("mtibatl 0,%0" :: "r"(0));
			__asm volatile ("mtibatl 1,%0" :: "r"(0));
			__asm volatile ("mtibatl 2,%0" :: "r"(0));
			__asm volatile ("mtibatl 3,%0" :: "r"(0));
		} else
#endif /* PPC_OEA601 */
		{
			__asm volatile ("mtibatu 0,%0" :: "r"(0));
			__asm volatile ("mtibatu 1,%0" :: "r"(0));
			__asm volatile ("mtibatu 2,%0" :: "r"(0));
			__asm volatile ("mtibatu 3,%0" :: "r"(0));
			__asm volatile ("mtdbatu 0,%0" :: "r"(0));
			__asm volatile ("mtdbatu 1,%0" :: "r"(0));
			__asm volatile ("mtdbatu 2,%0" :: "r"(0));
			__asm volatile ("mtdbatu 3,%0" :: "r"(0));
		}
	}

	/*
	 * Set up BAT to map physical memory
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		int i;

		/*
		 * Set up battable to map the lowest 256 MB area.
		 * Map the lowest 32 MB area via BAT[0-3];
		 * BAT[01] are fixed, BAT[23] are floating.
		 */
		for (i = 0; i < 32; i++) {
			battable[i].batl = BATL601(i << 23,
			   BAT601_BSM_8M, BAT601_V);
			battable[i].batu = BATU601(i << 23,
			    BAT601_M, BAT601_Ku, BAT601_PP_NONE);
		}
		__asm volatile ("mtibatu 0,%1; mtibatl 0,%0"
		    ::	"r"(battable[0x00000000 >> 23].batl),
			"r"(battable[0x00000000 >> 23].batu));
		__asm volatile ("mtibatu 1,%1; mtibatl 1,%0"
		    ::	"r"(battable[0x00800000 >> 23].batl),
			"r"(battable[0x00800000 >> 23].batu));
		__asm volatile ("mtibatu 2,%1; mtibatl 2,%0"
		    ::	"r"(battable[0x01000000 >> 23].batl),
			"r"(battable[0x01000000 >> 23].batu));
		__asm volatile ("mtibatu 3,%1; mtibatl 3,%0"
		    ::	"r"(battable[0x01800000 >> 23].batl),
			"r"(battable[0x01800000 >> 23].batu));
	} else
#endif /* PPC_OEA601 */
	{
		/*
		 * Set up BAT0 to only map the lowest 256 MB area
		 */
		battable[0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
		battable[0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

		__asm volatile ("mtibatl 0,%0; mtibatu 0,%1;"
		    "mtdbatl 0,%0; mtdbatu 0,%1;"
		    ::	"r"(battable[0].batl), "r"(battable[0].batu));
	}

	/*
	 * Now setup other fixed bat registers
	 *
	 * Note that we still run in real mode, and the BAT
	 * registers were cleared above.
	 */

	va_start(ap, pa);

	/*
	 * Add any I/O BATs specified;
	 * use I/O segments on the BAT-starved 601.
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			mpc601_ioseg_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	} else
#endif
	{
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			oea_iobat_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	}

	va_end(ap);

	/*
	 * Set up battable to map all RAM regions.
	 * This is here because mem_regions() call needs bat0 set up.
	 */
	mem_regions(&allmem, &availmem);
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		/* 601: 8 MB granules, battable indexed by pa >> 23. */
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & 0xff800000;
			paddr_t end = mp->start + mp->size;

			do {
				u_int ix = paddr >> 23;

				battable[ix].batl =
				    BATL601(paddr, BAT601_BSM_8M, BAT601_V);
				battable[ix].batu =
				    BATU601(paddr, BAT601_M, BAT601_Ku, BAT601_PP_NONE);
				paddr += (1 << 23);
			} while (paddr < end);
		}
	} else
#endif
	{
		/* Others: 256 MB granules, battable indexed by pa >> 28. */
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & 0xf0000000;
			paddr_t end = mp->start + mp->size;

			do {
				u_int ix = paddr >> 28;

				battable[ix].batl =
				    BATL(paddr, BAT_M, BAT_PP_RW);
				battable[ix].batu =
				    BATU(paddr, BAT_BL_256M, BAT_Vs);
				paddr += SEGMENT_LENGTH;
			} while (paddr < end);
		}
	}
}
#endif /* PPC_OEA || PPC_OEA64_BRIDGE */

/*
 * Install 'handler' as the external interrupt (EXC_EXI) handler:
 * patch the branch offset in the extint_call stub to reach it, then
 * copy the extint prologue to the EXI vector.  Interrupts (PSL_EE)
 * are disabled around the patch.
 */
void
oea_install_extint(void (*handler)(void))
{
	extern int extint[], extsize[];
	extern int extint_call[];
	uintptr_t offset = (uintptr_t)handler - (uintptr_t)extint_call;
	int omsr, msr;

#ifdef	DIAGNOSTIC
	/* A relative branch only reaches +/- 32 MB. */
	if (offset > 0x1ffffff)
		panic("install_extint: %p too far away (%#lx)", handler,
		    (unsigned long) offset);
#endif
	__asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
	    :	"=r" (omsr), "=r" (msr)
	    :	"K" ((u_short)~PSL_EE));
	extint_call[0] = (extint_call[0] & 0xfc000003) | offset;
	__syncicache((void *)extint_call, sizeof extint_call[0]);
#ifdef PPC_HIGH_VEC
	memcpy((void *)(EXC_HIGHVEC + EXC_EXI), extint, (size_t)extsize);
	__syncicache((void *)(EXC_HIGHVEC + EXC_EXI), (int)extsize);
#else
	memcpy((void *)EXC_EXI, extint, (size_t)extsize);
	__syncicache((void *)EXC_EXI, (int)extsize);
#endif
	__asm volatile ("mtmsr %0" :: "r"(omsr));
}

/*
 * Machine dependent startup code.
 */
void
oea_startup(const char *model)
{
	uintptr_t sz;
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	KASSERT(curcpu() != NULL);
	KASSERT(lwp0.l_cpu != NULL);
	KASSERT(curcpu()->ci_intstk != 0);
	KASSERT(curcpu()->ci_intrdepth == -1);

	sz = round_page(MSGBUFSIZE);
#ifdef MSGBUFADDR
	v = (void *) MSGBUFADDR;
#else
	/*
	 * If the msgbuf is not in segment 0, allocate KVA for it and access
	 * it via mapped pages.  [This prevents unneeded BAT switches.]
	 */
	v = (void *) msgbuf_paddr;
	if (msgbuf_paddr + sz > SEGMENT_LENGTH) {
		u_int i;

		minaddr = 0;
		if (uvm_map(kernel_map, &minaddr, sz,
				NULL, UVM_UNKNOWN_OFFSET, 0,
				UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
				    UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0)
			panic("startup: cannot allocate VM for msgbuf");
		v = (void *)minaddr;
		for (i = 0; i < sz; i += PAGE_SIZE) {
			pmap_kenter_pa(minaddr + i, msgbuf_paddr + i,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
	}
#endif
	initmsgbuf(v, sz);

	/* Print the copyright banner, CPU model and memory totals. */
	printf("%s%s", copyright, version);
	if (model != NULL)
		printf("Model: %s\n", model);
	cpu_identify(NULL, 0);

	format_bytes(pbuf, sizeof(pbuf), ctob((u_int)physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Allocate away the pages that map to 0xDEA[CDE]xxxx.  Do this after
	 * the bufpages are allocated in case they overlap since it's not
	 * fatal if we can't allocate these.
	 */
	if (KERNEL_SR == 13 || KERNEL2_SR == 14) {
		int error;
		minaddr = 0xDEAC0000;
		error = uvm_map(kernel_map, &minaddr, 0x30000,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
			UVM_ADV_NORMAL, UVM_FLAG_FIXED));
		if (error != 0 || minaddr != 0xDEAC0000)
			printf("oea_startup: failed to allocate DEAD "
			    "ZONE: error=%d\n", error);
	}

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}

/*
 * Crash dump handling.
 */

void
oea_dumpsys(void)
{
	/* Crash dumps are not implemented for OEA. */
	printf("dumpsys: TBD\n");
}

/*
 * Convert kernel VA to physical address
 */
paddr_t
kvtop(void *addr)
{
	vaddr_t va;
	paddr_t pa;
	uintptr_t off;
	extern char end[];

	/*
	 * Addresses below the kernel image's end are returned unchanged
	 * (assumes the kernel is mapped virtual == physical there —
	 * consistent with the BAT0 setup in oea_batinit()).
	 */
	if (addr < (void *)end)
		return (paddr_t)addr;

	va = trunc_page((vaddr_t)addr);
	off = (uintptr_t)addr - va;

	if (pmap_extract(pmap_kernel(), va, &pa) == false) {
		/* Unmapped: fall back to returning the VA itself. */
		/*printf("kvtop: zero page frame (va=0x%x)\n", addr);*/
		return (paddr_t)addr;
	}

	return(pa + off);
}

/*
 * Allocate vm space and mapin the I/O address
 */
void *
mapiodev(paddr_t pa, psize_t len)
{
	paddr_t faddr;
	vaddr_t taddr, va;
	int off;

	/* Page-align the region, remembering pa's offset within its page. */
	faddr = trunc_page(pa);
	off = pa - faddr;
	len = round_page(off + len);
	va = taddr = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY);

	if (va == 0)
		return NULL;

	for (; len > 0; len -= PAGE_SIZE) {
		pmap_kenter_pa(taddr, faddr, VM_PROT_READ | VM_PROT_WRITE, 0);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (void *)(va + off);
}

/*
 * Undo a mapiodev(): unmap the pages and release the KVA.
 * A NULL/zero va is ignored.
 */
void
unmapiodev(vaddr_t va, vsize_t len)
{
	paddr_t faddr;

	if (! va)
		return;

	faddr = trunc_page(va);
	len = round_page(va - faddr + len);

	pmap_kremove(faddr, len);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, faddr, len, UVM_KMF_VAONLY);
}

/*
 * Target of the branch installed at address 0 by oea_init():
 * panic, reporting the caller's link register (passed in as 'lr').
 */
void
trap0(void *lr)
{
	panic("call to null-ptr from %p", lr);
}