/*	$NetBSD: oea_machdep.c,v 1.45 2008/05/24 21:39:01 phx Exp $	*/

/*
 * Copyright (C) 2002 Matt Thomas
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: oea_machdep.c,v 1.45 2008/05/24 21:39:01 phx Exp $");

#include "opt_ppcarch.h"
#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_ipkdb.h"
#include "opt_multiprocessor.h"
#include "opt_altivec.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscallargs.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/user.h>
#include <sys/boot_flag.h>

#include <uvm/uvm_extern.h>

#include <net/netisr.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#endif

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#ifdef IPKDB
#include <ipkdb/ipkdb.h>
#endif

#include <powerpc/oea/bat.h>
#include <powerpc/oea/sr_601.h>
#include <powerpc/oea/cpufeat.h>
#include <powerpc/trap.h>
#include <powerpc/stdarg.h>
#include <powerpc/spr.h>
#include <powerpc/pte.h>
#include <powerpc/altivec.h>
#include <machine/powerpc.h>

char machine[] = MACHINE;		/* from <machine/param.h> */
char machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

/*
 * Global variables used here and there
 */
extern struct user *proc0paddr;

static void trap0(void *);

/* XXXSL: The battable is not initialized to non-zero for PPC_OEA64 and PPC_OEA64_BRIDGE */
struct bat battable[512];

register_t iosrtable[16];	/* I/O segments, for kernel_pmap setup */
paddr_t msgbuf_paddr;

void
oea_init(void (*handler)(void))
{
        extern int trapcode[], trapsize[];
        extern int sctrap[], scsize[];
        extern int alitrap[], alisize[];
        extern int dsitrap[], dsisize[];
        extern int trapstart[], trapend[];
#ifdef PPC_OEA601
        extern int dsi601trap[], dsi601size[];
#endif
        extern int decrint[], decrsize[];
        extern int tlbimiss[], tlbimsize[];
        extern int tlbdlmiss[], tlbdlmsize[];
        extern int tlbdsmiss[], tlbdsmsize[];
#if defined(DDB) || defined(KGDB)
        extern int ddblow[], ddbsize[];
#endif
#ifdef IPKDB
        extern int ipkdblow[], ipkdbsize[];
#endif
#ifdef ALTIVEC
        register_t msr;
#endif
        uintptr_t exc, exc_base;
#if defined(ALTIVEC) || defined(PPC_OEA)
        register_t scratch;
#endif
        unsigned int cpuvers;
        size_t size;
        struct cpu_info * const ci = &cpu_info[0];

#ifdef PPC_HIGH_VEC
        exc_base = EXC_HIGHVEC;
#else
        exc_base = 0;
#endif
        mtspr(SPR_SPRG0, ci);
        cpuvers = mfpvr() >> 16;
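
        /*
         * From here on SPRG0 holds the cpu_info pointer, for the benefit
         * of the trap code that needs to locate the current CPU, and
         * cpuvers holds the upper half of the PVR, i.e. the processor
         * version (MPC601 and friends).
         */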

        /*
         * Initialize proc0 and current pcb and pmap pointers.
         */
        KASSERT(ci != NULL);
        KASSERT(curcpu() == ci);
        lwp0.l_cpu = ci;
        lwp0.l_addr = proc0paddr;
        memset(lwp0.l_addr, 0, sizeof *lwp0.l_addr);
        KASSERT(lwp0.l_cpu != NULL);

        curpcb = &proc0paddr->u_pcb;
        memset(curpcb, 0, sizeof(*curpcb));
#ifdef ALTIVEC
        /*
         * Initialize the vectors with NaNs
         */
        for (scratch = 0; scratch < 32; scratch++) {
                curpcb->pcb_vr.vreg[scratch][0] = 0x7FFFDEAD;
                curpcb->pcb_vr.vreg[scratch][1] = 0x7FFFDEAD;
                curpcb->pcb_vr.vreg[scratch][2] = 0x7FFFDEAD;
                curpcb->pcb_vr.vreg[scratch][3] = 0x7FFFDEAD;
        }
        curpcb->pcb_vr.vscr = 0;
        curpcb->pcb_vr.vrsave = 0;
#endif
        curpm = curpcb->pcb_pm = pmap_kernel();

        /*
         * Cause a PGM trap if we branch to 0.
         *
         * XXX GCC4.1 complains about memset on address zero, so
         * don't use the builtin.
         */
#undef memset
        memset(0, 0, 0x100);

        /*
         * Set up trap vectors.  Don't assume vectors are on 0x100.
         */
        for (exc = exc_base; exc <= exc_base + EXC_LAST; exc += 0x100) {
                switch (exc - exc_base) {
                default:
                        size = (size_t)trapsize;
                        memcpy((void *)exc, trapcode, size);
                        break;
#if 0
                case EXC_EXI:
                        /*
                         * This one is (potentially) installed during autoconf
                         */
                        break;
#endif
                case EXC_SC:
                        size = (size_t)scsize;
                        memcpy((void *)exc, sctrap, size);
                        break;
                case EXC_ALI:
                        size = (size_t)alisize;
                        memcpy((void *)exc, alitrap, size);
                        break;
                case EXC_DSI:
#ifdef PPC_OEA601
                        if (cpuvers == MPC601) {
                                size = (size_t)dsi601size;
                                memcpy((void *)exc, dsi601trap, size);
                                break;
                        } else
#endif /* PPC_OEA601 */
                        if (oeacpufeat & OEACPU_NOBAT) {
                                size = (size_t)alisize;
                                memcpy((void *)exc, alitrap, size);
                        } else {
                                size = (size_t)dsisize;
                                memcpy((void *)exc, dsitrap, size);
                        }
                        break;
                case EXC_DECR:
                        size = (size_t)decrsize;
                        memcpy((void *)exc, decrint, size);
                        break;
                case EXC_IMISS:
                        size = (size_t)tlbimsize;
                        memcpy((void *)exc, tlbimiss, size);
                        break;
                case EXC_DLMISS:
                        size = (size_t)tlbdlmsize;
                        memcpy((void *)exc, tlbdlmiss, size);
                        break;
                case EXC_DSMISS:
                        size = (size_t)tlbdsmsize;
                        memcpy((void *)exc, tlbdsmiss, size);
                        break;
                case EXC_PERF:
                        size = (size_t)trapsize;
                        memcpy((void *)exc, trapcode, size);
                        memcpy((void *)(exc_base + EXC_VEC), trapcode, size);
                        break;
#if defined(DDB) || defined(IPKDB) || defined(KGDB)
                case EXC_RUNMODETRC:
#ifdef PPC_OEA601
                        if (cpuvers != MPC601) {
#endif
                                size = (size_t)trapsize;
                                memcpy((void *)exc, trapcode, size);
                                break;
#ifdef PPC_OEA601
                        }
                        /* FALLTHROUGH */
#endif
                case EXC_PGM:
                case EXC_TRC:
                case EXC_BPT:
#if defined(DDB) || defined(KGDB)
                        size = (size_t)ddbsize;
                        memcpy((void *)exc, ddblow, size);
#if defined(IPKDB)
#error "cannot enable IPKDB with DDB or KGDB"
#endif
#else
                        size = (size_t)ipkdbsize;
                        memcpy((void *)exc, ipkdblow, size);
#endif
                        break;
#endif /* DDB || IPKDB || KGDB */
                }
#if 0
                exc += roundup(size, 32);
#endif
        }

        /*
         * Install a branch absolute to trap0 to force a panic.
         */
        if ((uintptr_t)trap0 < 0x2000000) {
                *(uint32_t *) 0 = 0x7c6802a6;
                *(uint32_t *) 4 = 0x48000002 | (uintptr_t) trap0;
        }
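
        /*
         * The two words stored above decode as "mflr r3" (0x7c6802a6)
         * followed by "ba trap0" (0x48000002 with the absolute address
         * ORed into the LI field), so a call through a null pointer
         * reaches trap0() with the caller's return address as its
         * argument.  The ba instruction can only reach the low 32 MB,
         * hence the check against 0x2000000.
         */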

        /*
         * Get the cache sizes because oea_install_extint() calls
         * __syncicache().
         */
        cpu_probe_cache();

/*
 * Instruction patterns used to patch the trap code in place below.
 * MxSPR_MASK clears the rS/rD field (bits 6-10) of an mfspr/mtspr
 * opcode, so the comparisons match no matter which GPR a sequence uses.
 */
#define MxSPR_MASK	0x7c1fffff
#define MFSPR_MQ	0x7c0002a6
#define MTSPR_MQ	0x7c0003a6
#define MTSPR_IBAT0L	0x7c1183a6
#define MTSPR_IBAT1L	0x7c1383a6
#define NOP		0x60000000
#define B		0x48000000
#define TLBSYNC		0x7c00046c
#define SYNC		0x7c0004ac

#ifdef ALTIVEC
#define MFSPR_VRSAVE	0x7c0042a6
#define MTSPR_VRSAVE	0x7c0043a6

        /*
         * Try to set the VEC bit in the MSR.  If it doesn't get set, we
         * are not on an AltiVec capable processor.
         */
        __asm volatile (
            "mfmsr %0; oris %1,%0,%2@h; mtmsr %1; isync; "
            "mfmsr %1; mtmsr %0; isync"
            : "=r"(msr), "=r"(scratch)
            : "J"(PSL_VEC));

        /*
         * If we aren't on an AltiVec capable processor, we need to zap
         * the sequences that save/restore the VRSAVE SPR into NOPs.
         */
        if (scratch & PSL_VEC) {
                cpu_altivec = 1;
        } else {
                int *ip = trapstart;

                for (; ip < trapend; ip++) {
                        if ((ip[0] & MxSPR_MASK) == MFSPR_VRSAVE) {
                                ip[0] = NOP;	/* mfspr */
                                ip[1] = NOP;	/* stw */
                        } else if ((ip[0] & MxSPR_MASK) == MTSPR_VRSAVE) {
                                ip[-1] = NOP;	/* lwz */
                                ip[0] = NOP;	/* mtspr */
                        }
                }
        }
#endif

        /*
         * XXX It would seem like this code could be elided ifndef 601,
         * but doing so breaks my power3 machine.
         */
        /*
         * If we aren't on an MPC601 processor, we need to zap the
         * sequences that save/restore the MQ SPR into NOPs, and skip
         * over the sequences where we zap/restore BAT registers on
         * kernel exit/entry.
         */
        if (cpuvers != MPC601) {
                int *ip = trapstart;

                for (; ip < trapend; ip++) {
                        if ((ip[0] & MxSPR_MASK) == MFSPR_MQ) {
                                ip[0] = NOP;	/* mfspr */
                                ip[1] = NOP;	/* stw */
                        } else if ((ip[0] & MxSPR_MASK) == MTSPR_MQ) {
                                ip[-1] = NOP;	/* lwz */
                                ip[0] = NOP;	/* mtspr */
                        } else if ((ip[0] & MxSPR_MASK) == MTSPR_IBAT0L) {
                                if ((ip[1] & MxSPR_MASK) == MTSPR_IBAT1L)
                                        ip[-1] = B | 0x14;	/* li */
                                else
                                        ip[-4] = B | 0x24;	/* lis */
                        }
                }
        }

        /*
         * Sync the changed instructions.
         */
        __syncicache((void *) trapstart,
            (uintptr_t) trapend - (uintptr_t) trapstart);

#ifdef PPC_OEA601
        /*
         * If we are on an MPC601 processor, we need to zap any tlbsync
         * instructions into sync.  This differs from the above in
         * examining all kernel text, as opposed to just the exception
         * handling.  We sync the icache on every instruction found since
         * there are only very few of them.
         */
        if (cpuvers == MPC601) {
                extern int kernel_text[], etext[];
                int *ip;

                for (ip = kernel_text; ip < etext; ip++)
                        if (*ip == TLBSYNC) {
                                *ip = SYNC;
                                __syncicache(ip, sizeof(*ip));
                        }
        }
#endif /* PPC_OEA601 */

        /*
         * Configure a PSL user mask matching this processor.
         */
        cpu_psluserset = PSL_EE | PSL_PR | PSL_ME | PSL_IR | PSL_DR | PSL_RI;
        cpu_pslusermod = PSL_FP | PSL_FE0 | PSL_FE1 | PSL_LE | PSL_SE | PSL_BE;
#ifdef PPC_OEA601
        if (cpuvers == MPC601) {
                cpu_psluserset &= PSL_601_MASK;
                cpu_pslusermod &= PSL_601_MASK;
        }
#endif
#ifdef ALTIVEC
        if (cpu_altivec)
                cpu_pslusermod |= PSL_VEC;
#endif
#ifdef PPC_HIGH_VEC
        cpu_psluserset |= PSL_IP;	/* XXX ok? */
#endif
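
        /*
         * At this point cpu_psluserset holds the MSR bits that must
         * always be set while in user mode, and cpu_pslusermod holds the
         * bits a user process may legitimately modify (FP enables, trace
         * and endian bits, and AltiVec where present).
         */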

        /*
         * external interrupt handler install
         */
        if (handler)
                oea_install_extint(handler);

        __syncicache((void *)exc_base, EXC_LAST + 0x100);

        /*
         * Now enable translation (and machine checks/recoverable interrupts).
         */
#ifdef PPC_OEA
        __asm volatile ("sync; mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
            : "=r"(scratch)
            : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));
#endif

        KASSERT(curcpu() == ci);
}

#ifdef PPC_OEA601
void
mpc601_ioseg_add(paddr_t pa, register_t len)
{
        const u_int i = pa >> ADDR_SR_SHFT;

        if (len != BAT_BL_256M)
                panic("mpc601_ioseg_add: len != 256M");

        /*
         * Translate into an I/O segment, load it, and stash away for use
         * in pmap_bootstrap().
         */
        iosrtable[i] = SR601(SR601_Ks, SR601_BUID_MEMFORCED, 0, i);
        __asm volatile ("mtsrin %0,%1"
            ::	"r"(iosrtable[i]),
                "r"(pa));
}
#endif /* PPC_OEA601 */

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
void
oea_iobat_add(paddr_t pa, register_t len)
{
        static int n = 1;
        const u_int i = pa >> 28;

        battable[i].batl = BATL(pa, BAT_I|BAT_G, BAT_PP_RW);
        battable[i].batu = BATU(pa, len, BAT_Vs);

        /*
         * Let's start loading the BAT registers.
         */
        switch (n) {
        case 1:
                __asm volatile ("mtdbatl 1,%0; mtdbatu 1,%1;"
                    ::	"r"(battable[i].batl),
                        "r"(battable[i].batu));
                n = 2;
                break;
        case 2:
                __asm volatile ("mtdbatl 2,%0; mtdbatu 2,%1;"
                    ::	"r"(battable[i].batl),
                        "r"(battable[i].batu));
                n = 3;
                break;
        case 3:
                __asm volatile ("mtdbatl 3,%0; mtdbatu 3,%1;"
                    ::	"r"(battable[i].batl),
                        "r"(battable[i].batu));
                n = 4;
                break;
        default:
                break;
        }
}

void
oea_iobat_remove(paddr_t pa)
{
        register_t batu;
        int i, n;

        n = pa >> ADDR_SR_SHFT;
        if (!BAT_VA_MATCH_P(battable[n].batu, pa) ||
            !BAT_VALID_P(battable[n].batu, PSL_PR))
                return;
        battable[n].batl = 0;
        battable[n].batu = 0;
#define BAT_RESET(n) \
        __asm volatile("mtdbatu %0,%1; mtdbatl %0,%1" :: "n"(n), "r"(0))
#define BATU_GET(n, r)	__asm volatile("mfdbatu %0,%1" : "=r"(r) : "n"(n))

        for (i = 1; i < 4; i++) {
                switch (i) {
                case 1:
                        BATU_GET(1, batu);
                        if (BAT_VA_MATCH_P(batu, pa) &&
                            BAT_VALID_P(batu, PSL_PR))
                                BAT_RESET(1);
                        break;
                case 2:
                        BATU_GET(2, batu);
                        if (BAT_VA_MATCH_P(batu, pa) &&
                            BAT_VALID_P(batu, PSL_PR))
                                BAT_RESET(2);
                        break;
                case 3:
                        BATU_GET(3, batu);
                        if (BAT_VA_MATCH_P(batu, pa) &&
                            BAT_VALID_P(batu, PSL_PR))
                                BAT_RESET(3);
                        break;
                default:
                        break;
                }
        }
}
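
/*
 * A port's early bootstrap code calls oea_batinit() with (address, length)
 * pairs terminated by a zero address, e.g. (hypothetical addresses):
 *
 *	oea_batinit(0x80000000, BAT_BL_256M, 0xf0000000, BAT_BL_128M, 0);
 *
 * Each pair is handed to oea_iobat_add() above (or mpc601_ioseg_add() on
 * the 601), which enters a cache-inhibited, guarded mapping for the region.
 */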
void
oea_batinit(paddr_t pa, ...)
{
        struct mem_region *allmem, *availmem, *mp;
        unsigned int cpuvers;
        register_t msr = mfmsr();
        va_list ap;

        cpuvers = mfpvr() >> 16;

        /*
         * Initialize the BAT registers to unmapped so that we don't
         * generate overlapping mappings below.
         *
         * The 601's implementation differs in that the Valid bit sits in
         * the lower BAT register, and in that it has only a unified BAT
         * whose four entries are accessed through the IBAT[0-3] SPRs.
         *
         * Also, while the 601 does distinguish between supervisor/user
         * protection keys, it does _not_ distinguish between validity in
         * supervisor/user mode.
         */
        if ((msr & (PSL_IR|PSL_DR)) == 0) {
#ifdef PPC_OEA601
                if (cpuvers == MPC601) {
                        __asm volatile ("mtibatl 0,%0" :: "r"(0));
                        __asm volatile ("mtibatl 1,%0" :: "r"(0));
                        __asm volatile ("mtibatl 2,%0" :: "r"(0));
                        __asm volatile ("mtibatl 3,%0" :: "r"(0));
                } else
#endif /* PPC_OEA601 */
                {
                        __asm volatile ("mtibatu 0,%0" :: "r"(0));
                        __asm volatile ("mtibatu 1,%0" :: "r"(0));
                        __asm volatile ("mtibatu 2,%0" :: "r"(0));
                        __asm volatile ("mtibatu 3,%0" :: "r"(0));
                        __asm volatile ("mtdbatu 0,%0" :: "r"(0));
                        __asm volatile ("mtdbatu 1,%0" :: "r"(0));
                        __asm volatile ("mtdbatu 2,%0" :: "r"(0));
                        __asm volatile ("mtdbatu 3,%0" :: "r"(0));
                }
        }

        /*
         * Set up BAT to map physical memory
         */
#ifdef PPC_OEA601
        if (cpuvers == MPC601) {
                int i;

                /*
                 * Set up battable to map the lowest 256 MB area.
                 * Map the lowest 32 MB area via BAT[0-3];
                 * BAT[01] are fixed, BAT[23] are floating.
                 */
                for (i = 0; i < 32; i++) {
                        battable[i].batl = BATL601(i << 23,
                            BAT601_BSM_8M, BAT601_V);
                        battable[i].batu = BATU601(i << 23,
                            BAT601_M, BAT601_Ku, BAT601_PP_NONE);
                }
                __asm volatile ("mtibatu 0,%1; mtibatl 0,%0"
                    :: "r"(battable[0x00000000 >> 23].batl),
                       "r"(battable[0x00000000 >> 23].batu));
                __asm volatile ("mtibatu 1,%1; mtibatl 1,%0"
                    :: "r"(battable[0x00800000 >> 23].batl),
                       "r"(battable[0x00800000 >> 23].batu));
                __asm volatile ("mtibatu 2,%1; mtibatl 2,%0"
                    :: "r"(battable[0x01000000 >> 23].batl),
                       "r"(battable[0x01000000 >> 23].batu));
                __asm volatile ("mtibatu 3,%1; mtibatl 3,%0"
                    :: "r"(battable[0x01800000 >> 23].batl),
                       "r"(battable[0x01800000 >> 23].batu));
        } else
#endif /* PPC_OEA601 */
        {
                /*
                 * Set up BAT0 to only map the lowest 256 MB area
                 */
                battable[0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
                battable[0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

                __asm volatile ("mtibatl 0,%0; mtibatu 0,%1;"
                    "mtdbatl 0,%0; mtdbatu 0,%1;"
                    :: "r"(battable[0].batl), "r"(battable[0].batu));
        }

        /*
         * Now set up the other fixed BAT registers.
         *
         * Note that we still run in real mode, and the BAT
         * registers were cleared above.
         */

        va_start(ap, pa);

        /*
         * Add any I/O BATs specified;
         * use I/O segments on the BAT-starved 601.
         */
#ifdef PPC_OEA601
        if (cpuvers == MPC601) {
                while (pa != 0) {
                        register_t len = va_arg(ap, register_t);
                        mpc601_ioseg_add(pa, len);
                        pa = va_arg(ap, paddr_t);
                }
        } else
#endif
        {
                while (pa != 0) {
                        register_t len = va_arg(ap, register_t);
                        oea_iobat_add(pa, len);
                        pa = va_arg(ap, paddr_t);
                }
        }

        va_end(ap);
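
        /*
         * Note that battable[] is more than a shadow copy of the BAT
         * registers: the DSI handler installed in oea_init() consults it
         * to load a BAT pair on the fly when a fault hits a segment that
         * is described here but not currently pinned in a register.
         */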

        /*
         * Set up battable to map all RAM regions.
         * This is here because the mem_regions() call needs BAT0 set up.
         */
        mem_regions(&allmem, &availmem);
#ifdef PPC_OEA601
        if (cpuvers == MPC601) {
                for (mp = allmem; mp->size; mp++) {
                        paddr_t paddr = mp->start & 0xff800000;
                        paddr_t end = mp->start + mp->size;

                        do {
                                u_int ix = paddr >> 23;

                                battable[ix].batl =
                                    BATL601(paddr, BAT601_BSM_8M, BAT601_V);
                                battable[ix].batu =
                                    BATU601(paddr, BAT601_M, BAT601_Ku,
                                        BAT601_PP_NONE);
                                paddr += (1 << 23);
                        } while (paddr < end);
                }
        } else
#endif
        {
                for (mp = allmem; mp->size; mp++) {
                        paddr_t paddr = mp->start & 0xf0000000;
                        paddr_t end = mp->start + mp->size;

                        do {
                                u_int ix = paddr >> 28;

                                battable[ix].batl =
                                    BATL(paddr, BAT_M, BAT_PP_RW);
                                battable[ix].batu =
                                    BATU(paddr, BAT_BL_256M, BAT_Vs);
                                paddr += SEGMENT_LENGTH;
                        } while (paddr < end);
                }
        }
}
#endif /* PPC_OEA || PPC_OEA64_BRIDGE */

void
oea_install_extint(void (*handler)(void))
{
        extern int extint[], extsize[];
        extern int extint_call[];
        uintptr_t offset = (uintptr_t)handler - (uintptr_t)extint_call;
        int omsr, msr;

#ifdef DIAGNOSTIC
        if (offset > 0x1ffffff)
                panic("install_extint: %p too far away (%#lx)", handler,
                    (unsigned long) offset);
#endif
        __asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
            :	"=r" (omsr), "=r" (msr)
            :	"K" ((u_short)~PSL_EE));
        /*
         * Patch the target of the branch at extint_call so that it
         * reaches the new handler (keeping the opcode, AA and LK bits),
         * then flush the patched instruction from the icache.
         */
        extint_call[0] = (extint_call[0] & 0xfc000003) | offset;
        __syncicache((void *)extint_call, sizeof extint_call[0]);
#ifdef PPC_HIGH_VEC
        memcpy((void *)(EXC_HIGHVEC + EXC_EXI), extint, (size_t)extsize);
        __syncicache((void *)(EXC_HIGHVEC + EXC_EXI), (int)extsize);
#else
        memcpy((void *)EXC_EXI, extint, (size_t)extsize);
        __syncicache((void *)EXC_EXI, (int)extsize);
#endif
        __asm volatile ("mtmsr %0" :: "r"(omsr));
}

/*
 * Machine dependent startup code.
 */
void
oea_startup(const char *model)
{
        uintptr_t sz;
        void *v;
        vaddr_t minaddr, maxaddr;
        char pbuf[9];
        u_int i;

        KASSERT(curcpu() != NULL);
        KASSERT(lwp0.l_cpu != NULL);
        KASSERT(curcpu()->ci_intstk != 0);
        KASSERT(curcpu()->ci_intrdepth == -1);

        /*
         * If the msgbuf is not in segment 0, allocate KVA for it and
         * access it via mapped pages.  [This prevents unneeded BAT
         * switches.]
         */
        sz = round_page(MSGBUFSIZE);
        v = (void *) msgbuf_paddr;
        if (msgbuf_paddr + sz > SEGMENT_LENGTH) {
                minaddr = 0;
                if (uvm_map(kernel_map, &minaddr, sz,
                    NULL, UVM_UNKNOWN_OFFSET, 0,
                    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
                    UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0)
                        panic("startup: cannot allocate VM for msgbuf");
                v = (void *)minaddr;
                for (i = 0; i < sz; i += PAGE_SIZE) {
                        pmap_kenter_pa(minaddr + i, msgbuf_paddr + i,
                            VM_PROT_READ|VM_PROT_WRITE);
                }
                pmap_update(pmap_kernel());
        }
        initmsgbuf(v, sz);

        printf("%s%s", copyright, version);
        if (model != NULL)
                printf("Model: %s\n", model);
        cpu_identify(NULL, 0);

        format_bytes(pbuf, sizeof(pbuf), ctob((u_int)physmem));
        printf("total memory = %s\n", pbuf);
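
        /*
         * The "dead zone" reserved below covers poison patterns such as
         * 0xDEADBEEF, which with KERNEL_SR at 13 would otherwise be
         * valid kernel virtual addresses; reserving them turns a stray
         * dereference into a fault rather than a silent access.
         */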
        /*
         * Allocate away the pages that map to 0xDEA[CDE]xxxx.  Do this
         * after the bufpages are allocated in case they overlap, since
         * it's not fatal if we can't allocate these.
         */
        if (KERNEL_SR == 13 || KERNEL2_SR == 14) {
                int error;

                minaddr = 0xDEAC0000;
                error = uvm_map(kernel_map, &minaddr, 0x30000,
                    NULL, UVM_UNKNOWN_OFFSET, 0,
                    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
                    UVM_ADV_NORMAL, UVM_FLAG_FIXED));
                if (error != 0 || minaddr != 0xDEAC0000)
                        printf("oea_startup: failed to allocate DEAD "
                            "ZONE: error=%d\n", error);
        }

        minaddr = 0;

        /*
         * Allocate a submap for exec arguments.  This map effectively
         * limits the number of processes exec'ing at any time.  These
         * submaps will be allocated after the dead zone.
         */
        exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            16*NCARGS, VM_MAP_PAGEABLE, false, NULL);

        /*
         * Allocate a submap for physio
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, false, NULL);

#ifndef PMAP_MAP_POOLPAGE
        /*
         * Allocate an mbuf cluster submap.  When PMAP_MAP_POOLPAGE is
         * defined there is no need for one: mbuf clusters are then
         * allocated via the pool allocator out of direct-mapped pool
         * pages.
         */
        mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            mclbytes*nmbclusters, VM_MAP_INTRSAFE, false, NULL);
#endif

        format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
        printf("avail memory = %s\n", pbuf);
}

/*
 * Crash dump handling.
 */
void
oea_dumpsys(void)
{
        printf("dumpsys: TBD\n");
}

/*
 * Convert kernel VA to physical address
 */
paddr_t
kvtop(void *addr)
{
        vaddr_t va;
        paddr_t pa;
        uintptr_t off;
        extern char end[];

        if (addr < (void *)end)
                return (paddr_t)addr;

        va = trunc_page((vaddr_t)addr);
        off = (uintptr_t)addr - va;

        if (pmap_extract(pmap_kernel(), va, &pa) == false) {
                /*printf("kvtop: zero page frame (va=0x%x)\n", addr);*/
                return (paddr_t)addr;
        }

        return (pa + off);
}

/*
 * Allocate VM space and map in the I/O address.
 */
void *
mapiodev(paddr_t pa, psize_t len)
{
        paddr_t faddr;
        vaddr_t taddr, va;
        int off;

        faddr = trunc_page(pa);
        off = pa - faddr;
        len = round_page(off + len);
        va = taddr = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY);

        if (va == 0)
                return NULL;

        for (; len > 0; len -= PAGE_SIZE) {
                pmap_kenter_pa(taddr, faddr, VM_PROT_READ | VM_PROT_WRITE);
                faddr += PAGE_SIZE;
                taddr += PAGE_SIZE;
        }
        pmap_update(pmap_kernel());
        return (void *)(va + off);
}

void
unmapiodev(vaddr_t va, vsize_t len)
{
        paddr_t faddr;

        if (va == 0)
                return;

        faddr = trunc_page(va);
        len = round_page(va - faddr + len);

        pmap_kremove(faddr, len);
        pmap_update(pmap_kernel());
        uvm_km_free(kernel_map, faddr, len, UVM_KMF_VAONLY);
}

void
trap0(void *lr)
{
        panic("call to null-ptr from %p", lr);
}