/*	$NetBSD: oea_machdep.c,v 1.9 2003/07/15 02:54:45 lukem Exp $	*/

/*
 * Copyright (C) 2002 Matt Thomas
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: oea_machdep.c,v 1.9 2003/07/15 02:54:45 lukem Exp $");

#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_ipkdb.h"
#include "opt_multiprocessor.h"
#include "opt_altivec.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/user.h>
#include <sys/boot_flag.h>

#include <uvm/uvm_extern.h>

#include <net/netisr.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#endif

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#ifdef IPKDB
#include <ipkdb/ipkdb.h>
#endif

#include <powerpc/oea/bat.h>
#include <powerpc/oea/sr_601.h>
#include <powerpc/trap.h>
#include <powerpc/stdarg.h>
#include <powerpc/spr.h>
#include <powerpc/pte.h>
#include <powerpc/altivec.h>
#include <machine/powerpc.h>

char machine[] = MACHINE;		/* from <machine/param.h> */
char machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

/*
 * Global variables used here and there
 */
extern struct user *proc0paddr;

struct bat battable[512];
register_t iosrtable[16];	/* I/O segments, for kernel_pmap setup */
paddr_t msgbuf_paddr;

void
oea_init(void (*handler)(void))
{
	extern int trapstart[], trapend[];
	extern int trapcode[], trapsize[];
	extern int sctrap[], scsize[];
	extern int alitrap[], alisize[];
	extern int dsitrap[], dsisize[];
	extern int dsi601trap[], dsi601size[];
	extern int decrint[], decrsize[];
	extern int tlbimiss[], tlbimsize[];
	extern int tlbdlmiss[], tlbdlmsize[];
	extern int tlbdsmiss[], tlbdsmsize[];
#if defined(DDB) || defined(KGDB)
	extern int ddblow[], ddbsize[];
#endif
#ifdef IPKDB
	extern int ipkdblow[], ipkdbsize[];
#endif
#ifdef ALTIVEC
	register_t msr;
#endif
	uintptr_t exc;
	register_t scratch;
	unsigned int cpuvers;
	size_t size;
	struct cpu_info * const ci = &cpu_info[0];

	mtspr(SPR_SPRG0, ci);
	cpuvers = mfpvr() >> 16;

	/*
	 * Initialize proc0 and current pcb and pmap pointers.
	 */
	KASSERT(ci != NULL);
	KASSERT(curcpu() == ci);
	lwp0.l_cpu = ci;
	lwp0.l_addr = proc0paddr;
	memset(lwp0.l_addr, 0, sizeof *lwp0.l_addr);
	KASSERT(lwp0.l_cpu != NULL);

	curpcb = &proc0paddr->u_pcb;
	memset(curpcb, 0, sizeof(*curpcb));
#ifdef ALTIVEC
	/*
	 * Initialize the vectors with NaNs
	 */
	for (scratch = 0; scratch < 32; scratch++) {
		curpcb->pcb_vr.vreg[scratch][0] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][1] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][2] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][3] = 0x7FFFDEAD;
	}
	curpcb->pcb_vr.vscr = 0;
	curpcb->pcb_vr.vrsave = 0;
#endif
	curpm = curpcb->pcb_pmreal = curpcb->pcb_pm = pmap_kernel();

	/*
	 * Cause a PGM trap if we branch to 0.
	 */
	memset(0, 0, 0x100);
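
	/*
	 * A word of zeros is an illegal instruction on PowerPC, so the
	 * memset above turns a branch through a null function pointer
	 * into a program exception instead of executing whatever
	 * happened to be left at address 0.
	 */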

	/*
	 * Set up trap vectors.  Don't assume the vectors are at 0x100.
	 */
	for (exc = 0; exc <= EXC_LAST; exc += 0x100) {
		switch (exc) {
		default:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			break;
#if 0
		case EXC_EXI:
			/*
			 * This one is (potentially) installed during autoconf
			 */
			break;
#endif
		case EXC_SC:
			size = (size_t)scsize;
			memcpy((void *)EXC_SC, sctrap, size);
			break;
		case EXC_ALI:
			size = (size_t)alisize;
			memcpy((void *)EXC_ALI, alitrap, size);
			break;
		case EXC_DSI:
			if (cpuvers == MPC601) {
				size = (size_t)dsi601size;
				memcpy((void *)EXC_DSI, dsi601trap, size);
			} else {
				size = (size_t)dsisize;
				memcpy((void *)EXC_DSI, dsitrap, size);
			}
			break;
		case EXC_DECR:
			size = (size_t)decrsize;
			memcpy((void *)EXC_DECR, decrint, size);
			break;
		case EXC_IMISS:
			size = (size_t)tlbimsize;
			memcpy((void *)EXC_IMISS, tlbimiss, size);
			break;
		case EXC_DLMISS:
			size = (size_t)tlbdlmsize;
			memcpy((void *)EXC_DLMISS, tlbdlmiss, size);
			break;
		case EXC_DSMISS:
			size = (size_t)tlbdsmsize;
			memcpy((void *)EXC_DSMISS, tlbdsmiss, size);
			break;
		case EXC_PERF:
			size = (size_t)trapsize;
			memcpy((void *)EXC_PERF, trapcode, size);
			memcpy((void *)EXC_VEC, trapcode, size);
			break;
#if defined(DDB) || defined(IPKDB) || defined(KGDB)
		case EXC_RUNMODETRC:
			if (cpuvers != MPC601) {
				size = (size_t)trapsize;
				memcpy((void *)EXC_RUNMODETRC, trapcode, size);
				break;
			}
			/* FALLTHROUGH */
		case EXC_PGM:
		case EXC_TRC:
		case EXC_BPT:
#if defined(DDB) || defined(KGDB)
			size = (size_t)ddbsize;
			memcpy((void *)exc, ddblow, size);
#if defined(IPKDB)
#error "cannot enable IPKDB with DDB or KGDB"
#endif
#else
			size = (size_t)ipkdbsize;
			memcpy((void *)exc, ipkdblow, size);
#endif
			break;
#endif	/* DDB || IPKDB || KGDB */
		}
#if 0
		exc += roundup(size, 32);
#endif
	}

	/*
	 * Get the cache sizes because install_extint calls __syncicache.
	 */
	cpu_probe_cache();

#define	MxSPR_MASK	0x7c1fffff
#define	MFSPR_MQ	0x7c0002a6
#define	MTSPR_MQ	0x7c0003a6
#define	NOP		0x60000000

#ifdef ALTIVEC
#define	MFSPR_VRSAVE	0x7c0042a6
#define	MTSPR_VRSAVE	0x7c0043a6

	/*
	 * Try to set the VEC bit in the MSR.  If it doesn't get set, we
	 * are not on an AltiVec-capable processor.
	 */
	__asm __volatile (
	    "mfmsr %0; oris %1,%0,%2@h; mtmsr %1; isync; "
	    "mfmsr %1; mtmsr %0; isync"
	    : "=r"(msr), "=r"(scratch)
	    : "J"(PSL_VEC));

	/*
	 * If we aren't on an AltiVec-capable processor, we need to zap
	 * the sequences that save/restore the VRSAVE SPR, turning them
	 * into NOPs.
	 */
	if (scratch & PSL_VEC) {
		cpu_altivec = 1;
	} else {
		int *ip = trapstart;

		for (; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_VRSAVE) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_VRSAVE) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			}
		}
	}
#endif
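
	/*
	 * Note on this patching (and the MQ patching below): MxSPR_MASK
	 * clears the rT field of an mfspr/mtspr encoding, so the
	 * comparisons match the instruction regardless of which GPR it
	 * names.  With illustrative register numbers, a save sequence
	 * such as
	 *
	 *	mfspr	r9, vrsave
	 *	stw	r9, ...
	 *
	 * is rewritten into two NOPs on a processor without AltiVec.
	 */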

	/*
	 * If we aren't on an MPC601 processor, we need to zap the
	 * sequences that save/restore the MQ SPR, turning them into
	 * NOPs.
	 */
	if (cpuvers != MPC601) {
		int *ip = trapstart;

		for (; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_MQ) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_MQ) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			}
		}
	}

	if (!cpu_altivec || cpuvers != MPC601) {
		/*
		 * Sync the changed instructions.
		 */
		__syncicache((void *) trapstart,
		    (uintptr_t) trapend - (uintptr_t) trapstart);
	}

	/*
	 * Install the external interrupt handler.
	 */
	if (handler)
		oea_install_extint(handler);

	__syncicache(0, EXC_LAST + 0x100);

	/*
	 * Now enable translation (and machine checks/recoverable interrupts).
	 */
	__asm __volatile ("sync; mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
	    : "=r"(scratch)
	    : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));

	KASSERT(curcpu() == ci);
}

void
mpc601_ioseg_add(paddr_t pa, register_t len)
{
	const u_int i = pa >> ADDR_SR_SHFT;

	if (len != BAT_BL_256M)
		panic("mpc601_ioseg_add: len != 256M");

	/*
	 * Translate into an I/O segment, load it, and stash away for use
	 * in pmap_bootstrap().
	 */
	iosrtable[i] = SR601(SR601_Ks, SR601_BUID_MEMFORCED, 0, i);
	__asm __volatile ("mtsrin %0,%1"
	    ::	"r"(iosrtable[i]),
		"r"(pa));
}

void
oea_iobat_add(paddr_t pa, register_t len)
{
	static int n = 1;
	const u_int i = pa >> 28;

	battable[i].batl = BATL(pa, BAT_I|BAT_G, BAT_PP_RW);
	battable[i].batu = BATU(pa, len, BAT_Vs);

	/*
	 * Let's start loading the BAT registers.
	 */
	switch (n) {
	case 1:
		__asm __volatile ("mtdbatl 1,%0; mtdbatu 1,%1;"
		    ::	"r"(battable[i].batl),
			"r"(battable[i].batu));
		n = 2;
		break;
	case 2:
		__asm __volatile ("mtdbatl 2,%0; mtdbatu 2,%1;"
		    ::	"r"(battable[i].batl),
			"r"(battable[i].batu));
		n = 3;
		break;
	case 3:
		__asm __volatile ("mtdbatl 3,%0; mtdbatu 3,%1;"
		    ::	"r"(battable[i].batl),
			"r"(battable[i].batu));
		n = 4;
		break;
	default:
		break;
	}
}

void
oea_iobat_remove(paddr_t pa)
{
	register_t batu;
	int i, n;

	n = pa >> ADDR_SR_SHFT;
	if (!BAT_VA_MATCH_P(battable[n].batu, pa) ||
	    !BAT_VALID_P(battable[n].batu, PSL_PR))
		return;
	battable[n].batl = 0;
	battable[n].batu = 0;
#define	BAT_RESET(n) \
	__asm __volatile("mtdbatu %0,%1; mtdbatl %0,%1" :: "n"(n), "r"(0))
#define	BATU_GET(n, r)	__asm __volatile("mfdbatu %0,%1" : "=r"(r) : "n"(n))

	for (i = 1; i < 4; i++) {
		switch (i) {
		case 1:
			BATU_GET(1, batu);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				BAT_RESET(1);
			break;
		case 2:
			BATU_GET(2, batu);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				BAT_RESET(2);
			break;
		case 3:
			BATU_GET(3, batu);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				BAT_RESET(3);
			break;
		default:
			break;
		}
	}
}
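
/*
 * Example use of oea_batinit() below (hypothetical addresses): a port's
 * early machdep code could map two 256 MB I/O regions by passing
 * (pa, len) pairs terminated by a zero physical address:
 *
 *	oea_batinit(0x80000000, BAT_BL_256M,
 *	    0xf0000000, BAT_BL_256M,
 *	    0);
 */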
void
oea_batinit(paddr_t pa, ...)
{
	struct mem_region *allmem, *availmem, *mp;
	int i;
	unsigned int cpuvers;
	register_t msr = mfmsr();
	va_list ap;

	cpuvers = mfpvr() >> 16;

	/*
	 * Initialize the BAT registers to unmapped so that we don't
	 * generate overlapping mappings below.
	 *
	 * The 601's implementation differs in that the Valid bit sits
	 * in the lower BAT register, and in that the 601 has only a
	 * unified BAT, whose four entries are accessed through the
	 * IBAT[0-3] SPRs.
	 *
	 * Also, while the 601 does distinguish between supervisor/user
	 * protection keys, it does _not_ distinguish between validity
	 * in supervisor/user mode.
	 */
	if ((msr & (PSL_IR|PSL_DR)) == 0) {
		if (cpuvers == MPC601) {
			__asm __volatile ("mtibatl 0,%0" :: "r"(0));
			__asm __volatile ("mtibatl 1,%0" :: "r"(0));
			__asm __volatile ("mtibatl 2,%0" :: "r"(0));
			__asm __volatile ("mtibatl 3,%0" :: "r"(0));
		} else {
			__asm __volatile ("mtibatu 0,%0" :: "r"(0));
			__asm __volatile ("mtibatu 1,%0" :: "r"(0));
			__asm __volatile ("mtibatu 2,%0" :: "r"(0));
			__asm __volatile ("mtibatu 3,%0" :: "r"(0));
			__asm __volatile ("mtdbatu 0,%0" :: "r"(0));
			__asm __volatile ("mtdbatu 1,%0" :: "r"(0));
			__asm __volatile ("mtdbatu 2,%0" :: "r"(0));
			__asm __volatile ("mtdbatu 3,%0" :: "r"(0));
		}
	}

	/*
	 * Set up BAT to map physical memory
	 */
	if (cpuvers == MPC601) {
		/*
		 * Set up battable to map the lowest 256 MB area.
		 * Map the lowest 32 MB area via BAT[0-3];
		 * BAT[01] are fixed, BAT[23] are floating.
		 */
		for (i = 0; i < 32; i++) {
			battable[i].batl = BATL601(i << 23,
			    BAT601_BSM_8M, BAT601_V);
			battable[i].batu = BATU601(i << 23,
			    BAT601_M, BAT601_Ku, BAT601_PP_NONE);
		}
		__asm __volatile ("mtibatu 0,%1; mtibatl 0,%0"
		    :: "r"(battable[0x00000000 >> 23].batl),
		       "r"(battable[0x00000000 >> 23].batu));
		__asm __volatile ("mtibatu 1,%1; mtibatl 1,%0"
		    :: "r"(battable[0x00800000 >> 23].batl),
		       "r"(battable[0x00800000 >> 23].batu));
		__asm __volatile ("mtibatu 2,%1; mtibatl 2,%0"
		    :: "r"(battable[0x01000000 >> 23].batl),
		       "r"(battable[0x01000000 >> 23].batu));
		__asm __volatile ("mtibatu 3,%1; mtibatl 3,%0"
		    :: "r"(battable[0x01800000 >> 23].batl),
		       "r"(battable[0x01800000 >> 23].batu));
	} else {
		/*
		 * Set up BAT0 to only map the lowest 256 MB area
		 */
		battable[0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
		battable[0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

		__asm __volatile ("mtibatl 0,%0; mtibatu 0,%1;"
		    "mtdbatl 0,%0; mtdbatu 0,%1;"
		    :: "r"(battable[0].batl), "r"(battable[0].batu));
	}

	/*
	 * Now set up the other fixed BAT registers.
	 *
	 * Note that we still run in real mode, and the BAT
	 * registers were cleared above.
	 */

	va_start(ap, pa);

	/*
	 * Add any I/O BATs specified;
	 * use I/O segments on the BAT-starved 601.
	 */
	if (cpuvers == MPC601) {
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			mpc601_ioseg_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	} else {
		while (pa != 0) {
			register_t len = va_arg(ap, register_t);
			oea_iobat_add(pa, len);
			pa = va_arg(ap, paddr_t);
		}
	}

	va_end(ap);
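
	/*
	 * Note that battable[] is indexed in 8 MB steps (pa >> 23) on
	 * the 601, whose BAT entries here use 8 MB blocks, but in
	 * 256 MB segment-sized steps (pa >> 28) on later processors.
	 */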

	/*
	 * Set up battable to map all RAM regions.
	 * This is here because the mem_regions() call needs bat0 set up.
	 */
	mem_regions(&allmem, &availmem);
	if (cpuvers == MPC601) {
		for (mp = allmem; mp->size; mp++) {
			paddr_t pa = mp->start & 0xff800000;
			paddr_t end = mp->start + mp->size;

			do {
				u_int i = pa >> 23;

				battable[i].batl =
				    BATL601(pa, BAT601_BSM_8M, BAT601_V);
				battable[i].batu =
				    BATU601(pa, BAT601_M, BAT601_Ku,
					BAT601_PP_NONE);
				pa += (1 << 23);
			} while (pa < end);
		}
	} else {
		for (mp = allmem; mp->size; mp++) {
			paddr_t pa = mp->start & 0xf0000000;
			paddr_t end = mp->start + mp->size;

			do {
				u_int i = pa >> 28;

				battable[i].batl =
				    BATL(pa, BAT_M, BAT_PP_RW);
				battable[i].batu =
				    BATU(pa, BAT_BL_256M, BAT_Vs);
				pa += SEGMENT_LENGTH;
			} while (pa < end);
		}
	}
}

void
oea_install_extint(void (*handler)(void))
{
	extern int extint[], extsize[];
	extern int extint_call[];
	uintptr_t offset = (uintptr_t)handler - (uintptr_t)extint_call;
	int omsr, msr;

#ifdef	DIAGNOSTIC
	if (offset > 0x1ffffff)
		panic("install_extint: %p too far away (%#lx)", handler,
		    (unsigned long) offset);
#endif
	__asm __volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
	    :	"=r" (omsr), "=r" (msr)
	    :	"K" ((u_short)~PSL_EE));
	extint_call[0] = (extint_call[0] & 0xfc000003) | offset;
	memcpy((void *)EXC_EXI, extint, (size_t)extsize);
	__syncicache((void *)extint_call, sizeof extint_call[0]);
	__syncicache((void *)EXC_EXI, (int)extsize);
	__asm __volatile ("mtmsr %0" :: "r"(omsr));
}

/*
 * Machine dependent startup code.
 */
void
oea_startup(const char *model)
{
	uintptr_t sz;
	u_int i;
	u_long base, residual;
	caddr_t v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	KASSERT(curcpu() != NULL);
	KASSERT(lwp0.l_cpu != NULL);
	KASSERT(curcpu()->ci_intstk != 0);
	KASSERT(curcpu()->ci_spillstk != 0);
	KASSERT(curcpu()->ci_intrdepth == -1);

	/*
	 * If the msgbuf is not in segment 0, allocate KVA for it and
	 * access it via mapped pages.  [This prevents unneeded BAT
	 * switches.]
	 */
	sz = round_page(MSGBUFSIZE);
	v = (caddr_t) msgbuf_paddr;
	if (msgbuf_paddr + sz > SEGMENT_LENGTH) {
		minaddr = 0;
		if (uvm_map(kernel_map, &minaddr, sz,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
			UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0)
			panic("startup: cannot allocate VM for msgbuf");
		v = (caddr_t)minaddr;
		for (i = 0; i < sz; i += PAGE_SIZE) {
			pmap_kenter_pa(minaddr + i, msgbuf_paddr + i,
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
	}
	initmsgbuf(v, sz);

	printf("%s", version);
	if (model != NULL)
		printf("Model: %s\n", model);
	cpu_identify(NULL, 0);

	format_bytes(pbuf, sizeof(pbuf), ctob((u_int)physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (uintptr_t)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");
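
	/*
	 * Note that allocsys() is deliberately called twice: the first
	 * call, with a NULL base, only computes the size of the system
	 * tables; the second carves up the space just allocated, and
	 * the two results must agree.
	 */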

	/*
	 * Now allocate buffers proper.  They are different from the above
	 * in that they usually occupy more virtual memory than physical.
	 * Allocate the buffers starting at the top of the kernel VM space.
	 */
	sz = MAXBSIZE * nbuf;
	minaddr = VM_MAX_KERNEL_ADDRESS - round_page(sz);
	if (uvm_map(kernel_map, &minaddr, round_page(sz),
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
		UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	buffers = (char *)minaddr;
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	if (base >= MAXBSIZE) {
		/* Don't want to alloc more physical memory than needed */
		base = MAXBSIZE;
		residual = 0;
	}
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		curbuf = (vaddr_t)buffers + i * MAXBSIZE;
		curbufsize = PAGE_SIZE * (i < residual ? base + 1 : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate away the pages that map to 0xDEA[CDE]xxxx.  Do this
	 * after the bufpages are allocated in case they overlap, since
	 * it's not fatal if we can't allocate these.
	 */
	if (KERNEL_SR == 13 || KERNEL2_SR == 14) {
		int error;
		minaddr = 0xDEAC0000;
		error = uvm_map(kernel_map, &minaddr, 0x30000,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
			UVM_ADV_NORMAL, UVM_FLAG_FIXED));
		if (error != 0 || minaddr != 0xDEAC0000)
			printf("oea_startup: failed to allocate DEAD "
			    "ZONE: error=%d\n", error);
	}
	minaddr = 0;

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.  These
	 * submaps will be allocated after the dead zone.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

#ifndef PMAP_MAP_POOLPAGE
	/*
	 * If PMAP_MAP_POOLPAGE is defined, no mbuf cluster submap is
	 * needed: mbuf clusters are then allocated via the pool
	 * allocator out of direct-mapped pool pages.  Otherwise,
	 * allocate the submap here.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    mclbytes*nmbclusters, VM_MAP_INTRSAFE, FALSE, NULL);
#endif

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up the buffers.
	 */
	bufinit();
}

/*
 * Crash dump handling.
 */

void
oea_dumpsys(void)
{
	printf("dumpsys: TBD\n");
}
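
/*
 * Note on the dispatch below: <net/netisr_dispatch.h> expands the
 * DONETISR() macro once for each configured protocol, e.g.
 * (illustratively) an entry such as
 *
 *	DONETISR(NETISR_IP, ipintr);
 *
 * becomes "if (pendisr & (1 << NETISR_IP)) (*ipintr)();", so softnet()
 * calls each handler whose bit is set in pendisr exactly once.
 */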

/*
 * Soft networking interrupts.
 */
void
softnet(int pendisr)
{
#define	DONETISR(bit, fn) do {		\
	if (pendisr & (1 << bit))	\
		(*fn)();		\
} while (0)

#include <net/netisr_dispatch.h>

#undef DONETISR
}

/*
 * Convert a kernel VA to a physical address.
 */
paddr_t
kvtop(caddr_t addr)
{
	vaddr_t va;
	paddr_t pa;
	uintptr_t off;
	extern char end[];

	if (addr < end)
		return (paddr_t)addr;

	va = trunc_page((vaddr_t)addr);
	off = (uintptr_t)addr - va;

	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE) {
		/*printf("kvtop: zero page frame (va=0x%x)\n", addr);*/
		return (paddr_t)addr;
	}

	return (pa + off);
}

/*
 * Allocate VM space and map in the I/O address.
 */
void *
mapiodev(paddr_t pa, psize_t len)
{
	paddr_t faddr;
	vaddr_t taddr, va;
	int off;

	faddr = trunc_page(pa);
	off = pa - faddr;
	len = round_page(off + len);
	va = taddr = uvm_km_valloc(kernel_map, len);

	if (va == 0)
		return NULL;

	for (; len > 0; len -= PAGE_SIZE) {
		pmap_kenter_pa(taddr, faddr, VM_PROT_READ | VM_PROT_WRITE);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (void *)(va + off);
}
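
/*
 * Example use of mapiodev() (hypothetical device address): map a 4 KB
 * register block and read a 32-bit register at offset 0x10:
 *
 *	volatile uint32_t *regs;
 *	uint32_t val;
 *
 *	regs = mapiodev(0xf3000000, 0x1000);
 *	if (regs == NULL)
 *		panic("unable to map device registers");
 *	val = regs[0x10 / 4];
 */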