/*	$NetBSD: pmap_bootstrap.c,v 1.78 2009/08/11 17:04:18 matt Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.78 2009/08/11 17:04:18 matt Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "zsc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/reboot.h>

#include <uvm/uvm_extern.h>

#include <machine/pte.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/autoconf.h>
#include <machine/video.h>

#include <mac68k/mac68k/macrom.h>

#define PA2VA(v, t)	(t)((u_int)(v) - firstpa)
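/*
 * The PA2VA() macro above converts a physical address within the loaded
 * kernel into the matching logical address simply by subtracting the
 * kernel's physical load address (firstpa).
 *
 * Purely illustrative example (made-up numbers, not real values): with
 * firstpa == 0x4000, PA2VA(0x5000, u_int *) yields a u_int pointer to
 * logical address 0x1000.
 */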

extern char *etext;
extern char *extiobase, *proc0paddr;

extern paddr_t avail_start;
extern paddr_t avail_end;

#if NZSC > 0
extern int	zsinited;
#endif

/*
 * These are used to map the RAM:
 */
int	numranges;	/* = 0 == don't use the ranges */
u_long	low[8];
u_long	high[8];
u_long	maxaddr;	/* PA of the last physical page */
int	vidlen;
#define VIDMAPSIZE	btoc(vidlen)
static vaddr_t	newvideoaddr;

extern void *	ROMBase;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void *CADDR1, *CADDR2;
char *vmmap;
void *msgbufaddr;

void	pmap_bootstrap(paddr_t, paddr_t);
void	bootstrap_mac68k(int);

/*
 * Bootstrap the VM system.
 *
 * This is called with the MMU either on or off.  If it's on, we assume
 * that it's mapped with the same PA <=> LA mapping that we eventually
 * want.  The page sizes and the protections will be wrong, anyway.
 *
 * nextpa is the first address following the loaded kernel.  On an IIsi
 * on 12 May 1996, that was 0xf9000 beyond firstpa.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t kstpa, kptpa, kptmpa, p0upa;
	u_int nptpages, kstsize;
	paddr_t avail_next;
	int avail_remaining;
	int avail_range;
	int i;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;
	extern char start[];

	vidlen = m68k_round_page(mac68k_video.mv_height *
	    mac68k_video.mv_stride + m68k_page_offset(mac68k_video.mv_phys));

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   NBMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 */
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;
	kptpa = nextpa;
	nptpages = Sysptsize +
	    (IIOMAPSIZE + ROMMAPSIZE + VIDMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;

	for (i = 0; i < numranges; i++)
		if (low[i] <= firstpa && firstpa < high[i])
			break;
	if (i >= numranges || nextpa > high[i]) {
		if (mac68k_machine.do_graybars) {
			printf("Failure in NetBSD boot; ");
			if (i < numranges)
				printf("nextpa=0x%lx, high[%d]=0x%lx.\n",
				    nextpa, i, high[i]);
			else
				printf("can't find kernel RAM segment.\n");
			printf("You're hosed! Try booting with 32-bit ");
			printf("addressing enabled in the memory control ");
			printf("panel.\n");
			printf("Older machines may need Mode32 to get that ");
			printf("option.\n");
		}
		panic("Cannot work with the current memory mappings.");
	}

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
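	/*
	 * Rough arithmetic behind the figures above (a sketch assuming
	 * 4kb pages and 4-byte descriptors; the real values come from
	 * PAGE_SIZE, NPTEPG and the SG4_* constants):
	 *
	 *	level 1 table:	128 descriptors * 4 bytes = 512 bytes,
	 *			each descriptor covering 4gb / 128 = 32mb.
	 *	level 2 block:	128 descriptors * 4 bytes = 512 bytes,
	 *			each descriptor covering 32mb / 128 = 256kb.
	 *
	 * A single 4096-byte segment table page thus holds the 512-byte
	 * level 1 table plus (4096 - 512) / 512 = 7 level 2 blocks,
	 * i.e. 7 * 32mb = 224mb of mappable address space, which is
	 * where the 224mb figure above comes from.
	 */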
	if (mmutype == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = PA2VA(kstpa, u_int *);
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
		pte = &(PA2VA(kstpa, u_int *))[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = PA2VA(kstpa, u_int *);
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &(PA2VA(kstpa, u_int*))[SG4_LEV1SIZE-1];
		pte = &(PA2VA(kstpa, u_int*))[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map Sysmap.
		 */
		pte = &(PA2VA(kstpa, u_int*))
		    [kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = PA2VA(kptmpa, u_int *);
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last remaining entry.
		 */
		epte = &(PA2VA(kptmpa, u_int *))[NPTEPG - 1];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = PA2VA(kstpa, u_int*);
		pte = PA2VA(kptmpa, u_int*);
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &(PA2VA(kptmpa, u_int *))[NPTEPG - 1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = PA2VA(kptpa, u_int *);
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO).
	 * Pages up to "start" must be writable for the ROM.
	 */
	pte = &(PA2VA(kptpa, u_int *))[m68k_btop(KERNBASE)];
	/* XXX why KERNBASE relative? */
	epte = &pte[m68k_btop(m68k_round_page(start))];
	protopte = firstpa | PG_RW | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/* XXX why KERNBASE relative? */
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (protopte & ~PG_PROT) | PG_RO;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &(PA2VA(kptpa, u_int *))[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (mmutype == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

#define	PTE2VA(pte)	m68k_ptob(pte - PA2VA(kptpa, pt_entry_t *))

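	/*
	 * PTE2VA() turns a pointer into the statically allocated kernel
	 * page table back into the virtual address that the entry maps,
	 * by converting the entry's index into a byte offset (this
	 * assumes, as the code above does, that the table's first entry
	 * maps the bottom of the kernel address space).  It is used
	 * below to record where the internal I/O space, the ROM and the
	 * video memory end up in the kernel map.
	 */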
	protopte = IOBase | PG_RW | PG_CI | PG_V;
	IOBase = PTE2VA(pte);
	epte = &pte[IIOMAPSIZE];
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	protopte = (pt_entry_t)ROMBase | PG_RO | PG_V;
	ROMBase = (void *)PTE2VA(pte);
	epte = &pte[ROMMAPSIZE];
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	if (vidlen) {
		protopte = m68k_trunc_page(mac68k_video.mv_phys) |
		    PG_RW | PG_V | PG_CI;
		newvideoaddr = PTE2VA(pte)
		    + m68k_page_offset(mac68k_video.mv_phys);
		epte = &pte[VIDMAPSIZE];
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
	}
	virtual_avail = PTE2VA(pte);

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	Sysseg = PA2VA(kstpa, st_entry_t *);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	Sysptmap = PA2VA(kptmpa, pt_entry_t *);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	Sysmap = (pt_entry_t *)m68k_ptob((NPTEPG - 1) * NPTEPG);
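	/*
	 * A worked example of the address just computed (a sketch
	 * assuming 4kb pages and NPTEPG == 1024; the real values come
	 * from PAGE_SIZE and NPTEPG): m68k_ptob((NPTEPG - 1) * NPTEPG)
	 * is 1023 * 1024 * 4096 = 0xFFC00000, i.e. the last 4mb chunk
	 * of the 4gb address space.  The mappings set up earlier point
	 * that last chunk at Sysptmap, which in turn points at the
	 * kernel PT pages, so the kernel page table shows up there as
	 * one virtually contiguous array once the MMU is using these
	 * tables.
	 */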

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = PA2VA(p0upa, u_int *);
	epte = (u_int *)(PA2VA(p0upa, u_int) + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	proc0paddr = PA2VA(p0upa, char *);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	avail_next = avail_start = m68k_round_page(nextpa);
	avail_remaining = 0;
	avail_range = -1;
	for (i = 0; i < numranges; i++) {
		if (low[i] <= avail_next && avail_next < high[i]) {
			avail_range = i;
			avail_remaining = high[i] - avail_next;
		} else if (avail_range != -1) {
			avail_remaining += (high[i] - low[i]);
		}
	}
	physmem = m68k_btop(avail_remaining + nextpa - firstpa);

	maxaddr = high[numranges - 1] - m68k_ptob(1);
	high[numranges - 1] -= (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
	avail_end = high[numranges - 1];
	mem_size = m68k_ptob(physmem);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		u_int *kp;

		kp = (u_int *)&protection_codes;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}
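	/*
	 * The ORed VM_PROT_* constants above serve directly as array
	 * indices: VM_PROT_READ, VM_PROT_WRITE and VM_PROT_EXECUTE are
	 * distinct single bits (and VM_PROT_NONE is 0), so each
	 * combination selects its own slot.  The pmap code can then
	 * turn a vm_prot_t into m68k PTE protection bits with a single
	 * array reference, which is exactly the switch-free lookup the
	 * XXX comment above is after.
	 */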

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = kernel_pmap_ptr;

		kpm->pm_stab = Sysseg;
		kpm->pm_ptab = Sysmap;
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps kptmpa
		 */
		if (mmutype == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
			    SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			    num < sizeof(kpm->pm_stfree)*NBBY;
			    num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = virtual_avail;

		CADDR1 = (void *)va;
		va += PAGE_SIZE;
		CADDR2 = (void *)va;
		va += PAGE_SIZE;
		vmmap = (void *)va;
		va += PAGE_SIZE;
		msgbufaddr = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		virtual_avail = va;
	}
}

void
bootstrap_mac68k(int tc)
{
#if NZSC > 0
	extern void zs_init(void);
#endif
	extern int *esym;
	paddr_t nextpa;
	void *oldROMBase;

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping NetBSD/mac68k.\n");

	oldROMBase = ROMBase;
	mac68k_video.mv_phys = mac68k_video.mv_kvaddr;

	if (((tc & 0x80000000) && (mmutype == MMU_68030)) ||
	    ((tc & 0x8000) && (mmutype == MMU_68040))) {
		if (mac68k_machine.do_graybars)
			printf("Getting mapping from MMU.\n");
		(void) get_mapping();
		if (mac68k_machine.do_graybars)
			printf("Done.\n");
	} else {
		/* MMU not enabled.  Fake up ranges. */
		numranges = 1;
		low[0] = 0;
		high[0] = mac68k_machine.mach_memsize * (1024 * 1024);
		if (mac68k_machine.do_graybars)
			printf("Faked range to byte 0x%lx.\n", high[0]);
	}
	nextpa = load_addr + m68k_round_page(esym);

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping the pmap system.\n");

	pmap_bootstrap(nextpa, load_addr);

	if (mac68k_machine.do_graybars)
		printf("Pmap bootstrapped.\n");

	if (!vidlen)
		panic("Don't know how to relocate video!");

	if (mac68k_machine.do_graybars)
		printf("Moving ROMBase from %p to %p.\n", oldROMBase, ROMBase);

	mrg_fixupROMBase(oldROMBase, ROMBase);

	if (mac68k_machine.do_graybars)
		printf("Video address %p -> %p.\n",
		    (void *)mac68k_video.mv_kvaddr, (void *)newvideoaddr);

	mac68k_set_io_offsets(IOBase);

	/*
	 * If the serial ports are going (for console or 'echo'), then
	 * we need to make sure the IO change gets propagated properly.
	 * This resets the base addresses for the 8530 (serial) driver.
	 *
	 * WARNING!!!  No printfs() (etc) BETWEEN zs_init() and the end
	 * of this function (where we start using the MMU, so the new
	 * address is correct).
	 */
#if NZSC > 0
	if (zsinited != 0)
		zs_init();
#endif

	mac68k_video.mv_kvaddr = newvideoaddr;
}